feat(roles): remove roles before adding submodules

This commit is contained in:
Bertrand Lanson 2023-12-22 23:18:47 +01:00
parent cf1bea35c2
commit 965f955a7c
167 changed files with 15 additions and 6293 deletions

View File

@ -1,3 +1,9 @@
---
- name: Include a playbook from a collection
ansible.builtin.import_playbook: ednxzu.hashistack.preflight.yml
- name: Include a playbook from a collection
ansible.builtin.import_playbook: ednxzu.hashistack.prepare.yml
- name: Include a playbook from a collection
ansible.builtin.import_playbook: ednxzu.hashistack.deploy.yml

9
playbooks/preflight.yml Normal file
View File

@ -0,0 +1,9 @@
---
# hashistack deployment playbook
- name: "Preflight"
hosts: all
gather_facts: true
tasks:
- name: "Debug"
ansible.builtin.debug:
msg: "{{ ansible_hostname }}"

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2017 Bertrand Lanson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,106 +0,0 @@
hashicorp_consul
=========
> This repository is only a mirror. Development and testing is done on a private gitea server.
This role installs and configures consul on **debian-based** distributions.
Requirements
------------
None.
Role Variables
--------------
Available variables are listed below, along with default values. A sample file for the default values is available in `default/hashicorp_consul.yml.sample` in case you need it for any `group_vars` or `host_vars` configuration.
```yaml
hashi_consul_install: true # by default, set to true
```
This variable defines if the consul package is to be installed or not before configuring. If you install consul using another task, you can set this to `false`.
```yaml
hashi_consul_auto_update: false # by default, set to false
```
This variable allows you to choose to automatically update consul if a newer version is available. Updating consul is usually pretty safe if done on a regular basis, but for better control over the upgrade process, see `hashi_consul_version`.
```yaml
hashi_consul_start_service: true
```
This variable defines if the consul service should be started once it has been configured. This is useful in case you're using this role to build golden images, in which case you might want to only enable the service, to have it start on the next boot (when the image is launched).
```yaml
hashi_consul_version: latest # by default, set to latest
```
This variable specifies the version of consul to install when `hashi_consul_install` is set to `true`. The version to specify is the version of the package on the hashicorp repository (`1.14.1-1` for example). This can be found by running `apt-cache madison consul` on a machine with the repository installed.
```yaml
hashi_consul_deploy_method: host # by default, set to host
```
This variable defines the method of deployment of consul. The `host` method installs the binary directly on the host, and runs consul as a systemd service. The `docker` method installs consul as a docker container.
> Currently, only the `host` method is available, the `docker` method will be added later.
```yaml
hashi_consul_env_variables: # by default, set to {}
env_var: value
```
This value is a list of key/value that will populate the `consul.env` file. You do not have to capitalize the KEYS, as it will be done automatically.
```yaml
hashi_consul_data_dir: "/opt/consul" # by default, set to /opt/consul
```
This value defines the path where consul data will be stored on the node. Defaults to `/opt/consul`.
```yaml
hashi_consul_extra_files: false # by default, set to false
```
This variable defines whether or not there are extra configuration files to copy to the target. If there are, these extra files are expected to be jinja2 templates located all in the same directory, and will be copied to the specified directory on the target machine.
```yaml
hashi_consul_extra_files_src: /tmp/extra_files # by default, set to /tmp/extra_files
```
This variable defines the source directory (without the trailing /) for the extra files to be copied in case there are some.
```yaml
hashi_consul_extra_files_dst: /etc/consul.d/extra_files # by default, set to /etc/consul.d/extra_files
```
This variable defines the destination directory (without the trailing /) for the extra files to be copied.
```yaml
hashi_consul_envoy_install: false # by default, set to false
```
This variable allows you to install the envoy binary on the consul node, in case you need to deploy connect proxies. This feature is useful when deploying consul agents that will handle services in the service mesh. It is NOT required on server nodes (since they most likely won't have services running in the service mesh).
```yaml
hashi_consul_envoy_version: latest # by default, set to latest
```
This variable defines which version of envoy to install in case `hashi_consul_envoy_install` is set to true. **IMPORTANT:** The `latest` version set by default is not guaranteed to work, please refer to the [documentation](https://developer.hashicorp.com/consul/docs/connect/proxies/envoy#supported-versions) for information about the support matrix for consul and envoy.
```yaml
hashi_consul_configuration: {} # by default, set to a simple configuration
```
This variable sets all of the configuration parameters for consul. For more information on all of them, please check the [documentation](https://developer.hashicorp.com/consul/docs/agent/config/config-files). This variable is parsed and converted to json format to create the config file, so each key and value should be set according to the documentation. This method of passing configuration allows for compatibility with every configuration parameter that consul has to offer. The defaults are simply here to deploy a simple, single-node consul server without much configuration, and should NOT be used in production. You will want to edit this to deploy production-ready clusters.
Dependencies
------------
`ednxzu.manage_repositories` to configure the hashicorp apt repository.
`ednxzu.manage_apt_packages` to install consul.
Example Playbook
----------------
```yaml
# calling the role inside a playbook with either the default or group_vars/host_vars
- hosts: servers
roles:
- ednxzu.hashicorp_consul
```
License
-------
MIT / BSD
Author Information
------------------
This role was created by Bertrand Lanson in 2023.

View File

@ -1,69 +0,0 @@
---
# hashi_consul_install: true
# hashi_consul_auto_update: false
# hashi_consul_start_service: true
# hashi_consul_version: latest
# hashi_consul_deploy_method: host # deployment method, either host or docker.
# hashi_consul_env_variables: {}
# hashi_consul_data_dir: "/opt/consul"
# hashi_consul_extra_files: false
# hashi_consul_extra_files_src: /tmp/extra_files
# hashi_consul_extra_files_dst: /etc/consul.d/extra_files
# hashi_consul_envoy_install: false
# hashi_consul_envoy_version: latest
# #! consul configuration
# hashi_consul_configuration:
# domain: consul
# datacenter: dc1
# primary_datacenter: dc1
# client_addr: "0.0.0.0"
# bind_addr: "{{ ansible_default_ipv4.address }}"
# advertise_addr: "{{ ansible_default_ipv4.address }}"
# data_dir: "{{ hashi_consul_data_dir }}"
# encrypt: "{{ 'mysupersecretgossipencryptionkey'|b64encode }}"
# server: true
# bootstrap_expect: 1
# retry_join:
# - "{{ ansible_default_ipv4.address }}"
# ui_config:
# enabled: true
# connect:
# enabled: false
# leave_on_terminate: true
# rejoin_after_leave: true
# enable_script_checks: true
# enable_syslog: true
# log_level: INFO
# acl:
# enabled: false
# default_policy: "allow"
# enable_token_persistence: true
# tokens:
# initial_management: ""
# agent: ""
# dns_config:
# allow_stale: true
# enable_truncate: true
# only_passing: true
# ports:
# dns: 8600
# http: 8500
# https: -1
# grpc: 8502
# grpc_tls: 8503
# server: 8300
# serf_lan: 8301
# serf_wan: 8302
# sidecar_min_port: 21000
# sidecar_max_port: 21255
# expose_min_port: 21500
# expose_max_port: 21755
# # tls:
# # defaults:
# # ca_file: "{{ hashi_consul_data_dir }}/tls/ca.pem"
# # cert_file: "{{ hashi_consul_data_dir }}/tls/cert.pem"
# # key_file: "{{ hashi_consul_data_dir }}/tls/key.pem"
# # verify_incoming: false
# # verify_outgoing: true
# # grpc: {}
# # https: {}

View File

@ -1,70 +0,0 @@
---
# defaults file for hashicorp_consul
hashi_consul_install: true
hashi_consul_auto_update: false
hashi_consul_start_service: true
hashi_consul_version: latest
hashi_consul_deploy_method: host # deployment method, either host or docker.
hashi_consul_env_variables: {}
hashi_consul_data_dir: "/opt/consul"
hashi_consul_extra_files: false
hashi_consul_extra_files_src: /tmp/extra_files
hashi_consul_extra_files_dst: /etc/consul.d/extra_files
hashi_consul_envoy_install: false
hashi_consul_envoy_version: latest
#! consul configuration
hashi_consul_configuration:
domain: consul
datacenter: dc1
primary_datacenter: dc1
client_addr: "0.0.0.0"
bind_addr: "{{ ansible_default_ipv4.address }}"
advertise_addr: "{{ ansible_default_ipv4.address }}"
data_dir: "{{ hashi_consul_data_dir }}"
encrypt: "{{ 'mysupersecretgossipencryptionkey'|b64encode }}"
server: true
bootstrap_expect: 1
retry_join:
- "{{ ansible_default_ipv4.address }}"
ui_config:
enabled: true
connect:
enabled: false
leave_on_terminate: true
rejoin_after_leave: true
enable_script_checks: true
enable_syslog: true
log_level: INFO
acl:
enabled: false
default_policy: "allow"
enable_token_persistence: true
tokens:
initial_management: ""
agent: ""
dns_config:
allow_stale: true
enable_truncate: true
only_passing: true
ports:
dns: 8600
http: 8500
https: -1
grpc: 8502
grpc_tls: 8503
server: 8300
serf_lan: 8301
serf_wan: 8302
sidecar_min_port: 21000
sidecar_max_port: 21255
expose_min_port: 21500
expose_max_port: 21755
# tls:
# defaults:
# ca_file: "{{ hashi_consul_data_dir }}/tls/ca.pem"
# cert_file: "{{ hashi_consul_data_dir }}/tls/cert.pem"
# key_file: "{{ hashi_consul_data_dir }}/tls/key.pem"
# verify_incoming: false
# verify_outgoing: true
# grpc: {}
# https: {}

View File

@ -1,20 +0,0 @@
---
# handlers file for hashicorp_consul
- name: "Reload systemd file"
ansible.builtin.systemd:
daemon_reload: true
listen: "systemctl-daemon-reload"
- name: "Enable consul service"
ansible.builtin.service:
name: consul
enabled: true
listen: "systemctl-enable-consul"
- name: "Start consul service"
ansible.builtin.service:
name: consul
state: restarted
listen: "systemctl-restart-consul"
throttle: 1
when: hashi_consul_start_service

View File

@ -1,25 +0,0 @@
---
# meta file for hashicorp_consul
galaxy_info:
namespace: 'ednxzu'
role_name: 'hashicorp_consul'
author: 'Bertrand Lanson'
description: 'Install and configure hashicorp consul for debian-based distros.'
license: 'license (BSD, MIT)'
min_ansible_version: '2.10'
platforms:
- name: Ubuntu
versions:
- focal
- jammy
- name: Debian
versions:
- bullseye
- bookworm
galaxy_tags:
- 'ubuntu'
- 'debian'
- 'hashicorp'
- 'consul'
dependencies: []

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_consul"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_consul"

View File

@ -1,37 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: docker
platforms:
- name: instance
image: geerlingguy/docker-${MOLECULE_TEST_OS}-ansible
command: ""
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
cgroupns_mode: host
privileged: true
pre_build_image: true
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: default
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,146 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: consul user and group"
block:
- name: "Getent user consul"
ansible.builtin.getent:
database: passwd
key: consul
register: consul_user
- name: "Getent group consul"
ansible.builtin.getent:
database: group
key: consul
register: consul_group
- name: "Verify consul user and group"
ansible.builtin.assert:
that:
- not consul_user.failed
- not consul_group.failed
- "'consul' in consul_user.ansible_facts.getent_passwd.keys()"
- "'/home/consul' in consul_user.ansible_facts.getent_passwd['consul']"
- "'/bin/false' in consul_user.ansible_facts.getent_passwd['consul']"
- "'consul' in consul_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/consul.d"
block:
- name: "Stat directory /etc/consul.d"
ansible.builtin.stat:
path: "/etc/consul.d"
register: stat_etc_consul_d
- name: "Stat file /etc/consul.d/consul.env"
ansible.builtin.stat:
path: "/etc/consul.d/consul.env"
register: stat_etc_consul_d_consul_env
- name: "Stat file /etc/consul.d/consul.json"
ansible.builtin.stat:
path: "/etc/consul.d/consul.json"
register: stat_etc_consul_d_consul_json
- name: "Slurp file /etc/consul.d/consul.json"
ansible.builtin.slurp:
src: "/etc/consul.d/consul.json"
register: slurp_etc_consul_d_consul_json
- name: "Verify directory /etc/consul.d"
ansible.builtin.assert:
that:
- stat_etc_consul_d.stat.exists
- stat_etc_consul_d.stat.isdir
- stat_etc_consul_d.stat.pw_name == 'consul'
- stat_etc_consul_d.stat.gr_name == 'consul'
- stat_etc_consul_d.stat.mode == '0755'
- stat_etc_consul_d_consul_env.stat.exists
- stat_etc_consul_d_consul_env.stat.isreg
- stat_etc_consul_d_consul_env.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_env.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_env.stat.mode == '0600'
- stat_etc_consul_d_consul_json.stat.exists
- stat_etc_consul_d_consul_json.stat.isreg
- stat_etc_consul_d_consul_json.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_json.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_json.stat.mode == '0600'
- slurp_etc_consul_d_consul_json.content != ''
- name: "Test: directory /opt/consul"
block:
- name: "Stat directory /opt/consul"
ansible.builtin.stat:
path: "/opt/consul"
register: stat_opt_consul
- name: "Verify directory /opt/consul"
ansible.builtin.assert:
that:
- stat_opt_consul.stat.exists
- stat_opt_consul.stat.isdir
- stat_opt_consul.stat.pw_name == 'consul'
- stat_opt_consul.stat.gr_name == 'consul'
- stat_opt_consul.stat.mode == '0755'
- name: "Test: service consul"
block:
- name: "Get service consul"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/consul.service"
ansible.builtin.stat:
path: "/etc/systemd/system/consul.service"
register: stat_etc_systemd_system_consul_service
- name: "Slurp file /etc/systemd/system/consul.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/consul.service"
register: slurp_etc_systemd_system_consul_service
- name: "Verify service consul"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_consul_service.stat.exists
- stat_etc_systemd_system_consul_service.stat.isreg
- stat_etc_systemd_system_consul_service.stat.pw_name == 'root'
- stat_etc_systemd_system_consul_service.stat.gr_name == 'root'
- stat_etc_systemd_system_consul_service.stat.mode == '0644'
- slurp_etc_systemd_system_consul_service.content != ''
- ansible_facts.services['consul.service'] is defined
- ansible_facts.services['consul.service']['source'] == 'systemd'
- ansible_facts.services['consul.service']['state'] == 'running'
- ansible_facts.services['consul.service']['status'] == 'enabled'
- name: "Test: interaction consul"
block:
- name: "Command consul kv put"
ansible.builtin.command: "consul kv put foo bar"
changed_when: false
register: consul_kv_put
- name: "Command consul kv get"
ansible.builtin.command: "consul kv get foo"
changed_when: false
register: consul_kv_get
- name: "Command consul kv delete"
ansible.builtin.command: "consul kv delete foo"
changed_when: false
register: consul_kv_delete
- name: "Command consul members"
ansible.builtin.command: "consul members"
changed_when: false
register: consul_members
- name: "Verify consul interaction"
ansible.builtin.assert:
that:
- "'instance' in consul_members.stdout"
- consul_kv_put.stdout == 'Success! Data written to: foo'
- consul_kv_get.stdout == 'bar'
- consul_kv_delete.stdout == 'Success! Deleted key: foo'

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_consul"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_consul"

View File

@ -1,35 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: vagrant
provider:
name: libvirt
platforms:
- name: instance
box: generic/${MOLECULE_TEST_OS}
cpus: 4
memory: 4096
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: default_vagrant
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,146 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: consul user and group"
block:
- name: "Getent user consul"
ansible.builtin.getent:
database: passwd
key: consul
register: consul_user
- name: "Getent group consul"
ansible.builtin.getent:
database: group
key: consul
register: consul_group
- name: "Verify consul user and group"
ansible.builtin.assert:
that:
- not consul_user.failed
- not consul_group.failed
- "'consul' in consul_user.ansible_facts.getent_passwd.keys()"
- "'/home/consul' in consul_user.ansible_facts.getent_passwd['consul']"
- "'/bin/false' in consul_user.ansible_facts.getent_passwd['consul']"
- "'consul' in consul_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/consul.d"
block:
- name: "Stat directory /etc/consul.d"
ansible.builtin.stat:
path: "/etc/consul.d"
register: stat_etc_consul_d
- name: "Stat file /etc/consul.d/consul.env"
ansible.builtin.stat:
path: "/etc/consul.d/consul.env"
register: stat_etc_consul_d_consul_env
- name: "Stat file /etc/consul.d/consul.json"
ansible.builtin.stat:
path: "/etc/consul.d/consul.json"
register: stat_etc_consul_d_consul_json
- name: "Slurp file /etc/consul.d/consul.json"
ansible.builtin.slurp:
src: "/etc/consul.d/consul.json"
register: slurp_etc_consul_d_consul_json
- name: "Verify directory /etc/consul.d"
ansible.builtin.assert:
that:
- stat_etc_consul_d.stat.exists
- stat_etc_consul_d.stat.isdir
- stat_etc_consul_d.stat.pw_name == 'consul'
- stat_etc_consul_d.stat.gr_name == 'consul'
- stat_etc_consul_d.stat.mode == '0755'
- stat_etc_consul_d_consul_env.stat.exists
- stat_etc_consul_d_consul_env.stat.isreg
- stat_etc_consul_d_consul_env.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_env.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_env.stat.mode == '0600'
- stat_etc_consul_d_consul_json.stat.exists
- stat_etc_consul_d_consul_json.stat.isreg
- stat_etc_consul_d_consul_json.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_json.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_json.stat.mode == '0600'
- slurp_etc_consul_d_consul_json.content != ''
- name: "Test: directory /opt/consul"
block:
- name: "Stat directory /opt/consul"
ansible.builtin.stat:
path: "/opt/consul"
register: stat_opt_consul
- name: "Verify directory /opt/consul"
ansible.builtin.assert:
that:
- stat_opt_consul.stat.exists
- stat_opt_consul.stat.isdir
- stat_opt_consul.stat.pw_name == 'consul'
- stat_opt_consul.stat.gr_name == 'consul'
- stat_opt_consul.stat.mode == '0755'
- name: "Test: service consul"
block:
- name: "Get service consul"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/consul.service"
ansible.builtin.stat:
path: "/etc/systemd/system/consul.service"
register: stat_etc_systemd_system_consul_service
- name: "Slurp file /etc/systemd/system/consul.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/consul.service"
register: slurp_etc_systemd_system_consul_service
- name: "Verify service consul"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_consul_service.stat.exists
- stat_etc_systemd_system_consul_service.stat.isreg
- stat_etc_systemd_system_consul_service.stat.pw_name == 'root'
- stat_etc_systemd_system_consul_service.stat.gr_name == 'root'
- stat_etc_systemd_system_consul_service.stat.mode == '0644'
- slurp_etc_systemd_system_consul_service.content != ''
- ansible_facts.services['consul.service'] is defined
- ansible_facts.services['consul.service']['source'] == 'systemd'
- ansible_facts.services['consul.service']['state'] == 'running'
- ansible_facts.services['consul.service']['status'] == 'enabled'
- name: "Test: interaction consul"
block:
- name: "Command consul kv put"
ansible.builtin.command: "consul kv put foo bar"
changed_when: false
register: consul_kv_put
- name: "Command consul kv get"
ansible.builtin.command: "consul kv get foo"
changed_when: false
register: consul_kv_get
- name: "Command consul kv delete"
ansible.builtin.command: "consul kv delete foo"
changed_when: false
register: consul_kv_delete
- name: "Command consul members"
ansible.builtin.command: "consul members"
changed_when: false
register: consul_members
- name: "Verify consul interaction"
ansible.builtin.assert:
that:
- "'instance' in consul_members.stdout"
- consul_kv_put.stdout == 'Success! Data written to: foo'
- consul_kv_get.stdout == 'bar'
- consul_kv_delete.stdout == 'Success! Deleted key: foo'

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_consul"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_consul"

View File

@ -1,69 +0,0 @@
---
hashi_consul_install: true
hashi_consul_auto_update: true
hashi_consul_start_service: true
hashi_consul_version: latest
hashi_consul_deploy_method: host # deployment method, either host or docker.
hashi_consul_env_variables: {}
hashi_consul_data_dir: "/opt/consul"
hashi_consul_extra_files: false
hashi_consul_extra_files_src: /tmp/extra_files
hashi_consul_extra_files_dst: /etc/consul.d/extra_files
hashi_consul_envoy_install: true
hashi_consul_envoy_version: v1.26.3
#! consul configuration
hashi_consul_configuration:
domain: consul
datacenter: dc1
primary_datacenter: dc1
client_addr: "0.0.0.0"
bind_addr: "{{ ansible_default_ipv4.address }}"
advertise_addr: "{{ ansible_default_ipv4.address }}"
data_dir: "{{ hashi_consul_data_dir }}"
encrypt: "{{ 'mysupersecretgossipencryptionkey'|b64encode }}"
server: true
bootstrap_expect: 1
retry_join:
- "{{ ansible_default_ipv4.address }}"
ui_config:
enabled: true
connect:
enabled: false
leave_on_terminate: true
rejoin_after_leave: true
enable_script_checks: true
enable_syslog: true
log_level: INFO
acl:
enabled: true
default_policy: "deny"
enable_token_persistence: true
tokens:
initial_management: "1a1f2ce5-3730-47de-9a9c-89e037376bab"
agent: "1a1f2ce5-3730-47de-9a9c-89e037376bab"
dns_config:
allow_stale: true
enable_truncate: true
only_passing: true
ports:
dns: 8600
http: 8500
https: -1
grpc: 8502
grpc_tls: 8503
server: 8300
serf_lan: 8301
serf_wan: 8302
sidecar_min_port: 21000
sidecar_max_port: 21255
expose_min_port: 21500
expose_max_port: 21755
# tls:
# defaults:
# ca_file: "{{ hashi_consul_data_dir }}/tls/ca.pem"
# cert_file: "{{ hashi_consul_data_dir }}/tls/cert.pem"
# key_file: "{{ hashi_consul_data_dir }}/tls/key.pem"
# verify_incoming: false
# verify_outgoing: true
# grpc: {}
# https: {}

View File

@ -1,37 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: docker
platforms:
- name: instance
image: geerlingguy/docker-${MOLECULE_TEST_OS}-ansible
command: ""
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
cgroupns_mode: host
privileged: true
pre_build_image: true
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: with_acl_enabled
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,156 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: consul user and group"
block:
- name: "Getent user consul"
ansible.builtin.getent:
database: passwd
key: consul
register: consul_user
- name: "Getent group consul"
ansible.builtin.getent:
database: group
key: consul
register: consul_group
- name: "Verify consul user and group"
ansible.builtin.assert:
that:
- not consul_user.failed
- not consul_group.failed
- "'consul' in consul_user.ansible_facts.getent_passwd.keys()"
- "'/home/consul' in consul_user.ansible_facts.getent_passwd['consul']"
- "'/bin/false' in consul_user.ansible_facts.getent_passwd['consul']"
- "'consul' in consul_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/consul.d"
block:
- name: "Stat directory /etc/consul.d"
ansible.builtin.stat:
path: "/etc/consul.d"
register: stat_etc_consul_d
- name: "Stat file /etc/consul.d/consul.env"
ansible.builtin.stat:
path: "/etc/consul.d/consul.env"
register: stat_etc_consul_d_consul_env
- name: "Stat file /etc/consul.d/consul.json"
ansible.builtin.stat:
path: "/etc/consul.d/consul.json"
register: stat_etc_consul_d_consul_json
- name: "Slurp file /etc/consul.d/consul.json"
ansible.builtin.slurp:
src: "/etc/consul.d/consul.json"
register: slurp_etc_consul_d_consul_json
- name: "Verify directory /etc/consul.d"
ansible.builtin.assert:
that:
- stat_etc_consul_d.stat.exists
- stat_etc_consul_d.stat.isdir
- stat_etc_consul_d.stat.pw_name == 'consul'
- stat_etc_consul_d.stat.gr_name == 'consul'
- stat_etc_consul_d.stat.mode == '0755'
- stat_etc_consul_d_consul_env.stat.exists
- stat_etc_consul_d_consul_env.stat.isreg
- stat_etc_consul_d_consul_env.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_env.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_env.stat.mode == '0600'
- stat_etc_consul_d_consul_json.stat.exists
- stat_etc_consul_d_consul_json.stat.isreg
- stat_etc_consul_d_consul_json.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_json.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_json.stat.mode == '0600'
- slurp_etc_consul_d_consul_json.content != ''
- name: "Test: directory /opt/consul"
block:
- name: "Stat directory /opt/consul"
ansible.builtin.stat:
path: "/opt/consul"
register: stat_opt_consul
- name: "Verify directory /opt/consul"
ansible.builtin.assert:
that:
- stat_opt_consul.stat.exists
- stat_opt_consul.stat.isdir
- stat_opt_consul.stat.pw_name == 'consul'
- stat_opt_consul.stat.gr_name == 'consul'
- stat_opt_consul.stat.mode == '0755'
- name: "Test: service consul"
block:
- name: "Get service consul"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/consul.service"
ansible.builtin.stat:
path: "/etc/systemd/system/consul.service"
register: stat_etc_systemd_system_consul_service
- name: "Slurp file /etc/systemd/system/consul.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/consul.service"
register: slurp_etc_systemd_system_consul_service
- name: "Verify service consul"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_consul_service.stat.exists
- stat_etc_systemd_system_consul_service.stat.isreg
- stat_etc_systemd_system_consul_service.stat.pw_name == 'root'
- stat_etc_systemd_system_consul_service.stat.gr_name == 'root'
- stat_etc_systemd_system_consul_service.stat.mode == '0644'
- slurp_etc_systemd_system_consul_service.content != ''
- ansible_facts.services['consul.service'] is defined
- ansible_facts.services['consul.service']['source'] == 'systemd'
- ansible_facts.services['consul.service']['state'] == 'running'
- ansible_facts.services['consul.service']['status'] == 'enabled'
- name: "Test: interaction consul"
vars:
acl_token: "1a1f2ce5-3730-47de-9a9c-89e037376bab"
block:
- name: "Command consul kv put"
ansible.builtin.command: "consul kv put foo bar"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_kv_put
- name: "Command consul kv get"
ansible.builtin.command: "consul kv get foo"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_kv_get
- name: "Command consul kv delete"
ansible.builtin.command: "consul kv delete foo"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_kv_delete
- name: "Command consul members"
ansible.builtin.command: "consul members"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_members
- name: "Verify consul interaction"
ansible.builtin.assert:
that:
- "'instance' in consul_members.stdout"
- consul_kv_put.stdout == 'Success! Data written to: foo'
- consul_kv_get.stdout == 'bar'
- consul_kv_delete.stdout == 'Success! Deleted key: foo'

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_consul"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_consul"

View File

@ -1,69 +0,0 @@
---
hashi_consul_install: true
hashi_consul_auto_update: true
hashi_consul_start_service: true
hashi_consul_version: latest
hashi_consul_deploy_method: host # deployment method, either host or docker.
hashi_consul_env_variables: {}
hashi_consul_data_dir: "/opt/consul"
hashi_consul_extra_files: false
hashi_consul_extra_files_src: /tmp/extra_files
hashi_consul_extra_files_dst: /etc/consul.d/extra_files
hashi_consul_envoy_install: true
hashi_consul_envoy_version: v1.26.3
#! consul configuration
hashi_consul_configuration:
domain: consul
datacenter: dc1
primary_datacenter: dc1
client_addr: "0.0.0.0"
bind_addr: "{{ ansible_default_ipv4.address }}"
advertise_addr: "{{ ansible_default_ipv4.address }}"
data_dir: "{{ hashi_consul_data_dir }}"
encrypt: "{{ 'mysupersecretgossipencryptionkey'|b64encode }}"
server: true
bootstrap_expect: 1
retry_join:
- "{{ ansible_default_ipv4.address }}"
ui_config:
enabled: true
connect:
enabled: false
leave_on_terminate: true
rejoin_after_leave: true
enable_script_checks: true
enable_syslog: true
log_level: INFO
acl:
enabled: true
default_policy: "deny"
enable_token_persistence: true
tokens:
initial_management: "1a1f2ce5-3730-47de-9a9c-89e037376bab"
agent: "1a1f2ce5-3730-47de-9a9c-89e037376bab"
dns_config:
allow_stale: true
enable_truncate: true
only_passing: true
ports:
dns: 8600
http: 8500
https: -1
grpc: 8502
grpc_tls: 8503
server: 8300
serf_lan: 8301
serf_wan: 8302
sidecar_min_port: 21000
sidecar_max_port: 21255
expose_min_port: 21500
expose_max_port: 21755
# tls:
# defaults:
# ca_file: "{{ hashi_consul_data_dir }}/tls/ca.pem"
# cert_file: "{{ hashi_consul_data_dir }}/tls/cert.pem"
# key_file: "{{ hashi_consul_data_dir }}/tls/key.pem"
# verify_incoming: false
# verify_outgoing: true
# grpc: {}
# https: {}

View File

@ -1,35 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: vagrant
provider:
name: libvirt
platforms:
- name: instance
box: generic/${MOLECULE_TEST_OS}
cpus: 4
memory: 4096
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: with_acl_enabled_vagrant
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,156 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: consul user and group"
block:
- name: "Getent user consul"
ansible.builtin.getent:
database: passwd
key: consul
register: consul_user
- name: "Getent group consul"
ansible.builtin.getent:
database: group
key: consul
register: consul_group
- name: "Verify consul user and group"
ansible.builtin.assert:
that:
- not consul_user.failed
- not consul_group.failed
- "'consul' in consul_user.ansible_facts.getent_passwd.keys()"
- "'/home/consul' in consul_user.ansible_facts.getent_passwd['consul']"
- "'/bin/false' in consul_user.ansible_facts.getent_passwd['consul']"
- "'consul' in consul_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/consul.d"
block:
- name: "Stat directory /etc/consul.d"
ansible.builtin.stat:
path: "/etc/consul.d"
register: stat_etc_consul_d
- name: "Stat file /etc/consul.d/consul.env"
ansible.builtin.stat:
path: "/etc/consul.d/consul.env"
register: stat_etc_consul_d_consul_env
- name: "Stat file /etc/consul.d/consul.json"
ansible.builtin.stat:
path: "/etc/consul.d/consul.json"
register: stat_etc_consul_d_consul_json
- name: "Slurp file /etc/consul.d/consul.json"
ansible.builtin.slurp:
src: "/etc/consul.d/consul.json"
register: slurp_etc_consul_d_consul_json
- name: "Verify directory /etc/consul.d"
ansible.builtin.assert:
that:
- stat_etc_consul_d.stat.exists
- stat_etc_consul_d.stat.isdir
- stat_etc_consul_d.stat.pw_name == 'consul'
- stat_etc_consul_d.stat.gr_name == 'consul'
- stat_etc_consul_d.stat.mode == '0755'
- stat_etc_consul_d_consul_env.stat.exists
- stat_etc_consul_d_consul_env.stat.isreg
- stat_etc_consul_d_consul_env.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_env.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_env.stat.mode == '0600'
- stat_etc_consul_d_consul_json.stat.exists
- stat_etc_consul_d_consul_json.stat.isreg
- stat_etc_consul_d_consul_json.stat.pw_name == 'consul'
- stat_etc_consul_d_consul_json.stat.gr_name == 'consul'
- stat_etc_consul_d_consul_json.stat.mode == '0600'
- slurp_etc_consul_d_consul_json.content != ''
- name: "Test: directory /opt/consul"
block:
- name: "Stat directory /opt/consul"
ansible.builtin.stat:
path: "/opt/consul"
register: stat_opt_consul
- name: "Verify directory /opt/consul"
ansible.builtin.assert:
that:
- stat_opt_consul.stat.exists
- stat_opt_consul.stat.isdir
- stat_opt_consul.stat.pw_name == 'consul'
- stat_opt_consul.stat.gr_name == 'consul'
- stat_opt_consul.stat.mode == '0755'
- name: "Test: service consul"
block:
- name: "Get service consul"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/consul.service"
ansible.builtin.stat:
path: "/etc/systemd/system/consul.service"
register: stat_etc_systemd_system_consul_service
- name: "Slurp file /etc/systemd/system/consul.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/consul.service"
register: slurp_etc_systemd_system_consul_service
- name: "Verify service consul"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_consul_service.stat.exists
- stat_etc_systemd_system_consul_service.stat.isreg
- stat_etc_systemd_system_consul_service.stat.pw_name == 'root'
- stat_etc_systemd_system_consul_service.stat.gr_name == 'root'
- stat_etc_systemd_system_consul_service.stat.mode == '0644'
- slurp_etc_systemd_system_consul_service.content != ''
- ansible_facts.services['consul.service'] is defined
- ansible_facts.services['consul.service']['source'] == 'systemd'
- ansible_facts.services['consul.service']['state'] == 'running'
- ansible_facts.services['consul.service']['status'] == 'enabled'
- name: "Test: interaction consul"
vars:
acl_token: "1a1f2ce5-3730-47de-9a9c-89e037376bab"
block:
- name: "Command consul kv put"
ansible.builtin.command: "consul kv put foo bar"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_kv_put
- name: "Command consul kv get"
ansible.builtin.command: "consul kv get foo"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_kv_get
- name: "Command consul kv delete"
ansible.builtin.command: "consul kv delete foo"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_kv_delete
- name: "Command consul members"
ansible.builtin.command: "consul members"
environment:
CONSUL_HTTP_TOKEN: "{{ acl_token }}"
changed_when: false
register: consul_members
- name: "Verify consul interaction"
ansible.builtin.assert:
that:
- "'instance' in consul_members.stdout"
- consul_kv_put.stdout == 'Success! Data written to: foo'
- consul_kv_get.stdout == 'bar'
- consul_kv_delete.stdout == 'Success! Deleted key: foo'

View File

@ -1,46 +0,0 @@
---
# task/configure file for hashicorp_consul
# Renders the consul configuration and environment files, and optionally a set
# of extra template files, notifying the service handlers on change.
- name: "Ensure default consul.hcl is removed"
  ansible.builtin.file:
    path: /etc/consul.d/consul.hcl
    state: absent

- name: "Copy consul.json template"
  ansible.builtin.template:
    src: consul.json.j2
    dest: "{{ hashi_consul_config_dir }}/consul.json"
    owner: "{{ hashi_consul_user }}"
    group: "{{ hashi_consul_group }}"
    mode: "0600"
  notify:
    - "systemctl-enable-consul"
    - "systemctl-restart-consul"

- name: "Create consul.env"
  ansible.builtin.template:
    src: consul.env.j2
    dest: "{{ hashi_consul_config_dir }}/consul.env"
    owner: "{{ hashi_consul_user }}"
    group: "{{ hashi_consul_group }}"
    mode: "0600"
  # consul.env is read by the service at startup (EnvironmentFile in the unit);
  # without these notifies, env changes were silently ignored until a manual
  # restart, unlike consul.json changes which did trigger a restart.
  notify:
    - "systemctl-enable-consul"
    - "systemctl-restart-consul"

- name: "Copy extra configuration files"
  when: hashi_consul_extra_files
  block:
    - name: "Create directory {{ hashi_consul_extra_files_dst }}"
      ansible.builtin.file:
        path: "{{ hashi_consul_extra_files_dst }}"
        state: directory
        owner: "{{ hashi_consul_user }}"
        group: "{{ hashi_consul_group }}"
        mode: "0755"

    # Each source file is a Jinja2 template; the rendered copy drops the last
    # extension (e.g. foo.hcl.j2 -> foo.hcl).
    # NOTE(review): extra files do not notify a restart — presumably deliberate
    # (they may not all be consul config); confirm with the role author.
    - name: "Copy extra configuration files"
      ansible.builtin.template:
        src: "{{ item }}"
        dest: "{{ hashi_consul_extra_files_dst }}/{{ (item | basename).split('.')[:-1] | join('.') }}"
        owner: "{{ hashi_consul_user }}"
        group: "{{ hashi_consul_group }}"
        mode: "0600"
      with_fileglob:
        - "{{ hashi_consul_extra_files_src }}/*"

View File

@ -1,25 +0,0 @@
---
# task/install file for hashicorp_consul
# Sets up the hashicorp apt repository, installs the consul package, and
# deploys the systemd unit for it.
- name: "Configure hashicorp repository"
  ansible.builtin.include_role:
    name: ednxzu.manage_repositories
  vars:
    # disable the role's default repos; only the hashicorp repo defined in
    # hashi_consul_repository (vars/main.yml) is managed here
    manage_repositories_enable_default_repo: false
    manage_repositories_enable_custom_repo: true
    manage_repositories_custom_repo: "{{ hashi_consul_repository }}"

- name: "Install consul:{{ hashi_consul_version }}"
  ansible.builtin.include_role:
    name: ednxzu.manage_apt_packages
  vars:
    # hashi_consul_packages pins the version and the present/latest state
    manage_apt_packages_list: "{{ hashi_consul_packages }}"

- name: "Copy systemd service file for consul"
  ansible.builtin.template:
    src: "consul.service.j2"
    dest: "/etc/systemd/system/consul.service"
    owner: root
    group: root
    mode: '0644'
  notify:
    # unit file changed: systemd must re-read its unit definitions
    - "systemctl-daemon-reload"

View File

@ -1,66 +0,0 @@
---
# task/install_envoy file for hashicorp_consul
# Resolves the wanted envoy release from the GitHub API, compares it with the
# version recorded on disk, and (re)installs the binary only when they differ.
- name: "Get release for envoy:{{ hashi_consul_envoy_version }}"
  vars:
    # 'latest' resolves via /releases/latest; a pinned tag via /releases/tags/<tag>
    _envoy_url_ext: "{% if hashi_consul_envoy_version == 'latest' %}releases{% else %}releases/tags{% endif %}"
  ansible.builtin.uri:
    url: "{{ hashi_consul_envoy_github_api }}/{{ _envoy_url_ext }}/{{ hashi_consul_envoy_version }}"
    return_content: true
  register: _envoy_new_release

- name: "Check if envoy is already installed"
  ansible.builtin.stat:
    path: "{{ hashi_consul_data_dir }}/envoy/version"
  changed_when: false
  check_mode: false
  register: _envoy_is_installed

- name: "Check current envoy version"
  ansible.builtin.command: "cat {{ hashi_consul_data_dir }}/envoy/version"
  changed_when: false
  check_mode: false
  register: _envoy_old_release
  when: _envoy_is_installed.stat.exists

- name: "Set facts for wanted envoy release"
  ansible.builtin.set_fact:
    hashi_consul_envoy_wanted_version: "{{ _envoy_new_release.json['tag_name'] | regex_replace('v', '') }}"
  when: _envoy_new_release.json is defined
    and (_envoy_new_release.json | length > 0)

- name: "Set facts for current envoy release"
  ansible.builtin.set_fact:
    hashi_consul_envoy_current_version: "{{ _envoy_old_release.stdout | regex_replace('v', '') }}"
  when: _envoy_old_release.stdout is defined
    and (_envoy_old_release.stdout | length > 0)

- name: "Create envoy directory"
  ansible.builtin.file:
    path: "{{ hashi_consul_data_dir }}/envoy"
    state: directory
    mode: "0775"

- name: "Install envoy"
  when: hashi_consul_envoy_current_version is not defined
    or hashi_consul_envoy_wanted_version not in hashi_consul_envoy_current_version
  block:
    # Fixed copy-paste: this task was named "Remove old compose binary".
    - name: "Remove old envoy binary if different"
      ansible.builtin.file:
        path: "{{ hashi_consul_envoy_path }}"
        state: absent
      register: _envoy_binary_removed

    - name: "Download and install envoy version:{{ hashi_consul_envoy_version }}"
      ansible.builtin.get_url:
        # Fixed: the URL previously ended with a trailing space inside the
        # quotes, producing an invalid download URL.
        url: "{{ hashi_consul_envoy_github_url }}/releases/download/v{{ hashi_consul_envoy_wanted_version }}/envoy-{{ hashi_consul_envoy_wanted_version }}-linux-{{ hashi_consul_envoy_arch }}"
        dest: "{{ hashi_consul_envoy_path }}"
        owner: root
        group: root
        mode: "0755"

    - name: "Update version file"
      ansible.builtin.copy:
        content: "{{ hashi_consul_envoy_wanted_version }}"
        dest: "{{ hashi_consul_data_dir }}/envoy/version"
        mode: "0600"

View File

@ -1,14 +0,0 @@
---
# task/main file for hashicorp_consul
# Entry point: user/dirs -> package install -> optional envoy -> configuration.
- name: "Import prerequisites.yml"
  ansible.builtin.include_tasks: prerequisites.yml

- name: "Import install.yml"
  ansible.builtin.include_tasks: install.yml

# envoy is only needed for consul connect sidecar proxies; opt-in via
# hashi_consul_envoy_install
- name: "Import install_envoy.yml"
  ansible.builtin.include_tasks: install_envoy.yml
  when: hashi_consul_envoy_install

- name: "Import configure.yml"
  ansible.builtin.include_tasks: configure.yml

View File

@ -1,29 +0,0 @@
---
# task/prerequisites file for hashicorp_consul
# Creates the service account and the config/data directories consul needs.
- name: "Create group {{ hashi_consul_group }}"
  ansible.builtin.group:
    name: "{{ hashi_consul_group }}"
    state: present

# service account with no login shell (/bin/false)
- name: "Create user {{ hashi_consul_user }}"
  ansible.builtin.user:
    name: "{{ hashi_consul_user }}"
    group: "{{ hashi_consul_group }}"
    shell: /bin/false
    state: present

- name: "Create directory {{ hashi_consul_config_dir }}"
  ansible.builtin.file:
    path: "{{ hashi_consul_config_dir }}"
    state: directory
    owner: "{{ hashi_consul_user }}"
    group: "{{ hashi_consul_group }}"
    mode: '0755'

- name: "Create directory {{ hashi_consul_data_dir}}"
  ansible.builtin.file:
    path: "{{ hashi_consul_data_dir }}"
    state: directory
    owner: "{{ hashi_consul_user }}"
    group: "{{ hashi_consul_group }}"
    mode: '0755'

View File

@ -1,4 +0,0 @@
# {{ ansible_managed }}
{% for item in hashi_consul_env_variables %}
{{ item|upper }}="{{ hashi_consul_env_variables[item] }}"
{% endfor %}

View File

@ -1 +0,0 @@
{{ hashi_consul_configuration|to_nice_json }}

View File

@ -1,20 +0,0 @@
[Unit]
Description=Consul
Documentation=https://developer.hashicorp.com/consul/docs
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty={{ hashi_consul_config_dir }}/consul.json
[Service]
EnvironmentFile=-{{ hashi_consul_config_dir }}/consul.env
User={{ hashi_consul_user }}
Group={{ hashi_consul_group }}
ExecStart=/usr/bin/consul agent -config-dir={{ hashi_consul_config_dir }}
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGTERM
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@ -1,28 +0,0 @@
---
# vars file for hashicorp_consul
hashi_consul_user: consul
hashi_consul_group: consul
hashi_consul_config_dir: "/etc/consul.d"
hashi_consul_envoy_github_api: https://api.github.com/repos/envoyproxy/envoy
hashi_consul_envoy_github_url: https://github.com/envoyproxy/envoy
hashi_consul_envoy_path: "/usr/local/bin/envoy"
hashi_consul_envoy_arch_map:
x86_64: 'x86_64'
aarch64: 'aarch64'
hashi_consul_envoy_arch: "{{ hashi_consul_envoy_arch_map[ansible_architecture] | default(ansible_architecture) }}"
hashi_consul_repository:
- name: hashicorp
uri: "https://apt.releases.hashicorp.com"
comments: "hashicorp repository"
types:
- deb
suites:
- "{{ ansible_distribution_release }}"
components:
- main
options:
Signed-By: "https://apt.releases.hashicorp.com/gpg"
hashi_consul_packages:
- name: consul
version: "{{ hashi_consul_version }}"
state: "{% if hashi_consul_auto_update %}latest{% else %}present{% endif %}"

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2017 Bertrand Lanson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,107 +0,0 @@
hashicorp_nomad
=========
> This repository is only a mirror. Development and testing is done on a private gitea server.
This role installs and configures nomad on **debian-based** distributions.
Requirements
------------
None.
Role Variables
--------------
Available variables are listed below, along with default values. A sample file for the default values is available in `default/hashicorp_nomad.yml.sample` in case you need it for any `group_vars` or `host_vars` configuration.
```yaml
hashi_nomad_install: true # by default, set to true
```
This variable defines if the nomad package is to be installed or not before configuring. If you install nomad using another task, you can set this to `false`.
```yaml
hashi_nomad_auto_update: false # by default, set to false
```
This variable allows you to choose to automatically update nomad if a newer version is available. Updating nomad is usually pretty safe if done on a regular basis, but for better control over the upgrade process, see `hashi_nomad_version`.
```yaml
hashi_nomad_cni_plugins_install: true # by default, set to true
```
This variable defines whether or not to install the CNI plugins on the host. Defaults to `true`.
```yaml
hashi_nomad_start_service: true
```
This variable defines if the nomad service should be started once it has been configured. This is useful in case you're using this role to build golden images, in which case you might want to only enable the service, to have it start on the next boot (when the image is launched).
```yaml
hashi_nomad_cni_plugins_version: latest # by default, set to latest
```
This variable defines the version of the CNI plugins to install.
```yaml
hashi_nomad_cni_plugins_install_path: /opt/cni/bin
```
This variable defines where to install the CNI plugins. Note that it should be referenced in the nomad configuration.
```yaml
hashi_nomad_version: latest # by default, set to latest
```
This variable specifies the version of nomad to install when `hashi_nomad_install` is set to `true`. The version to specify is the version of the package on the hashicorp repository (`1.5.1-1` for example). This can be found by running `apt-cache madison nomad` on a machine with the repository installed.
```yaml
hashi_nomad_deploy_method: host # by default, set to host
```
This variable defines the method of deployment of nomad. The `host` method installs the binary directly on the host, and runs nomad as a systemd service. The `docker` method installs nomad as a docker container.
> Currently, only the `host` method is available, the `docker` method will be added later.
```yaml
hashi_nomad_env_variables: # by default, set to empty
env_var: value
```
This value is a list of key/value that will populate the `nomad.env` file. You do not have to capitalize the KEYS, as it will be done automatically.
```yaml
hashi_nomad_extra_files: false # by default, set to false
```
This variable defines whether or not there are extra configuration files to copy to the target. If there are, these extra files are expected to be Jinja2 templates located all in the same directory, and will be copied to the specified directory on the target machine.
```yaml
hashi_nomad_extra_files_src: /tmp/extra_files # by default, set to /tmp/extra_files
```
This variable defines the source directory (without the trailing /) for the extra files to be copied in case there are some.
```yaml
hashi_nomad_extra_files_dst: /etc/nomad.d/extra_files # by default, set to /etc/nomad.d/extra_files
```
This variable defines the destination directory (without the trailing /) for the extra files to be copied.
```yaml
hashi_nomad_configuration: {} # by default, set to a simple configuration
```
This variable sets all of the configuration parameters for nomad. For more information on all of them, please check the [documentation](https://developer.hashicorp.com/nomad/docs/configuration). This variable is parsed and converted to json format to create the config file, so each key and value should be set according to the documentation. This method of passing configuration allows for compatibility with every configuration parameters that nomad has to offer. The defaults are simply here to deploy a simple, single-node nomad server without much configuration, and should NOT be used in production. You will want to edit this to deploy production-ready clusters.
Dependencies
------------
`ednxzu.manage_repositories` to configure the hashicorp apt repository.
`ednxzu.manage_apt_packages` to install nomad.
Example Playbook
----------------
```yaml
# calling the role inside a playbook with either the default or group_vars/host_vars
- hosts: servers
roles:
- ednxzu.hashicorp_nomad
```
License
-------
MIT / BSD
Author Information
------------------
This role was created by Bertrand Lanson in 2023.

View File

@ -1,86 +0,0 @@
---
# hashi_nomad_install: true
# hashi_nomad_auto_update: false
# hashi_nomad_cni_plugins_install: true
# hashi_nomad_start_service: true
# hashi_nomad_cni_plugins_version: latest
# hashi_nomad_cni_plugins_install_path: "/opt/cni/bin"
# hashi_nomad_version: latest
# hashi_nomad_deploy_method: host # deployment method, either host or docker
# hashi_nomad_env_variables: {}
# hashi_nomad_data_dir: /opt/nomad
# hashi_nomad_extra_files: false
# hashi_nomad_extra_files_src: /tmp/extra_files
# hashi_nomad_extra_files_dst: /etc/nomad.d/extra_files
# #! nomad configuration
# hashi_nomad_configuration:
# bind_addr: "0.0.0.0"
# datacenter: dc1
# log_level: INFO
# leave_on_terminate: false
# data_dir: "{{ hashi_nomad_data_dir }}"
# advertise:
# http: "{{ ansible_default_ipv4.address }}"
# rpc: "{{ ansible_default_ipv4.address }}"
# serf: "{{ ansible_default_ipv4.address }}"
# server:
# enabled: true
# bootstrap_expect: 1
# server_join:
# retry_join:
# - "{{ ansible_default_ipv4.address }}"
# client:
# enabled: false
# node_class: default
# reserved:
# cpu: 500
# memory: 300
# cni_path: "{{ hashi_nomad_cni_plugins_install_path }}"
# bridge_network_name: nomad
# bridge_network_subnet: "172.26.64.0/20"
# ui:
# enabled: true
# acl:
# enabled: false
# token_ttl: 30s
# policy_ttl: 30s
# role_ttl: 30s
# token_min_expiration_ttl: 30s
# token_max_expiration_ttl: 24h
# telemetry:
# collection_interval: 1s
# disable_hostname: false
# use_node_name: false
# publish_allocation_metrics: false
# publish_node_metrics: false
# prefix_filter: []
# disable_dispatched_job_summary_metrics: false
# prometheus_metrics: false
# # tls:
# # http: false
# # rpc: false
# # ca_file: "{{ hashi_nomad_data_dir }}/tls/ca.pem"
# # cert_file: "{{ hashi_nomad_data_dir }}/tls/cert.pem"
# # key_file: "{{ hashi_nomad_data_dir }}/tls/key.pem"
# # plugin:
# # docker:
# # config:
# # endpoint: "unix:///var/run/docker.sock"
# # allow_privileged: false
# # allow_caps: ["all"]
# # volumes:
# # enabled: true
# # consul:
# # address: "127.0.0.1:8500"
# # token: ""
# # auto_advertise: true
# # vault:
# # address: http://vault.service.consul:8200
# # token: ""
# # create_from_role: nomad-cluster
# # plugin:
# # docker:
# # endpoint: "unix:///var/run/docker.sock"
# # allow_privileged: false
# # allow_caps: ["all"]
# # volumes_enabled: true

View File

@ -1,87 +0,0 @@
---
# defaults file for hashicorp_nomad
hashi_nomad_install: true
hashi_nomad_auto_update: false
hashi_nomad_cni_plugins_install: true
hashi_nomad_start_service: true
hashi_nomad_cni_plugins_version: latest
hashi_nomad_cni_plugins_install_path: /opt/cni/bin
hashi_nomad_version: latest
hashi_nomad_deploy_method: host # deployment method, either host or docker
hashi_nomad_env_variables: {}
hashi_nomad_data_dir: /opt/nomad
hashi_nomad_extra_files: false
hashi_nomad_extra_files_src: /tmp/extra_files
hashi_nomad_extra_files_dst: /etc/nomad.d/extra_files
#! nomad configuration
hashi_nomad_configuration:
bind_addr: "0.0.0.0"
datacenter: dc1
log_level: INFO
leave_on_terminate: false
data_dir: "{{ hashi_nomad_data_dir }}"
advertise:
http: "{{ ansible_default_ipv4.address }}"
rpc: "{{ ansible_default_ipv4.address }}"
serf: "{{ ansible_default_ipv4.address }}"
server:
enabled: true
bootstrap_expect: 1
server_join:
retry_join:
- "{{ ansible_default_ipv4.address }}"
client:
enabled: false
node_class: default
reserved:
cpu: 500
memory: 300
cni_path: "{{ hashi_nomad_cni_plugins_install_path }}"
bridge_network_name: nomad
bridge_network_subnet: "172.26.64.0/20"
ui:
enabled: true
acl:
enabled: false
token_ttl: 30s
policy_ttl: 30s
role_ttl: 30s
token_min_expiration_ttl: 30s
token_max_expiration_ttl: 24h
telemetry:
collection_interval: 1s
disable_hostname: false
use_node_name: false
publish_allocation_metrics: false
publish_node_metrics: false
prefix_filter: []
disable_dispatched_job_summary_metrics: false
prometheus_metrics: false
# tls:
# http: false
# rpc: false
# ca_file: "{{ hashi_nomad_data_dir }}/tls/ca.pem"
# cert_file: "{{ hashi_nomad_data_dir }}/tls/cert.pem"
# key_file: "{{ hashi_nomad_data_dir }}/tls/key.pem"
# plugin:
# docker:
# config:
# endpoint: "unix:///var/run/docker.sock"
# allow_privileged: false
# allow_caps: ["all"]
# volumes:
# enabled: true
# consul:
# address: "127.0.0.1:8500"
# token: ""
# auto_advertise: true
# vault:
# address: http://vault.service.consul:8200
# token: ""
# create_from_role: nomad-cluster
# plugin:
# docker:
# endpoint: "unix:///var/run/docker.sock"
# allow_privileged: false
# allow_caps: ["all"]
# volumes_enabled: true

View File

@ -1,20 +0,0 @@
---
# handlers file for hashicorp_nomad
- name: "Reload systemd file"
  ansible.builtin.systemd:
    daemon_reload: true
  listen: "systemctl-daemon-reload"

- name: "Enable nomad service"
  ansible.builtin.service:
    name: nomad
    enabled: true
  listen: "systemctl-enable-nomad"

# NOTE(review): named "Start" but uses state=restarted, so it also bounces an
# already-running service on config change.
- name: "Start nomad service"
  ansible.builtin.service:
    name: nomad
    state: restarted
  listen: "systemctl-restart-nomad"
  # one host at a time — presumably to avoid restarting a whole server
  # cluster simultaneously; confirm intent with the role author
  throttle: 1
  # skipped when building images that should only have the service enabled
  when: hashi_nomad_start_service

View File

@ -1,25 +0,0 @@
---
# meta file for hashicorp_nomad
galaxy_info:
namespace: 'ednxzu'
role_name: 'hashicorp_nomad'
author: 'Bertrand Lanson'
description: 'Install and configure hashicorp nomad for debian-based distros.'
license: 'license (BSD, MIT)'
min_ansible_version: '2.10'
platforms:
- name: Ubuntu
versions:
- focal
- jammy
- name: Debian
versions:
- bullseye
- bookworm
galaxy_tags:
- 'ubuntu'
- 'debian'
- 'hashicorp'
- 'nomad'
dependencies: []

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_nomad"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_nomad"

View File

@ -1,37 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: docker
platforms:
- name: instance
image: geerlingguy/docker-${MOLECULE_TEST_OS}-ansible
command: ""
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
cgroupns_mode: host
privileged: true
pre_build_image: true
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: default
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,146 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: nomad user and group"
block:
- name: "Getent user nomad"
ansible.builtin.getent:
database: passwd
key: nomad
register: nomad_user
- name: "Getent group nomad"
ansible.builtin.getent:
database: group
key: nomad
register: nomad_group
- name: "Verify nomad user and group"
ansible.builtin.assert:
that:
- not nomad_user.failed
- not nomad_group.failed
- "'nomad' in nomad_user.ansible_facts.getent_passwd.keys()"
- "'/home/nomad' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'/bin/false' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'nomad' in nomad_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/nomad.d"
block:
- name: "Stat directory /etc/nomad.d"
ansible.builtin.stat:
path: "/etc/nomad.d"
register: stat_etc_nomad_d
- name: "Stat file /etc/nomad.d/nomad.env"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.env"
register: stat_etc_nomad_d_nomad_env
- name: "Stat file /etc/nomad.d/nomad.json"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.json"
register: stat_etc_nomad_d_nomad_json
- name: "Slurp file /etc/nomad.d/nomad.json"
ansible.builtin.slurp:
src: "/etc/nomad.d/nomad.json"
register: slurp_etc_nomad_d_nomad_json
- name: "Verify directory /etc/nomad.d"
ansible.builtin.assert:
that:
- stat_etc_nomad_d.stat.exists
- stat_etc_nomad_d.stat.isdir
- stat_etc_nomad_d.stat.pw_name == 'nomad'
- stat_etc_nomad_d.stat.gr_name == 'nomad'
- stat_etc_nomad_d.stat.mode == '0755'
- stat_etc_nomad_d_nomad_env.stat.exists
- stat_etc_nomad_d_nomad_env.stat.isreg
- stat_etc_nomad_d_nomad_env.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.mode == '0600'
- stat_etc_nomad_d_nomad_json.stat.exists
- stat_etc_nomad_d_nomad_json.stat.isreg
- stat_etc_nomad_d_nomad_json.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.mode == '0600'
- slurp_etc_nomad_d_nomad_json.content != ''
- name: "Test: directory /opt/nomad"
block:
- name: "Stat directory /opt/nomad"
ansible.builtin.stat:
path: "/opt/nomad"
register: stat_opt_nomad
- name: "Verify directory /opt/nomad"
ansible.builtin.assert:
that:
- stat_opt_nomad.stat.exists
- stat_opt_nomad.stat.isdir
- stat_opt_nomad.stat.pw_name == 'nomad'
- stat_opt_nomad.stat.gr_name == 'nomad'
- stat_opt_nomad.stat.mode == '0755'
- name: "Test: service nomad"
block:
- name: "Get service nomad"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/nomad.service"
ansible.builtin.stat:
path: "/etc/systemd/system/nomad.service"
register: stat_etc_systemd_system_nomad_service
- name: "Slurp file /etc/systemd/system/nomad.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/nomad.service"
register: slurp_etc_systemd_system_nomad_service
- name: "Verify service nomad"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_nomad_service.stat.exists
- stat_etc_systemd_system_nomad_service.stat.isreg
- stat_etc_systemd_system_nomad_service.stat.pw_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.gr_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.mode == '0644'
- slurp_etc_systemd_system_nomad_service.content != ''
- ansible_facts.services['nomad.service'] is defined
- ansible_facts.services['nomad.service']['source'] == 'systemd'
- ansible_facts.services['nomad.service']['state'] == 'running'
- ansible_facts.services['nomad.service']['status'] == 'enabled'
- name: "Test: interaction nomad"
block:
- name: "Command nomad var put"
ansible.builtin.command: "nomad var put secret/foobar foo=bar"
changed_when: false
register: nomad_var_put
- name: "Command nomad var get"
ansible.builtin.command: "nomad var get secret/foobar"
changed_when: false
register: nomad_var_get
- name: "Command nomad var purge"
ansible.builtin.command: "nomad var purge secret/foobar"
changed_when: false
register: nomad_var_purge
- name: "Command nomad server members"
ansible.builtin.command: "nomad server members"
changed_when: false
register: nomad_server_members
- name: "Verify nomad interaction"
ansible.builtin.assert:
that:
- "'instance.global' in nomad_server_members.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_put.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_get.stdout"
- nomad_var_purge.stdout == 'Successfully purged variable \"secret/foobar\"!'

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_nomad"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_nomad"

View File

@ -1,35 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: vagrant
provider:
name: libvirt
platforms:
- name: instance
box: generic/${MOLECULE_TEST_OS}
cpus: 4
memory: 4096
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: default_vagrant
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,146 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: nomad user and group"
block:
- name: "Getent user nomad"
ansible.builtin.getent:
database: passwd
key: nomad
register: nomad_user
- name: "Getent group nomad"
ansible.builtin.getent:
database: group
key: nomad
register: nomad_group
- name: "Verify nomad user and group"
ansible.builtin.assert:
that:
- not nomad_user.failed
- not nomad_group.failed
- "'nomad' in nomad_user.ansible_facts.getent_passwd.keys()"
- "'/home/nomad' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'/bin/false' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'nomad' in nomad_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/nomad.d"
block:
- name: "Stat directory /etc/nomad.d"
ansible.builtin.stat:
path: "/etc/nomad.d"
register: stat_etc_nomad_d
- name: "Stat file /etc/nomad.d/nomad.env"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.env"
register: stat_etc_nomad_d_nomad_env
- name: "Stat file /etc/nomad.d/nomad.json"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.json"
register: stat_etc_nomad_d_nomad_json
- name: "Slurp file /etc/nomad.d/nomad.json"
ansible.builtin.slurp:
src: "/etc/nomad.d/nomad.json"
register: slurp_etc_nomad_d_nomad_json
- name: "Verify directory /etc/nomad.d"
ansible.builtin.assert:
that:
- stat_etc_nomad_d.stat.exists
- stat_etc_nomad_d.stat.isdir
- stat_etc_nomad_d.stat.pw_name == 'nomad'
- stat_etc_nomad_d.stat.gr_name == 'nomad'
- stat_etc_nomad_d.stat.mode == '0755'
- stat_etc_nomad_d_nomad_env.stat.exists
- stat_etc_nomad_d_nomad_env.stat.isreg
- stat_etc_nomad_d_nomad_env.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.mode == '0600'
- stat_etc_nomad_d_nomad_json.stat.exists
- stat_etc_nomad_d_nomad_json.stat.isreg
- stat_etc_nomad_d_nomad_json.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.mode == '0600'
- slurp_etc_nomad_d_nomad_json.content != ''
- name: "Test: directory /opt/nomad"
block:
- name: "Stat directory /opt/nomad"
ansible.builtin.stat:
path: "/opt/nomad"
register: stat_opt_nomad
- name: "Verify directory /opt/nomad"
ansible.builtin.assert:
that:
- stat_opt_nomad.stat.exists
- stat_opt_nomad.stat.isdir
- stat_opt_nomad.stat.pw_name == 'nomad'
- stat_opt_nomad.stat.gr_name == 'nomad'
- stat_opt_nomad.stat.mode == '0755'
- name: "Test: service nomad"
block:
- name: "Get service nomad"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/nomad.service"
ansible.builtin.stat:
path: "/etc/systemd/system/nomad.service"
register: stat_etc_systemd_system_nomad_service
- name: "Slurp file /etc/systemd/system/nomad.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/nomad.service"
register: slurp_etc_systemd_system_nomad_service
- name: "Verify service nomad"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_nomad_service.stat.exists
- stat_etc_systemd_system_nomad_service.stat.isreg
- stat_etc_systemd_system_nomad_service.stat.pw_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.gr_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.mode == '0644'
- slurp_etc_systemd_system_nomad_service.content != ''
- ansible_facts.services['nomad.service'] is defined
- ansible_facts.services['nomad.service']['source'] == 'systemd'
- ansible_facts.services['nomad.service']['state'] == 'running'
- ansible_facts.services['nomad.service']['status'] == 'enabled'
- name: "Test: interaction nomad"
block:
- name: "Command nomad var put"
ansible.builtin.command: "nomad var put secret/foobar foo=bar"
changed_when: false
register: nomad_var_put
- name: "Command nomad var get"
ansible.builtin.command: "nomad var get secret/foobar"
changed_when: false
register: nomad_var_get
- name: "Command nomad var purge"
ansible.builtin.command: "nomad var purge secret/foobar"
changed_when: false
register: nomad_var_purge
- name: "Command nomad server members"
ansible.builtin.command: "nomad server members"
changed_when: false
register: nomad_server_members
- name: "Verify nomad interaction"
ansible.builtin.assert:
that:
- "'instance.global' in nomad_server_members.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_put.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_get.stdout"
- nomad_var_purge.stdout == 'Successfully purged variable \"secret/foobar\"!'

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_nomad"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_nomad"

View File

@ -1,86 +0,0 @@
---
hashi_nomad_install: true
hashi_nomad_auto_update: true
hashi_nomad_cni_plugins_install: true
hashi_nomad_start_service: true
hashi_nomad_cni_plugins_version: latest
hashi_nomad_cni_plugins_install_path: "/opt/cni/bin"
hashi_nomad_version: latest
hashi_nomad_deploy_method: host # deployment method, either host or docker
hashi_nomad_env_variables: {}
hashi_nomad_data_dir: /opt/nomad
hashi_nomad_extra_files: true
hashi_nomad_extra_files_src: /tmp/extra_files
hashi_nomad_extra_files_dst: /etc/nomad.d/extra_files
#! nomad configuration
hashi_nomad_configuration:
bind_addr: "0.0.0.0"
datacenter: dc1
log_level: INFO
leave_on_terminate: false
data_dir: "{{ hashi_nomad_data_dir }}"
advertise:
http: "{{ ansible_default_ipv4.address }}"
rpc: "{{ ansible_default_ipv4.address }}"
serf: "{{ ansible_default_ipv4.address }}"
server:
enabled: true
bootstrap_expect: 1
server_join:
retry_join:
- "{{ ansible_default_ipv4.address }}"
client:
enabled: false
node_class: default
reserved:
cpu: 500
memory: 300
cni_path: "{{ hashi_nomad_cni_plugins_install_path }}"
bridge_network_name: nomad
bridge_network_subnet: "172.26.64.0/20"
ui:
enabled: true
acl:
enabled: true
token_ttl: 30s
policy_ttl: 30s
role_ttl: 30s
token_min_expiration_ttl: 30s
token_max_expiration_ttl: 24h
telemetry:
collection_interval: 1s
disable_hostname: false
use_node_name: false
publish_allocation_metrics: false
publish_node_metrics: false
prefix_filter: []
disable_dispatched_job_summary_metrics: false
prometheus_metrics: false
# tls:
# http: false
# rpc: false
# ca_file: "{{ hashi_nomad_data_dir }}/tls/ca.pem"
# cert_file: "{{ hashi_nomad_data_dir }}/tls/cert.pem"
# key_file: "{{ hashi_nomad_data_dir }}/tls/key.pem"
# plugin:
# docker:
# config:
# endpoint: "unix:///var/run/docker.sock"
# allow_privileged: false
# allow_caps: ["all"]
# volumes:
# enabled: true
# consul:
# address: "127.0.0.1:8500"
# token: ""
# auto_advertise: true
# vault:
# address: http://vault.service.consul:8200
# token: ""
# create_from_role: nomad-cluster
# plugin:
# docker:
# endpoint: "unix:///var/run/docker.sock"
# allow_privileged: false
# allow_caps: ["all"]
# volumes_enabled: true

View File

@ -1,37 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: docker
platforms:
- name: instance
image: geerlingguy/docker-${MOLECULE_TEST_OS}-ansible
command: ""
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
cgroupns_mode: host
privileged: true
pre_build_image: true
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: with_acl_enabled
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,163 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: nomad user and group"
block:
- name: "Getent user nomad"
ansible.builtin.getent:
database: passwd
key: nomad
register: nomad_user
- name: "Getent group nomad"
ansible.builtin.getent:
database: group
key: nomad
register: nomad_group
- name: "Verify nomad user and group"
ansible.builtin.assert:
that:
- not nomad_user.failed
- not nomad_group.failed
- "'nomad' in nomad_user.ansible_facts.getent_passwd.keys()"
- "'/home/nomad' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'/bin/false' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'nomad' in nomad_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/nomad.d"
block:
- name: "Stat directory /etc/nomad.d"
ansible.builtin.stat:
path: "/etc/nomad.d"
register: stat_etc_nomad_d
- name: "Stat file /etc/nomad.d/nomad.env"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.env"
register: stat_etc_nomad_d_nomad_env
- name: "Stat file /etc/nomad.d/nomad.json"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.json"
register: stat_etc_nomad_d_nomad_json
- name: "Slurp file /etc/nomad.d/nomad.json"
ansible.builtin.slurp:
src: "/etc/nomad.d/nomad.json"
register: slurp_etc_nomad_d_nomad_json
- name: "Verify directory /etc/nomad.d"
ansible.builtin.assert:
that:
- stat_etc_nomad_d.stat.exists
- stat_etc_nomad_d.stat.isdir
- stat_etc_nomad_d.stat.pw_name == 'nomad'
- stat_etc_nomad_d.stat.gr_name == 'nomad'
- stat_etc_nomad_d.stat.mode == '0755'
- stat_etc_nomad_d_nomad_env.stat.exists
- stat_etc_nomad_d_nomad_env.stat.isreg
- stat_etc_nomad_d_nomad_env.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.mode == '0600'
- stat_etc_nomad_d_nomad_json.stat.exists
- stat_etc_nomad_d_nomad_json.stat.isreg
- stat_etc_nomad_d_nomad_json.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.mode == '0600'
- slurp_etc_nomad_d_nomad_json.content != ''
- name: "Test: directory /opt/nomad"
block:
- name: "Stat directory /opt/nomad"
ansible.builtin.stat:
path: "/opt/nomad"
register: stat_opt_nomad
- name: "Verify directory /opt/nomad"
ansible.builtin.assert:
that:
- stat_opt_nomad.stat.exists
- stat_opt_nomad.stat.isdir
- stat_opt_nomad.stat.pw_name == 'nomad'
- stat_opt_nomad.stat.gr_name == 'nomad'
- stat_opt_nomad.stat.mode == '0755'
- name: "Test: service nomad"
block:
- name: "Get service nomad"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/nomad.service"
ansible.builtin.stat:
path: "/etc/systemd/system/nomad.service"
register: stat_etc_systemd_system_nomad_service
- name: "Slurp file /etc/systemd/system/nomad.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/nomad.service"
register: slurp_etc_systemd_system_nomad_service
- name: "Verify service nomad"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_nomad_service.stat.exists
- stat_etc_systemd_system_nomad_service.stat.isreg
- stat_etc_systemd_system_nomad_service.stat.pw_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.gr_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.mode == '0644'
- slurp_etc_systemd_system_nomad_service.content != ''
- ansible_facts.services['nomad.service'] is defined
- ansible_facts.services['nomad.service']['source'] == 'systemd'
- ansible_facts.services['nomad.service']['state'] == 'running'
- ansible_facts.services['nomad.service']['status'] == 'enabled'
- name: "Test: bootstrap acl nomad"
block:
- name: "Command nomad acl bootstrap"
ansible.builtin.command: "nomad acl bootstrap -json"
changed_when: false
register: nomad_acl_bootstrap
- name: "Test: interaction nomad"
vars:
acl_token: "{{ nomad_acl_bootstrap.stdout|from_json|json_query('SecretID') }}"
block:
- name: "Command nomad var put"
ansible.builtin.command: "nomad var put secret/foobar foo=bar"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_var_put
- name: "Command nomad var get"
ansible.builtin.command: "nomad var get secret/foobar"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_var_get
- name: "Command nomad var purge"
ansible.builtin.command: "nomad var purge secret/foobar"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_var_purge
- name: "Command nomad server members"
ansible.builtin.command: "nomad server members"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_server_members
- name: "Verify nomad interaction"
ansible.builtin.assert:
that:
- "'instance.global' in nomad_server_members.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_put.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_get.stdout"
- nomad_var_purge.stdout == 'Successfully purged variable \"secret/foobar\"!'

View File

@ -1,8 +0,0 @@
---
- name: Converge
hosts: all
become: true
tasks:
- name: "Include ednxzu.hashicorp_nomad"
ansible.builtin.include_role:
name: "ednxzu.hashicorp_nomad"

View File

@ -1,86 +0,0 @@
---
hashi_nomad_install: true
hashi_nomad_auto_update: true
hashi_nomad_cni_plugins_install: true
hashi_nomad_start_service: true
hashi_nomad_cni_plugins_version: latest
hashi_nomad_cni_plugins_install_path: "/opt/cni/bin"
hashi_nomad_version: latest
hashi_nomad_deploy_method: host # deployment method, either host or docker
hashi_nomad_env_variables: {}
hashi_nomad_data_dir: /opt/nomad
hashi_nomad_extra_files: true
hashi_nomad_extra_files_src: /tmp/extra_files
hashi_nomad_extra_files_dst: /etc/nomad.d/extra_files
#! nomad configuration
hashi_nomad_configuration:
bind_addr: "0.0.0.0"
datacenter: dc1
log_level: INFO
leave_on_terminate: false
data_dir: "{{ hashi_nomad_data_dir }}"
advertise:
http: "{{ ansible_default_ipv4.address }}"
rpc: "{{ ansible_default_ipv4.address }}"
serf: "{{ ansible_default_ipv4.address }}"
server:
enabled: true
bootstrap_expect: 1
server_join:
retry_join:
- "{{ ansible_default_ipv4.address }}"
client:
enabled: false
node_class: default
reserved:
cpu: 500
memory: 300
cni_path: "{{ hashi_nomad_cni_plugins_install_path }}"
bridge_network_name: nomad
bridge_network_subnet: "172.26.64.0/20"
ui:
enabled: true
acl:
enabled: true
token_ttl: 30s
policy_ttl: 30s
role_ttl: 30s
token_min_expiration_ttl: 30s
token_max_expiration_ttl: 24h
telemetry:
collection_interval: 1s
disable_hostname: false
use_node_name: false
publish_allocation_metrics: false
publish_node_metrics: false
prefix_filter: []
disable_dispatched_job_summary_metrics: false
prometheus_metrics: false
# tls:
# http: false
# rpc: false
# ca_file: "{{ hashi_nomad_data_dir }}/tls/ca.pem"
# cert_file: "{{ hashi_nomad_data_dir }}/tls/cert.pem"
# key_file: "{{ hashi_nomad_data_dir }}/tls/key.pem"
# plugin:
# docker:
# config:
# endpoint: "unix:///var/run/docker.sock"
# allow_privileged: false
# allow_caps: ["all"]
# volumes:
# enabled: true
# consul:
# address: "127.0.0.1:8500"
# token: ""
# auto_advertise: true
# vault:
# address: http://vault.service.consul:8200
# token: ""
# create_from_role: nomad-cluster
# plugin:
# docker:
# endpoint: "unix:///var/run/docker.sock"
# allow_privileged: false
# allow_caps: ["all"]
# volumes_enabled: true

View File

@ -1,35 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: vagrant
provider:
name: libvirt
platforms:
- name: instance
box: generic/${MOLECULE_TEST_OS}
cpus: 4
memory: 4096
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: with_acl_enabled_vagrant
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
roles:
- name: ednxzu.manage_repositories
- name: ednxzu.manage_apt_packages

View File

@ -1,163 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: nomad user and group"
block:
- name: "Getent user nomad"
ansible.builtin.getent:
database: passwd
key: nomad
register: nomad_user
- name: "Getent group nomad"
ansible.builtin.getent:
database: group
key: nomad
register: nomad_group
- name: "Verify nomad user and group"
ansible.builtin.assert:
that:
- not nomad_user.failed
- not nomad_group.failed
- "'nomad' in nomad_user.ansible_facts.getent_passwd.keys()"
- "'/home/nomad' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'/bin/false' in nomad_user.ansible_facts.getent_passwd['nomad']"
- "'nomad' in nomad_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/nomad.d"
block:
- name: "Stat directory /etc/nomad.d"
ansible.builtin.stat:
path: "/etc/nomad.d"
register: stat_etc_nomad_d
- name: "Stat file /etc/nomad.d/nomad.env"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.env"
register: stat_etc_nomad_d_nomad_env
- name: "Stat file /etc/nomad.d/nomad.json"
ansible.builtin.stat:
path: "/etc/nomad.d/nomad.json"
register: stat_etc_nomad_d_nomad_json
- name: "Slurp file /etc/nomad.d/nomad.json"
ansible.builtin.slurp:
src: "/etc/nomad.d/nomad.json"
register: slurp_etc_nomad_d_nomad_json
- name: "Verify directory /etc/nomad.d"
ansible.builtin.assert:
that:
- stat_etc_nomad_d.stat.exists
- stat_etc_nomad_d.stat.isdir
- stat_etc_nomad_d.stat.pw_name == 'nomad'
- stat_etc_nomad_d.stat.gr_name == 'nomad'
- stat_etc_nomad_d.stat.mode == '0755'
- stat_etc_nomad_d_nomad_env.stat.exists
- stat_etc_nomad_d_nomad_env.stat.isreg
- stat_etc_nomad_d_nomad_env.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_env.stat.mode == '0600'
- stat_etc_nomad_d_nomad_json.stat.exists
- stat_etc_nomad_d_nomad_json.stat.isreg
- stat_etc_nomad_d_nomad_json.stat.pw_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.gr_name == 'nomad'
- stat_etc_nomad_d_nomad_json.stat.mode == '0600'
- slurp_etc_nomad_d_nomad_json.content != ''
- name: "Test: directory /opt/nomad"
block:
- name: "Stat directory /opt/nomad"
ansible.builtin.stat:
path: "/opt/nomad"
register: stat_opt_nomad
- name: "Verify directory /opt/nomad"
ansible.builtin.assert:
that:
- stat_opt_nomad.stat.exists
- stat_opt_nomad.stat.isdir
- stat_opt_nomad.stat.pw_name == 'nomad'
- stat_opt_nomad.stat.gr_name == 'nomad'
- stat_opt_nomad.stat.mode == '0755'
- name: "Test: service nomad"
block:
- name: "Get service nomad"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/nomad.service"
ansible.builtin.stat:
path: "/etc/systemd/system/nomad.service"
register: stat_etc_systemd_system_nomad_service
- name: "Slurp file /etc/systemd/system/nomad.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/nomad.service"
register: slurp_etc_systemd_system_nomad_service
- name: "Verify service nomad"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_nomad_service.stat.exists
- stat_etc_systemd_system_nomad_service.stat.isreg
- stat_etc_systemd_system_nomad_service.stat.pw_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.gr_name == 'root'
- stat_etc_systemd_system_nomad_service.stat.mode == '0644'
- slurp_etc_systemd_system_nomad_service.content != ''
- ansible_facts.services['nomad.service'] is defined
- ansible_facts.services['nomad.service']['source'] == 'systemd'
- ansible_facts.services['nomad.service']['state'] == 'running'
- ansible_facts.services['nomad.service']['status'] == 'enabled'
- name: "Test: bootstrap acl nomad"
block:
- name: "Command nomad acl bootstrap"
ansible.builtin.command: "nomad acl bootstrap -json"
changed_when: false
register: nomad_acl_bootstrap
- name: "Test: interaction nomad"
vars:
acl_token: "{{ nomad_acl_bootstrap.stdout|from_json|json_query('SecretID') }}"
block:
- name: "Command nomad var put"
ansible.builtin.command: "nomad var put secret/foobar foo=bar"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_var_put
- name: "Command nomad var get"
ansible.builtin.command: "nomad var get secret/foobar"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_var_get
- name: "Command nomad var purge"
ansible.builtin.command: "nomad var purge secret/foobar"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_var_purge
- name: "Command nomad server members"
ansible.builtin.command: "nomad server members"
environment:
NOMAD_TOKEN: "{{ acl_token }}"
changed_when: false
register: nomad_server_members
- name: "Verify nomad interaction"
ansible.builtin.assert:
that:
- "'instance.global' in nomad_server_members.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_put.stdout"
- "'\"Items\": {\n \"foo\": \"bar\"\n }' in nomad_var_get.stdout"
- nomad_var_purge.stdout == 'Successfully purged variable \"secret/foobar\"!'

View File

@ -1,73 +0,0 @@
---
# task/cni_install file for hashicorp_nomad
- name: "Get release for cni_plugins:{{ hashi_nomad_cni_plugins_version }}"
vars:
_cni_plugins_url_ext: "{% if hashi_nomad_cni_plugins_version == 'latest'%}releases{% else %}releases/tags{% endif %}"
ansible.builtin.uri:
url: "{{ hashi_nomad_cni_plugins_github_api }}/{{ _cni_plugins_url_ext }}/{{ hashi_nomad_cni_plugins_version }}"
return_content: true
register: _cni_plugins_new_release
- name: "Check if cni plugin is already installed"
ansible.builtin.stat:
path: "{{ hashi_nomad_cni_plugins_install_path }}/version"
changed_when: false
check_mode: false
register: _cni_plugins_is_installed
- name: "Check current cni plugin version"
ansible.builtin.command: "cat {{ hashi_nomad_cni_plugins_install_path }}/version"
changed_when: false
check_mode: false
register: _cni_plugins_old_release
when: _cni_plugins_is_installed.stat.exists
- name: "Set facts for wanted cni plugins release"
ansible.builtin.set_fact:
hashi_nomad_cni_plugins_wanted_version: "{{ _cni_plugins_new_release.json['tag_name']|regex_replace('v', '') }}"
when: _cni_plugins_new_release.json is defined
and (_cni_plugins_new_release.json | length > 0)
- name: "Set facts for current cni plugins release"
ansible.builtin.set_fact:
hashi_nomad_cni_plugins_current_version: "{{ _cni_plugins_old_release.stdout | regex_replace('v', '') }}"
when: _cni_plugins_old_release.stdout is defined
and (_cni_plugins_old_release.stdout | length > 0)
- name: "Create cni directory"
ansible.builtin.file:
path: "{{ hashi_nomad_cni_plugins_install_path }}"
state: directory
mode: "0775"
- name: "Install cni plugins"
when: hashi_nomad_cni_plugins_current_version is not defined
or hashi_nomad_cni_plugins_wanted_version not in hashi_nomad_cni_plugins_current_version
block:
- name: "Install cni plugins version:{{ hashi_nomad_cni_plugins_version }}"
ansible.builtin.get_url:
url: "{{ hashi_nomad_cni_plugins_github_url }}/releases/download/v{{ hashi_nomad_cni_plugins_wanted_version }}/cni-plugins-linux-{{ hashi_nomad_cni_plugins_arch }}-v{{ hashi_nomad_cni_plugins_wanted_version }}.tgz"
dest: "/tmp/cni_plugin.tgz"
mode: "0644"
register: _cni_plugins_download_archive
until: _cni_plugins_download_archive is succeeded
retries: 5
delay: 2
check_mode: false
- name: "Unpack cni plugins"
ansible.builtin.unarchive:
src: "/tmp/cni_plugin.tgz"
dest: "{{ hashi_nomad_cni_plugins_install_path }}"
remote_src: true
- name: "Remove temporary archive"
ansible.builtin.file:
path: "/tmp/cni_plugin.tgz"
state: absent
- name: "Update version file"
ansible.builtin.copy:
content: "{{ hashi_nomad_cni_plugins_wanted_version }}"
dest: "{{ hashi_nomad_cni_plugins_install_path }}/version"
mode: "0600"

View File

@ -1,46 +0,0 @@
---
# task/configure file for hashicorp_nomad
- name: "Ensure default nomad.hcl is removed"
ansible.builtin.file:
path: /etc/nomad.d/nomad.hcl
state: absent
- name: "Copy nomad.json template"
ansible.builtin.template:
src: nomad.json.j2
dest: "{{ hashi_nomad_config_dir }}/nomad.json"
owner: "{{ hashi_nomad_user }}"
group: "{{ hashi_nomad_group }}"
mode: '0600'
notify:
- "systemctl-enable-nomad"
- "systemctl-restart-nomad"
- name: "Create nomad.env"
ansible.builtin.template:
src: nomad.env.j2
dest: "{{ hashi_nomad_config_dir }}/nomad.env"
owner: "{{ hashi_nomad_user }}"
group: "{{ hashi_nomad_group }}"
mode: '0600'
- name: "Copy extra configuration files"
when: hashi_nomad_extra_files
block:
- name: "Create directory {{ hashi_nomad_extra_files_dst }}"
ansible.builtin.file:
path: "{{ hashi_nomad_extra_files_dst }}"
state: directory
owner: "{{ hashi_nomad_user }}"
group: "{{ hashi_nomad_group }}"
mode: '0755'
- name: "Copy extra configuration files"
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ hashi_nomad_extra_files_dst }}/{{ (item | basename).split('.')[:-1] | join('.')}}"
owner: "{{ hashi_nomad_user }}"
group: "{{ hashi_nomad_group }}"
mode: '0600'
with_fileglob:
- "{{ hashi_nomad_extra_files_src }}/*"

View File

@ -1,25 +0,0 @@
---
# task/install file for hashicorp_nomad
- name: "Configure hashicorp repository"
ansible.builtin.include_role:
name: ednxzu.manage_repositories
vars:
manage_repositories_enable_default_repo: false
manage_repositories_enable_custom_repo: true
manage_repositories_custom_repo: "{{ hashi_nomad_repository }}"
- name: "Install nomad:{{ hashi_nomad_version }}"
ansible.builtin.include_role:
name: ednxzu.manage_apt_packages
vars:
manage_apt_packages_list: "{{ hashi_nomad_packages }}"
- name: "Copy systemd service file for nomad"
ansible.builtin.template:
src: "nomad.service.j2"
dest: "/etc/systemd/system/nomad.service"
owner: root
group: root
mode: '0644'
notify:
- "systemctl-daemon-reload"

View File

@ -1,15 +0,0 @@
---
# task/main file for hashicorp_nomad
- name: "Import prerequisites.yml"
ansible.builtin.include_tasks: prerequisites.yml
- name: "Import install.yml"
ansible.builtin.include_tasks: install.yml
when: hashi_nomad_install
- name: "Import cni_install.yml"
ansible.builtin.include_tasks: cni_install.yml
when: hashi_nomad_cni_plugins_install
- name: "Import configure.yml"
ansible.builtin.include_tasks: configure.yml

View File

@ -1,29 +0,0 @@
---
# task/prerequisites file for hashicorp_nomad
# Create the service group. Must use hashi_nomad_group here: the original
# passed hashi_nomad_user, which only worked because both default to "nomad"
# and silently created the wrong group when the two vars were set independently.
- name: "Create group {{ hashi_nomad_group }}"
  ansible.builtin.group:
    name: "{{ hashi_nomad_group }}"
    state: present
- name: "Create user {{ hashi_nomad_user }}"
ansible.builtin.user:
name: "{{ hashi_nomad_user }}"
group: "{{ hashi_nomad_group }}"
shell: /bin/false
state: present
- name: "Create directory {{ hashi_nomad_config_dir }}"
ansible.builtin.file:
path: "{{ hashi_nomad_config_dir }}"
state: directory
owner: "{{ hashi_nomad_user }}"
group: "{{ hashi_nomad_group }}"
mode: '0755'
- name: "Create directory {{ hashi_nomad_data_dir }}"
ansible.builtin.file:
path: "{{ hashi_nomad_data_dir }}"
state: directory
owner: "{{ hashi_nomad_user }}"
group: "{{ hashi_nomad_group }}"
mode: '0755'

View File

@ -1,4 +0,0 @@
# {{ ansible_managed }}
{% for item in hashi_nomad_env_variables %}
{{ item|upper }}="{{ hashi_nomad_env_variables[item] }}"
{% endfor %}

View File

@ -1 +0,0 @@
{{ hashi_nomad_configuration|to_nice_json }}

View File

@ -1,33 +0,0 @@
[Unit]
Description=Nomad
Documentation=https://developer.hashicorp.com/nomad/docs
Wants=network-online.target
After=network-online.target
ConditionFileNotEmpty={{ hashi_nomad_config_dir }}/nomad.json
{% if hashi_nomad_configuration.consul.address is defined %}
Wants=consul.service
After=consul.service
{% endif %}
[Service]
EnvironmentFile=-{{ hashi_nomad_config_dir }}/nomad.env
{% if not (hashi_nomad_configuration.client.enabled is defined and hashi_nomad_configuration.client.enabled) %}
User={{ hashi_nomad_user }}
Group={{ hashi_nomad_group }}
{% else %}
User=root
Group=root
{% endif %}
ExecStart=/usr/bin/nomad agent -config {{ hashi_nomad_config_dir }}/nomad.json
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
KillSignal=SIGINT
Restart=on-failure
LimitNOFILE=65536
LimitNPROC=infinity
RestartSec=2
TasksMax=infinity
OOMScoreAdjust=-1000
[Install]
WantedBy=multi-user.target

View File

@ -1,30 +0,0 @@
---
# vars file for hashicorp_nomad
hashi_nomad_user: nomad
hashi_nomad_group: nomad
hashi_nomad_config_dir: "/etc/nomad.d"
# Map ansible_architecture to the arch token used in CNI plugins release
# artifact names (cni-plugins-linux-<arch>-vX.Y.Z.tgz).
# Fix: aarch64 is 64-bit and must map to 'arm64' — upstream publishes a
# distinct linux-arm64 artifact; 'arm' is the 32-bit build (armv6l/armv7l).
# Values are quoted so '386' stays a string instead of being parsed as an int.
hashi_nomad_cni_plugins_arch_map:
  i386: '386'
  x86_64: 'amd64'
  aarch64: 'arm64'
  armv7l: 'arm'
  armv6l: 'arm'
# Fall back to the raw architecture string for unmapped platforms.
hashi_nomad_cni_plugins_arch: "{{ hashi_nomad_cni_plugins_arch_map[ansible_architecture] | default(ansible_architecture) }}"
hashi_nomad_cni_plugins_github_api: https://api.github.com/repos/containernetworking/plugins
hashi_nomad_cni_plugins_github_url: https://github.com/containernetworking/plugins
hashi_nomad_repository:
- name: hashicorp
uri: "https://apt.releases.hashicorp.com"
comments: "hashicorp repository"
types:
- deb
suites:
- "{{ ansible_distribution_release }}"
components:
- main
options:
Signed-By: "https://apt.releases.hashicorp.com/gpg"
hashi_nomad_packages:
- name: nomad
version: "{{ hashi_nomad_version }}"
state: "{% if hashi_nomad_auto_update %}latest{% else %}present{% endif %}"

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2017 Bertrand Lanson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,98 +0,0 @@
hashicorp_vault
=========
> This repository is only a mirror. Development and testing is done on a private gitea server.
This role installs and configures vault on **debian-based** distributions.
Requirements
------------
None.
Role Variables
--------------
Available variables are listed below, along with default values. A sample file for the default values is available in `default/hashicorp_vault.yml.sample` in case you need it for any `group_vars` or `host_vars` configuration.
```yaml
hashi_vault_install: true # by default, set to true
```
This variable defines if the vault package is to be installed or not before configuring. If you install vault using another task, you can set this to `false`.
```yaml
hashi_vault_auto_update: false # by default, set to false
```
This variable allows you to choose to automatically update vault if a newer version is available. Updating vault is usually pretty safe if done on a regular basis, but for better control over the upgrade process, see `hashi_vault_version`.
```yaml
hashi_vault_start_service: true
```
This variable defines whether the vault service should be started once it has been configured. This is useful in case you are using this role to build golden images, where you might want to only enable the service so that it starts on the next boot (when the image is launched).
```yaml
hashi_vault_version: latest # by default, set to latest
```
This variable specifies the version of vault to install when `hashi_vault_install` is set to `true`. The version to specify is the version of the package on the hashicorp repository (`1.10.1-1` for example). This can be found by running `apt-cache madison vault` on a machine with the repository installed.
```yaml
hashi_vault_deploy_method: host # by default, set to host
```
This variable defines the method of deployment of vault. The `host` method installs the binary directly on the host, and runs vault as a systemd service. The `docker` method install vault as a docker container.
> Currently, only the `host` method is available, the `docker` method will be added later.
```yaml
hashi_vault_env_variables: # by default, set to {}
env_var: value
```
This value is a list of key/value that will populate the `vault.env` file. You do not have to capitalize the KEYS, as it will be done automatically.
```yaml
hashi_vault_data_dir: "/opt/vault" # by default, set to /opt/vault
```
This value defines the path where vault data will be stored on the node. Defaults to `/opt/vault`.
```yaml
hashi_vault_extra_files: false # by default, set to false
```
This variable defines whether or not there is extra configuration files to copy to the target. If there are, these extra files are expected to be jinja2 templates located all in the same directory, and will be copied to the specified directory on the target machine.
```yaml
hashi_vault_extra_files_src: /tmp/extra_files # by default, set to /tmp/extra_files
```
This variable defines the source directory (without the trailing /) for the extra files to be copied in case there are some.
```yaml
hashi_vault_extra_files_dst: /etc/vault.d/extra_files # by default, set to /etc/vault.d/extra_files
```
This variable defines the destination directory (without the trailing /) for the extra files to be copied.
```yaml
hashi_vault_configuration: {} # by default, set to a simple configuration
```
This variable sets all of the configuration parameters for vault. For more information on all of them, please check the [documentation](https://developer.hashicorp.com/vault/docs/configuration). This variable is parsed and converted to json format to create the config file, so each key and value should be set according to the documentation. This method of passing configuration allows for compatibility with every configuration parameters that vault has to offer. The defaults are simply here to deploy a simple, single-node vault server without much configuration, and should NOT be used in production. You will want to edit this to deploy production-ready clusters.
Dependencies
------------
`ednxzu.manage_repositories` to configure the hashicorp apt repository.
`ednxzu.manage_apt_packages` to install vault.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
```yaml
# calling the role inside a playbook with either the default or group_vars/host_vars
- hosts: servers
roles:
- ednxzu.hashicorp_vault
```
License
-------
MIT / BSD
Author Information
------------------
This role was created by Bertrand Lanson in 2023.

View File

@ -1,46 +0,0 @@
---
# hashi_vault_install: true
# hashi_vault_auto_update: false
# hashi_vault_start_service: true
# hashi_vault_version: latest
# hashi_vault_deploy_method: host # deployment method, either host or docker
# hashi_vault_env_variables: {}
# hashi_vault_data_dir: "/opt/vault"
# hashi_vault_extra_files: false
# hashi_vault_extra_files_src: /tmp/extra_files
# hashi_vault_extra_files_dst: /etc/vault.d/extra_files
# #! vault configuration
# hashi_vault_configuration:
# cluster_name: vault
# cluster_addr: "https://127.0.0.1:8201"
# api_addr: "https://127.0.0.1:8200"
# ui: true
# disable_mlock: false
# disable_cache: false
# listener:
# tcp:
# address: "127.0.0.1:8200"
# cluster_address: "127.0.0.1:8201"
# tls_disable: 0
# tls_disable_client_certs: false
# tls_cert_file: "{{ hashi_vault_data_dir }}/tls/tls.crt" # this uses the autogenerated TLS certificates
# tls_key_file: "{{ hashi_vault_data_dir }}/tls/tls.key" # this uses the autogenerated TLS certificates
# storage:
# file:
# path: "{{ hashi_vault_data_dir }}/data"
# # service_registration:
# # consul:
# # address: 127.0.0.1:8500
# # scheme: https
# # token: someUUIDforconsul
# telemetry:
# usage_gauge_period: 10m
# maximum_gauge_cardinality: 500
# disable_hostname: false
# enable_hostname_label: false
# lease_metrics_epsilon: 1h
# num_lease_metrics_buckets: 168
# add_lease_metrics_namespace_labels: false
# filter_default: true
# prefix_filter: []
# prometheus_retention_time: 24h

View File

@ -1,47 +0,0 @@
---
# defaults file for hashicorp_vault
# Single-node, file-storage deployment with TLS enabled. NOT production-ready;
# override hashi_vault_configuration per environment.
hashi_vault_install: true
hashi_vault_auto_update: false
hashi_vault_start_service: true
hashi_vault_version: latest
hashi_vault_deploy_method: host # deployment method, either host or docker
hashi_vault_env_variables: {}
hashi_vault_data_dir: "/opt/vault"
hashi_vault_extra_files: false
hashi_vault_extra_files_src: /tmp/extra_files
hashi_vault_extra_files_dst: /etc/vault.d/extra_files
#! vault configuration
# Rendered verbatim to JSON as the vault config file; any key documented at
# https://developer.hashicorp.com/vault/docs/configuration is accepted.
hashi_vault_configuration:
  cluster_name: vault
  cluster_addr: "https://127.0.0.1:8201"
  api_addr: "https://127.0.0.1:8200"
  ui: true
  disable_mlock: false
  disable_cache: false
  listener:
    tcp:
      address: "127.0.0.1:8200"
      cluster_address: "127.0.0.1:8201"
      tls_disable: 0
      tls_disable_client_certs: false
      tls_cert_file: "{{ hashi_vault_data_dir }}/tls/tls.crt" # this uses the autogenerated TLS certificates
      tls_key_file: "{{ hashi_vault_data_dir }}/tls/tls.key" # this uses the autogenerated TLS certificates
  storage:
    file:
      path: "{{ hashi_vault_data_dir }}/data"
  # service_registration:
  #   consul:
  #     address: 127.0.0.1:8500
  #     scheme: https
  #     token: someUUIDforconsul
  telemetry:
    usage_gauge_period: 10m
    maximum_gauge_cardinality: 500
    disable_hostname: false
    enable_hostname_label: false
    lease_metrics_epsilon: 1h
    num_lease_metrics_buckets: 168
    add_lease_metrics_namespace_labels: false
    filter_default: true
    prefix_filter: []
    prometheus_retention_time: 24h

View File

@ -1,20 +0,0 @@
---
# handlers file for hashicorp_vault
# Reload systemd unit definitions after the vault.service file changes.
- name: "Reload systemd file"
  ansible.builtin.systemd:
    daemon_reload: true
  listen: "systemctl-daemon-reload"

# Enable the vault service at boot.
- name: "Enable vault service"
  ansible.builtin.service:
    name: vault
    enabled: true
  listen: "systemctl-enable-vault"

# Restart vault one host at a time (throttle: 1) so a clustered deployment
# never restarts all nodes simultaneously; skipped entirely when
# hashi_vault_start_service is false.
- name: "Start vault service"
  ansible.builtin.service:
    name: vault
    state: restarted
  listen: "systemctl-restart-vault"
  throttle: 1
  when: hashi_vault_start_service

View File

@ -1,25 +0,0 @@
---
# meta file for hashicorp_vault
galaxy_info:
  namespace: 'ednxzu'
  role_name: 'hashicorp_vault'
  author: 'Bertrand Lanson'
  description: 'Install and configure hashicorp vault for debian-based distros.'
  # BUGFIX: the previous value 'license (BSD, MIT)' was the ansible-galaxy
  # init placeholder, not a real license identifier. The repository's LICENSE
  # file is MIT, so declare that.
  license: 'MIT'
  min_ansible_version: '2.10'
  platforms:
    - name: Ubuntu
      versions:
        - focal
        - jammy
    - name: Debian
      versions:
        - bullseye
        - bookworm
  galaxy_tags:
    - 'ubuntu'
    - 'debian'
    - 'hashicorp'
    - 'vault'
dependencies: []

View File

@ -1,8 +0,0 @@
---
# Molecule converge playbook: applies the role under test to every instance.
- name: Converge
  hosts: all
  become: true
  tasks:
    - name: "Include ednxzu.hashicorp_vault"
      ansible.builtin.include_role:
        name: "ednxzu.hashicorp_vault"

View File

@ -1,37 +0,0 @@
---
# Molecule scenario configuration: "default" scenario, docker driver.
dependency:
  name: galaxy
  options:
    requirements-file: ./requirements.yml
driver:
  name: docker
platforms:
  # systemd-capable container image; distro chosen via $MOLECULE_TEST_OS
  - name: instance
    image: geerlingguy/docker-${MOLECULE_TEST_OS}-ansible
    command: ""
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup
    cgroupns_mode: host
    privileged: true
    pre_build_image: true
provisioner:
  name: ansible
  config_options:
    defaults:
      remote_tmp: /tmp/.ansible
verifier:
  name: ansible
scenario:
  name: default
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    - idempotence
    - verify
    - cleanup
    - destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
# Roles pulled from Ansible Galaxy by molecule's "dependency" step.
roles:
  - name: ednxzu.manage_repositories
  - name: ednxzu.manage_apt_packages

View File

@ -1,162 +0,0 @@
---
# Molecule verify playbook: asserts the converged instance is in the expected
# state (vault user/group, config files, data dir, systemd service), then
# initializes, unseals and exercises the freshly deployed vault server.
- name: Verify
  hosts: all
  gather_facts: true
  become: true
  tasks:
    - name: "Test: vault user and group"
      block:
        - name: "Getent user vault"
          ansible.builtin.getent:
            database: passwd
            key: vault
          register: vault_user

        - name: "Getent group vault"
          ansible.builtin.getent:
            database: group
            key: vault
          register: vault_group

        - name: "Verify vault user and group"
          ansible.builtin.assert:
            that:
              - not vault_user.failed
              - not vault_group.failed
              - "'vault' in vault_user.ansible_facts.getent_passwd.keys()"
              - "'/home/vault' in vault_user.ansible_facts.getent_passwd['vault']"
              - "'/bin/false' in vault_user.ansible_facts.getent_passwd['vault']"
              - "'vault' in vault_group.ansible_facts.getent_group.keys()"

    - name: "Test: directory /etc/vault.d"
      block:
        - name: "Stat directory /etc/vault.d"
          ansible.builtin.stat:
            path: "/etc/vault.d"
          register: stat_etc_vault_d

        - name: "Stat file /etc/vault.d/vault.env"
          ansible.builtin.stat:
            path: "/etc/vault.d/vault.env"
          register: stat_etc_vault_d_vault_env

        - name: "Stat file /etc/vault.d/vault.json"
          ansible.builtin.stat:
            path: "/etc/vault.d/vault.json"
          register: stat_etc_vault_d_vault_json

        - name: "Slurp file /etc/vault.d/vault.json"
          ansible.builtin.slurp:
            src: "/etc/vault.d/vault.json"
          register: slurp_etc_vault_d_vault_json

        - name: "Verify directory /etc/vault.d"
          ansible.builtin.assert:
            that:
              - stat_etc_vault_d.stat.exists
              - stat_etc_vault_d.stat.isdir
              - stat_etc_vault_d.stat.pw_name == 'vault'
              - stat_etc_vault_d.stat.gr_name == 'vault'
              - stat_etc_vault_d.stat.mode == '0755'
              - stat_etc_vault_d_vault_env.stat.exists
              - stat_etc_vault_d_vault_env.stat.isreg
              - stat_etc_vault_d_vault_env.stat.pw_name == 'vault'
              - stat_etc_vault_d_vault_env.stat.gr_name == 'vault'
              - stat_etc_vault_d_vault_env.stat.mode == '0600'
              - stat_etc_vault_d_vault_json.stat.exists
              - stat_etc_vault_d_vault_json.stat.isreg
              - stat_etc_vault_d_vault_json.stat.pw_name == 'vault'
              - stat_etc_vault_d_vault_json.stat.gr_name == 'vault'
              - stat_etc_vault_d_vault_json.stat.mode == '0600'
              - slurp_etc_vault_d_vault_json.content != ''

    - name: "Test: directory /opt/vault"
      block:
        - name: "Stat directory /opt/vault"
          ansible.builtin.stat:
            path: "/opt/vault"
          register: stat_opt_vault

        - name: "Verify directory /opt/vault"
          ansible.builtin.assert:
            that:
              - stat_opt_vault.stat.exists
              - stat_opt_vault.stat.isdir
              - stat_opt_vault.stat.pw_name == 'vault'
              - stat_opt_vault.stat.gr_name == 'vault'
              - stat_opt_vault.stat.mode == '0755'

    - name: "Test: service vault"
      block:
        - name: "Get service vault"
          ansible.builtin.service_facts:

        - name: "Stat file /etc/systemd/system/vault.service"
          ansible.builtin.stat:
            path: "/etc/systemd/system/vault.service"
          register: stat_etc_systemd_system_vault_service

        - name: "Slurp file /etc/systemd/system/vault.service"
          ansible.builtin.slurp:
            src: "/etc/systemd/system/vault.service"
          register: slurp_etc_systemd_system_vault_service

        - name: "Verify service vault"
          ansible.builtin.assert:
            that:
              - stat_etc_systemd_system_vault_service.stat.exists
              - stat_etc_systemd_system_vault_service.stat.isreg
              - stat_etc_systemd_system_vault_service.stat.pw_name == 'root'
              - stat_etc_systemd_system_vault_service.stat.gr_name == 'root'
              - stat_etc_systemd_system_vault_service.stat.mode == '0644'
              - slurp_etc_systemd_system_vault_service.content != ''
              - ansible_facts.services['vault.service'] is defined
              - ansible_facts.services['vault.service']['source'] == 'systemd'
              - ansible_facts.services['vault.service']['state'] == 'running'
              - ansible_facts.services['vault.service']['status'] == 'enabled'

    - name: "Test: bootstrap vault cluster"
      block:
        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator init -non-interactive -key-shares=3 -key-threshold=2 -tls-skip-verify -format=json"
          changed_when: false
          register: vault_operator_init

    - name: "Test: unseal vault cluster"
      vars:
        vault_unseal_keys: "{{ vault_operator_init.stdout|from_json|json_query('unseal_keys_hex') }}"
      block:
        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[0] }}"
          changed_when: false
          register: vault_operator_unseal_0

        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[1] }}"
          changed_when: false
          register: vault_operator_unseal_1

        # After the first unseal (1 of 2 required key shares) the server must
        # still report sealed; after the second it must be unsealed.
        # NOTE(review): these asserts assume the templated values evaluate as
        # booleans — with string results "False" would still be truthy; confirm
        # jinja2_native behavior if this ever flakes.
        - name: "Verify vault operator unseal"
          vars:
            vault_seal_state_0: "{{ vault_operator_unseal_0.stdout|from_json|json_query('sealed') }}"
            vault_seal_state_1: "{{ vault_operator_unseal_1.stdout|from_json|json_query('sealed') }}"
          ansible.builtin.assert:
            that:
              - vault_seal_state_0
              - not vault_seal_state_1

    - name: "Test: vault interaction"
      vars:
        root_token: "{{ vault_operator_init.stdout|from_json|json_query('root_token') }}"
      block:
        - name: "Command vault secret enable"
          ansible.builtin.command: "vault secrets enable -version=1 -tls-skip-verify kv"
          environment:
            VAULT_TOKEN: "{{ root_token }}"
          changed_when: false
          register: vault_secret_enable

        - name: "Verify vault interaction"
          ansible.builtin.assert:
            that:
              - vault_secret_enable.stdout == 'Success! Enabled the kv secrets engine at: kv/'

View File

@ -1,8 +0,0 @@
---
# Molecule converge playbook: applies the role under test to every instance.
- name: Converge
  hosts: all
  become: true
  tasks:
    - name: "Include ednxzu.hashicorp_vault"
      ansible.builtin.include_role:
        name: "ednxzu.hashicorp_vault"

View File

@ -1,35 +0,0 @@
---
# Molecule scenario configuration: "default_vagrant" scenario,
# vagrant driver with the libvirt provider (full VM instead of a container).
dependency:
  name: galaxy
  options:
    requirements-file: ./requirements.yml
driver:
  name: vagrant
  provider:
    name: libvirt
platforms:
  # VM box chosen via $MOLECULE_TEST_OS
  - name: instance
    box: generic/${MOLECULE_TEST_OS}
    cpus: 4
    memory: 4096
provisioner:
  name: ansible
  config_options:
    defaults:
      remote_tmp: /tmp/.ansible
verifier:
  name: ansible
scenario:
  name: default_vagrant
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    - idempotence
    - verify
    - cleanup
    - destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
# Roles pulled from Ansible Galaxy by molecule's "dependency" step.
roles:
  - name: ednxzu.manage_repositories
  - name: ednxzu.manage_apt_packages

View File

@ -1,162 +0,0 @@
---
# Molecule verify playbook (default_vagrant scenario): asserts the converged
# instance is in the expected state (vault user/group, config files, data dir,
# systemd service), then initializes, unseals and exercises the vault server.
- name: Verify
  hosts: all
  gather_facts: true
  become: true
  tasks:
    - name: "Test: vault user and group"
      block:
        - name: "Getent user vault"
          ansible.builtin.getent:
            database: passwd
            key: vault
          register: vault_user

        - name: "Getent group vault"
          ansible.builtin.getent:
            database: group
            key: vault
          register: vault_group

        - name: "Verify vault user and group"
          ansible.builtin.assert:
            that:
              - not vault_user.failed
              - not vault_group.failed
              - "'vault' in vault_user.ansible_facts.getent_passwd.keys()"
              - "'/home/vault' in vault_user.ansible_facts.getent_passwd['vault']"
              - "'/bin/false' in vault_user.ansible_facts.getent_passwd['vault']"
              - "'vault' in vault_group.ansible_facts.getent_group.keys()"

    - name: "Test: directory /etc/vault.d"
      block:
        - name: "Stat directory /etc/vault.d"
          ansible.builtin.stat:
            path: "/etc/vault.d"
          register: stat_etc_vault_d

        - name: "Stat file /etc/vault.d/vault.env"
          ansible.builtin.stat:
            path: "/etc/vault.d/vault.env"
          register: stat_etc_vault_d_vault_env

        - name: "Stat file /etc/vault.d/vault.json"
          ansible.builtin.stat:
            path: "/etc/vault.d/vault.json"
          register: stat_etc_vault_d_vault_json

        - name: "Slurp file /etc/vault.d/vault.json"
          ansible.builtin.slurp:
            src: "/etc/vault.d/vault.json"
          register: slurp_etc_vault_d_vault_json

        - name: "Verify directory /etc/vault.d"
          ansible.builtin.assert:
            that:
              - stat_etc_vault_d.stat.exists
              - stat_etc_vault_d.stat.isdir
              - stat_etc_vault_d.stat.pw_name == 'vault'
              - stat_etc_vault_d.stat.gr_name == 'vault'
              - stat_etc_vault_d.stat.mode == '0755'
              - stat_etc_vault_d_vault_env.stat.exists
              - stat_etc_vault_d_vault_env.stat.isreg
              - stat_etc_vault_d_vault_env.stat.pw_name == 'vault'
              - stat_etc_vault_d_vault_env.stat.gr_name == 'vault'
              - stat_etc_vault_d_vault_env.stat.mode == '0600'
              - stat_etc_vault_d_vault_json.stat.exists
              - stat_etc_vault_d_vault_json.stat.isreg
              - stat_etc_vault_d_vault_json.stat.pw_name == 'vault'
              - stat_etc_vault_d_vault_json.stat.gr_name == 'vault'
              - stat_etc_vault_d_vault_json.stat.mode == '0600'
              - slurp_etc_vault_d_vault_json.content != ''

    - name: "Test: directory /opt/vault"
      block:
        - name: "Stat directory /opt/vault"
          ansible.builtin.stat:
            path: "/opt/vault"
          register: stat_opt_vault

        - name: "Verify directory /opt/vault"
          ansible.builtin.assert:
            that:
              - stat_opt_vault.stat.exists
              - stat_opt_vault.stat.isdir
              - stat_opt_vault.stat.pw_name == 'vault'
              - stat_opt_vault.stat.gr_name == 'vault'
              - stat_opt_vault.stat.mode == '0755'

    - name: "Test: service vault"
      block:
        - name: "Get service vault"
          ansible.builtin.service_facts:

        - name: "Stat file /etc/systemd/system/vault.service"
          ansible.builtin.stat:
            path: "/etc/systemd/system/vault.service"
          register: stat_etc_systemd_system_vault_service

        - name: "Slurp file /etc/systemd/system/vault.service"
          ansible.builtin.slurp:
            src: "/etc/systemd/system/vault.service"
          register: slurp_etc_systemd_system_vault_service

        - name: "Verify service vault"
          ansible.builtin.assert:
            that:
              - stat_etc_systemd_system_vault_service.stat.exists
              - stat_etc_systemd_system_vault_service.stat.isreg
              - stat_etc_systemd_system_vault_service.stat.pw_name == 'root'
              - stat_etc_systemd_system_vault_service.stat.gr_name == 'root'
              - stat_etc_systemd_system_vault_service.stat.mode == '0644'
              - slurp_etc_systemd_system_vault_service.content != ''
              - ansible_facts.services['vault.service'] is defined
              - ansible_facts.services['vault.service']['source'] == 'systemd'
              - ansible_facts.services['vault.service']['state'] == 'running'
              - ansible_facts.services['vault.service']['status'] == 'enabled'

    - name: "Test: bootstrap vault cluster"
      block:
        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator init -non-interactive -key-shares=3 -key-threshold=2 -tls-skip-verify -format=json"
          changed_when: false
          register: vault_operator_init

    - name: "Test: unseal vault cluster"
      vars:
        vault_unseal_keys: "{{ vault_operator_init.stdout|from_json|json_query('unseal_keys_hex') }}"
      block:
        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[0] }}"
          changed_when: false
          register: vault_operator_unseal_0

        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[1] }}"
          changed_when: false
          register: vault_operator_unseal_1

        # After the first unseal (1 of 2 required key shares) the server must
        # still report sealed; after the second it must be unsealed.
        # NOTE(review): these asserts assume the templated values evaluate as
        # booleans — with string results "False" would still be truthy; confirm
        # jinja2_native behavior if this ever flakes.
        - name: "Verify vault operator unseal"
          vars:
            vault_seal_state_0: "{{ vault_operator_unseal_0.stdout|from_json|json_query('sealed') }}"
            vault_seal_state_1: "{{ vault_operator_unseal_1.stdout|from_json|json_query('sealed') }}"
          ansible.builtin.assert:
            that:
              - vault_seal_state_0
              - not vault_seal_state_1

    - name: "Test: vault interaction"
      vars:
        root_token: "{{ vault_operator_init.stdout|from_json|json_query('root_token') }}"
      block:
        - name: "Command vault secret enable"
          ansible.builtin.command: "vault secrets enable -version=1 -tls-skip-verify kv"
          environment:
            VAULT_TOKEN: "{{ root_token }}"
          changed_when: false
          register: vault_secret_enable

        - name: "Verify vault interaction"
          ansible.builtin.assert:
            that:
              - vault_secret_enable.stdout == 'Success! Enabled the kv secrets engine at: kv/'

View File

@ -1,8 +0,0 @@
---
# Molecule converge playbook: applies the role under test to every instance.
- name: Converge
  hosts: all
  become: true
  tasks:
    - name: "Include ednxzu.hashicorp_vault"
      ansible.builtin.include_role:
        name: "ednxzu.hashicorp_vault"

View File

@ -1,52 +0,0 @@
---
# defaults file for hashicorp_vault
# (with_raft_enabled scenario overrides: integrated raft storage, TLS off)
hashi_vault_install: true
hashi_vault_auto_update: true
hashi_vault_start_service: true
hashi_vault_version: latest
hashi_vault_deploy_method: host # deployment method, either host or docker
hashi_vault_env_variables: {}
hashi_vault_data_dir: "/opt/vault"
hashi_vault_extra_files: false
hashi_vault_extra_files_src: /tmp/extra_files
hashi_vault_extra_files_dst: /etc/vault.d/extra_files
#! vault configuration
hashi_vault_configuration:
  cluster_name: vault
  cluster_addr: "http://127.0.0.1:8201"
  api_addr: "http://127.0.0.1:8200"
  ui: true
  disable_mlock: false
  disable_cache: false
  listener:
    tcp:
      address: "127.0.0.1:8200"
      cluster_address: "127.0.0.1:8201"
      tls_disable: 1
      tls_disable_client_certs: false
      # NOTE: the cert/key paths below are unused while tls_disable is 1
      tls_cert_file: "{{ hashi_vault_data_dir }}/tls/tls.crt" # this uses the autogenerated TLS certificates
      tls_key_file: "{{ hashi_vault_data_dir }}/tls/tls.key" # this uses the autogenerated TLS certificates
  storage:
    raft:
      path: "{{ hashi_vault_data_dir }}/data"
      node_id: "{{ ansible_hostname }}"
      retry_join:
        - leader_api_addr: "http://127.0.0.1:8200"
        - leader_api_addr: "http://127.0.0.2:8200"
        - leader_api_addr: "http://127.0.0.3:8200"
  # service_registration:
  #   consul:
  #     address: 127.0.0.1:8500
  #     scheme: https
  #     token: someUUIDforconsul
  telemetry:
    usage_gauge_period: 10m
    maximum_gauge_cardinality: 500
    disable_hostname: false
    enable_hostname_label: false
    lease_metrics_epsilon: 1h
    num_lease_metrics_buckets: 168
    add_lease_metrics_namespace_labels: false
    filter_default: true
    prefix_filter: []
    prometheus_retention_time: 24h

View File

@ -1,37 +0,0 @@
---
# Molecule scenario configuration: "with_raft_enabled" scenario, docker driver.
dependency:
  name: galaxy
  options:
    requirements-file: ./requirements.yml
driver:
  name: docker
platforms:
  # systemd-capable container image; distro chosen via $MOLECULE_TEST_OS
  - name: instance
    image: geerlingguy/docker-${MOLECULE_TEST_OS}-ansible
    command: ""
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup
    cgroupns_mode: host
    privileged: true
    pre_build_image: true
provisioner:
  name: ansible
  config_options:
    defaults:
      remote_tmp: /tmp/.ansible
verifier:
  name: ansible
scenario:
  name: with_raft_enabled
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    - idempotence
    - verify
    - cleanup
    - destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
# Roles pulled from Ansible Galaxy by molecule's "dependency" step.
roles:
  - name: ednxzu.manage_repositories
  - name: ednxzu.manage_apt_packages

View File

@ -1,169 +0,0 @@
---
# Molecule verify playbook (with_raft_enabled scenario): asserts the converged
# instance is in the expected state, then initializes, unseals and exercises
# the vault server. TLS is disabled in this scenario, so the CLI is pointed at
# the plain-HTTP listener via VAULT_ADDR.
- name: Verify
  hosts: all
  gather_facts: true
  become: true
  tasks:
    - name: "Test: vault user and group"
      block:
        - name: "Getent user vault"
          ansible.builtin.getent:
            database: passwd
            key: vault
          register: vault_user

        - name: "Getent group vault"
          ansible.builtin.getent:
            database: group
            key: vault
          register: vault_group

        - name: "Verify vault user and group"
          ansible.builtin.assert:
            that:
              - not vault_user.failed
              - not vault_group.failed
              - "'vault' in vault_user.ansible_facts.getent_passwd.keys()"
              - "'/home/vault' in vault_user.ansible_facts.getent_passwd['vault']"
              - "'/bin/false' in vault_user.ansible_facts.getent_passwd['vault']"
              - "'vault' in vault_group.ansible_facts.getent_group.keys()"

    - name: "Test: directory /etc/vault.d"
      block:
        - name: "Stat directory /etc/vault.d"
          ansible.builtin.stat:
            path: "/etc/vault.d"
          register: stat_etc_vault_d

        - name: "Stat file /etc/vault.d/vault.env"
          ansible.builtin.stat:
            path: "/etc/vault.d/vault.env"
          register: stat_etc_vault_d_vault_env

        - name: "Stat file /etc/vault.d/vault.json"
          ansible.builtin.stat:
            path: "/etc/vault.d/vault.json"
          register: stat_etc_vault_d_vault_json

        - name: "Slurp file /etc/vault.d/vault.json"
          ansible.builtin.slurp:
            src: "/etc/vault.d/vault.json"
          register: slurp_etc_vault_d_vault_json

        - name: "Verify directory /etc/vault.d"
          ansible.builtin.assert:
            that:
              - stat_etc_vault_d.stat.exists
              - stat_etc_vault_d.stat.isdir
              - stat_etc_vault_d.stat.pw_name == 'vault'
              - stat_etc_vault_d.stat.gr_name == 'vault'
              - stat_etc_vault_d.stat.mode == '0755'
              - stat_etc_vault_d_vault_env.stat.exists
              - stat_etc_vault_d_vault_env.stat.isreg
              - stat_etc_vault_d_vault_env.stat.pw_name == 'vault'
              - stat_etc_vault_d_vault_env.stat.gr_name == 'vault'
              - stat_etc_vault_d_vault_env.stat.mode == '0600'
              - stat_etc_vault_d_vault_json.stat.exists
              - stat_etc_vault_d_vault_json.stat.isreg
              - stat_etc_vault_d_vault_json.stat.pw_name == 'vault'
              - stat_etc_vault_d_vault_json.stat.gr_name == 'vault'
              - stat_etc_vault_d_vault_json.stat.mode == '0600'
              - slurp_etc_vault_d_vault_json.content != ''

    - name: "Test: directory /opt/vault"
      block:
        - name: "Stat directory /opt/vault"
          ansible.builtin.stat:
            path: "/opt/vault"
          register: stat_opt_vault

        - name: "Verify directory /opt/vault"
          ansible.builtin.assert:
            that:
              - stat_opt_vault.stat.exists
              - stat_opt_vault.stat.isdir
              - stat_opt_vault.stat.pw_name == 'vault'
              - stat_opt_vault.stat.gr_name == 'vault'
              - stat_opt_vault.stat.mode == '0755'

    - name: "Test: service vault"
      block:
        - name: "Get service vault"
          ansible.builtin.service_facts:

        - name: "Stat file /etc/systemd/system/vault.service"
          ansible.builtin.stat:
            path: "/etc/systemd/system/vault.service"
          register: stat_etc_systemd_system_vault_service

        - name: "Slurp file /etc/systemd/system/vault.service"
          ansible.builtin.slurp:
            src: "/etc/systemd/system/vault.service"
          register: slurp_etc_systemd_system_vault_service

        - name: "Verify service vault"
          ansible.builtin.assert:
            that:
              - stat_etc_systemd_system_vault_service.stat.exists
              - stat_etc_systemd_system_vault_service.stat.isreg
              - stat_etc_systemd_system_vault_service.stat.pw_name == 'root'
              - stat_etc_systemd_system_vault_service.stat.gr_name == 'root'
              - stat_etc_systemd_system_vault_service.stat.mode == '0644'
              - slurp_etc_systemd_system_vault_service.content != ''
              - ansible_facts.services['vault.service'] is defined
              - ansible_facts.services['vault.service']['source'] == 'systemd'
              - ansible_facts.services['vault.service']['state'] == 'running'
              - ansible_facts.services['vault.service']['status'] == 'enabled'

    - name: "Test: bootstrap vault cluster"
      block:
        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator init -non-interactive -key-shares=3 -key-threshold=2 -tls-skip-verify -format=json"
          environment:
            VAULT_ADDR: "http://localhost:8200"
          changed_when: false
          register: vault_operator_init

    - name: "Test: unseal vault cluster"
      vars:
        vault_unseal_keys: "{{ vault_operator_init.stdout|from_json|json_query('unseal_keys_hex') }}"
      block:
        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[0] }}"
          environment:
            VAULT_ADDR: "http://localhost:8200"
          changed_when: false
          register: vault_operator_unseal_0

        - name: "Command vault operator init"
          ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[1] }}"
          environment:
            VAULT_ADDR: "http://localhost:8200"
          changed_when: false
          register: vault_operator_unseal_1

        # After the first unseal (1 of 2 required key shares) the server must
        # still report sealed; after the second it must be unsealed.
        # NOTE(review): these asserts assume the templated values evaluate as
        # booleans — with string results "False" would still be truthy; confirm
        # jinja2_native behavior if this ever flakes.
        - name: "Verify vault operator unseal"
          vars:
            vault_seal_state_0: "{{ vault_operator_unseal_0.stdout|from_json|json_query('sealed') }}"
            vault_seal_state_1: "{{ vault_operator_unseal_1.stdout|from_json|json_query('sealed') }}"
          ansible.builtin.assert:
            that:
              - vault_seal_state_0
              - not vault_seal_state_1

    - name: "Test: vault interaction"
      vars:
        root_token: "{{ vault_operator_init.stdout|from_json|json_query('root_token') }}"
      block:
        - name: "Command vault secret enable"
          ansible.builtin.command: "vault secrets enable -version=1 -tls-skip-verify kv"
          environment:
            VAULT_ADDR: "http://localhost:8200"
            VAULT_TOKEN: "{{ root_token }}"
          changed_when: false
          register: vault_secret_enable

        - name: "Verify vault interaction"
          ansible.builtin.assert:
            that:
              - vault_secret_enable.stdout == 'Success! Enabled the kv secrets engine at: kv/'

View File

@ -1,8 +0,0 @@
---
# Molecule converge playbook: applies the role under test to every instance.
- name: Converge
  hosts: all
  become: true
  tasks:
    - name: "Include ednxzu.hashicorp_vault"
      ansible.builtin.include_role:
        name: "ednxzu.hashicorp_vault"

View File

@ -1,52 +0,0 @@
---
# defaults file for hashicorp_vault
# (with_raft_enabled_vagrant scenario overrides: integrated raft storage, TLS off)
hashi_vault_install: true
hashi_vault_auto_update: true
hashi_vault_start_service: true
hashi_vault_version: latest
hashi_vault_deploy_method: host # deployment method, either host or docker
hashi_vault_env_variables: {}
hashi_vault_data_dir: "/opt/vault"
hashi_vault_extra_files: false
hashi_vault_extra_files_src: /tmp/extra_files
hashi_vault_extra_files_dst: /etc/vault.d/extra_files
#! vault configuration
hashi_vault_configuration:
  cluster_name: vault
  cluster_addr: "http://127.0.0.1:8201"
  api_addr: "http://127.0.0.1:8200"
  ui: true
  disable_mlock: false
  disable_cache: false
  listener:
    tcp:
      address: "127.0.0.1:8200"
      cluster_address: "127.0.0.1:8201"
      tls_disable: 1
      tls_disable_client_certs: false
      # NOTE: the cert/key paths below are unused while tls_disable is 1
      tls_cert_file: "{{ hashi_vault_data_dir }}/tls/tls.crt" # this uses the autogenerated TLS certificates
      tls_key_file: "{{ hashi_vault_data_dir }}/tls/tls.key" # this uses the autogenerated TLS certificates
  storage:
    raft:
      path: "{{ hashi_vault_data_dir }}/data"
      node_id: "{{ ansible_hostname }}"
      retry_join:
        - leader_api_addr: "http://127.0.0.1:8200"
        - leader_api_addr: "http://127.0.0.2:8200"
        - leader_api_addr: "http://127.0.0.3:8200"
  # service_registration:
  #   consul:
  #     address: 127.0.0.1:8500
  #     scheme: https
  #     token: someUUIDforconsul
  telemetry:
    usage_gauge_period: 10m
    maximum_gauge_cardinality: 500
    disable_hostname: false
    enable_hostname_label: false
    lease_metrics_epsilon: 1h
    num_lease_metrics_buckets: 168
    add_lease_metrics_namespace_labels: false
    filter_default: true
    prefix_filter: []
    prometheus_retention_time: 24h

View File

@ -1,35 +0,0 @@
---
dependency:
name: galaxy
options:
requirements-file: ./requirements.yml
driver:
name: vagrant
provider:
name: libvirt
platforms:
- name: instance
box: generic/${MOLECULE_TEST_OS}
cpus: 4
memory: 4096
provisioner:
name: ansible
config_options:
defaults:
remote_tmp: /tmp/.ansible
verifier:
name: ansible
scenario:
name: with_raft_enabled_vagrant
test_sequence:
- dependency
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- verify
- cleanup
- destroy

View File

@ -1,5 +0,0 @@
---
# requirements file for molecule
# Roles pulled from Ansible Galaxy by molecule's "dependency" step.
roles:
  - name: ednxzu.manage_repositories
  - name: ednxzu.manage_apt_packages

View File

@ -1,169 +0,0 @@
---
- name: Verify
hosts: all
gather_facts: true
become: true
tasks:
- name: "Test: vault user and group"
block:
- name: "Getent user vault"
ansible.builtin.getent:
database: passwd
key: vault
register: vault_user
- name: "Getent group vault"
ansible.builtin.getent:
database: group
key: vault
register: vault_group
- name: "Verify vault user and group"
ansible.builtin.assert:
that:
- not vault_user.failed
- not vault_group.failed
- "'vault' in vault_user.ansible_facts.getent_passwd.keys()"
- "'/home/vault' in vault_user.ansible_facts.getent_passwd['vault']"
- "'/bin/false' in vault_user.ansible_facts.getent_passwd['vault']"
- "'vault' in vault_group.ansible_facts.getent_group.keys()"
- name: "Test: directory /etc/vault.d"
block:
- name: "Stat directory /etc/vault.d"
ansible.builtin.stat:
path: "/etc/vault.d"
register: stat_etc_vault_d
- name: "Stat file /etc/vault.d/vault.env"
ansible.builtin.stat:
path: "/etc/vault.d/vault.env"
register: stat_etc_vault_d_vault_env
- name: "Stat file /etc/vault.d/vault.json"
ansible.builtin.stat:
path: "/etc/vault.d/vault.json"
register: stat_etc_vault_d_vault_json
- name: "Slurp file /etc/vault.d/vault.json"
ansible.builtin.slurp:
src: "/etc/vault.d/vault.json"
register: slurp_etc_vault_d_vault_json
- name: "Verify directory /etc/vault.d"
ansible.builtin.assert:
that:
- stat_etc_vault_d.stat.exists
- stat_etc_vault_d.stat.isdir
- stat_etc_vault_d.stat.pw_name == 'vault'
- stat_etc_vault_d.stat.gr_name == 'vault'
- stat_etc_vault_d.stat.mode == '0755'
- stat_etc_vault_d_vault_env.stat.exists
- stat_etc_vault_d_vault_env.stat.isreg
- stat_etc_vault_d_vault_env.stat.pw_name == 'vault'
- stat_etc_vault_d_vault_env.stat.gr_name == 'vault'
- stat_etc_vault_d_vault_env.stat.mode == '0600'
- stat_etc_vault_d_vault_json.stat.exists
- stat_etc_vault_d_vault_json.stat.isreg
- stat_etc_vault_d_vault_json.stat.pw_name == 'vault'
- stat_etc_vault_d_vault_json.stat.gr_name == 'vault'
- stat_etc_vault_d_vault_json.stat.mode == '0600'
- slurp_etc_vault_d_vault_json.content != ''
- name: "Test: directory /opt/vault"
block:
- name: "Stat directory /opt/vault"
ansible.builtin.stat:
path: "/opt/vault"
register: stat_opt_vault
- name: "Verify directory /opt/vault"
ansible.builtin.assert:
that:
- stat_opt_vault.stat.exists
- stat_opt_vault.stat.isdir
- stat_opt_vault.stat.pw_name == 'vault'
- stat_opt_vault.stat.gr_name == 'vault'
- stat_opt_vault.stat.mode == '0755'
- name: "Test: service vault"
block:
- name: "Get service vault"
ansible.builtin.service_facts:
- name: "Stat file /etc/systemd/system/vault.service"
ansible.builtin.stat:
path: "/etc/systemd/system/vault.service"
register: stat_etc_systemd_system_vault_service
- name: "Slurp file /etc/systemd/system/vault.service"
ansible.builtin.slurp:
src: "/etc/systemd/system/vault.service"
register: slurp_etc_systemd_system_vault_service
- name: "Verify service vault"
ansible.builtin.assert:
that:
- stat_etc_systemd_system_vault_service.stat.exists
- stat_etc_systemd_system_vault_service.stat.isreg
- stat_etc_systemd_system_vault_service.stat.pw_name == 'root'
- stat_etc_systemd_system_vault_service.stat.gr_name == 'root'
- stat_etc_systemd_system_vault_service.stat.mode == '0644'
- slurp_etc_systemd_system_vault_service.content != ''
- ansible_facts.services['vault.service'] is defined
- ansible_facts.services['vault.service']['source'] == 'systemd'
- ansible_facts.services['vault.service']['state'] == 'running'
- ansible_facts.services['vault.service']['status'] == 'enabled'
- name: "Test: bootstrap vault cluster"
block:
- name: "Command vault operator init"
ansible.builtin.command: "vault operator init -non-interactive -key-shares=3 -key-threshold=2 -tls-skip-verify -format=json"
environment:
VAULT_ADDR: "http://localhost:8200"
changed_when: false
register: vault_operator_init
- name: "Test: unseal vault cluster"
vars:
vault_unseal_keys: "{{ vault_operator_init.stdout|from_json|json_query('unseal_keys_hex') }}"
block:
- name: "Command vault operator init"
ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[0] }}"
environment:
VAULT_ADDR: "http://localhost:8200"
changed_when: false
register: vault_operator_unseal_0
- name: "Command vault operator init"
ansible.builtin.command: "vault operator unseal -format=json -tls-skip-verify {{ vault_unseal_keys[1] }}"
environment:
VAULT_ADDR: "http://localhost:8200"
changed_when: false
register: vault_operator_unseal_1
- name: "Verify vault operator unseal"
vars:
vault_seal_state_0: "{{ vault_operator_unseal_0.stdout|from_json|json_query('sealed') }}"
vault_seal_state_1: "{{ vault_operator_unseal_1.stdout|from_json|json_query('sealed') }}"
ansible.builtin.assert:
that:
- vault_seal_state_0
- not vault_seal_state_1
- name: "Test: vault interaction"
vars:
root_token: "{{ vault_operator_init.stdout|from_json|json_query('root_token') }}"
block:
- name: "Command vault secret enable"
ansible.builtin.command: "vault secrets enable -version=1 -tls-skip-verify kv"
environment:
VAULT_ADDR: "http://localhost:8200"
VAULT_TOKEN: "{{ root_token }}"
changed_when: false
register: vault_secret_enable
- name: "Verify vault interaction"
ansible.builtin.assert:
that:
- vault_secret_enable.stdout == 'Success! Enabled the kv secrets engine at: kv/'

View File

@ -1,46 +0,0 @@
---
# task/configure file for hashicorp_vault
# Removes the packaged HCL config, renders the JSON config and the environment
# file, and optionally deploys extra template files alongside them.
- name: "Ensure default vault.hcl is removed"
  ansible.builtin.file:
    path: /etc/vault.d/vault.hcl
    state: absent

- name: "Copy vault.json template"
  ansible.builtin.template:
    src: vault.json.j2
    dest: "{{ hashi_vault_config_dir }}/vault.json"
    owner: "{{ hashi_vault_user }}"
    group: "{{ hashi_vault_group }}"
    mode: '0600'
  notify:
    - "systemctl-enable-vault"
    - "systemctl-restart-vault"

# BUGFIX: this task previously notified no handler, so a changed vault.env
# (new/updated environment variables) was never picked up by the running
# service until something else triggered a restart.
- name: "Create vault.env"
  ansible.builtin.template:
    src: vault.env.j2
    dest: "{{ hashi_vault_config_dir }}/vault.env"
    owner: "{{ hashi_vault_user }}"
    group: "{{ hashi_vault_group }}"
    mode: '0600'
  notify:
    - "systemctl-enable-vault"
    - "systemctl-restart-vault"

- name: "Copy extra configuration files"
  when: hashi_vault_extra_files
  block:
    - name: "Create directory {{ hashi_vault_extra_files_dst }}"
      ansible.builtin.file:
        path: "{{ hashi_vault_extra_files_dst }}"
        state: directory
        owner: "{{ hashi_vault_user }}"
        group: "{{ hashi_vault_group }}"
        mode: '0755'

    - name: "Copy extra configuration files"
      ansible.builtin.template:
        src: "{{ item }}"
        # strip the trailing template extension (e.g. foo.conf.j2 -> foo.conf)
        dest: "{{ hashi_vault_extra_files_dst }}/{{ (item | basename).split('.')[:-1] | join('.') }}"
        owner: "{{ hashi_vault_user }}"
        group: "{{ hashi_vault_group }}"
        mode: '0600'
      with_fileglob:
        - "{{ hashi_vault_extra_files_src }}/*"

View File

@ -1,25 +0,0 @@
---
# task/install file for hashicorp_vault
#
# Configures the HashiCorp apt repository, installs the vault package at the
# requested version, and installs our systemd unit in place of the packaged one.
- name: "Configure hashicorp repository"
  # Delegates repo management to the external ednxzu.manage_repositories role;
  # disables the distro default repo list and installs the hashicorp repo only.
  ansible.builtin.include_role:
    name: ednxzu.manage_repositories
  vars:
    manage_repositories_enable_default_repo: false
    manage_repositories_enable_custom_repo: true
    manage_repositories_custom_repo: "{{ hashi_vault_repository }}"
- name: "Install vault:{{ hashi_vault_version }}"
  # hashi_vault_packages is expected to pin the version; semantics live in the
  # external ednxzu.manage_apt_packages role.
  ansible.builtin.include_role:
    name: ednxzu.manage_apt_packages
  vars:
    manage_apt_packages_list: "{{ hashi_vault_packages }}"
- name: "Copy systemd service file for vault"
  # Overwrites /etc/systemd/system/vault.service, which takes precedence over
  # the unit shipped by the package; daemon-reload handler picks it up.
  ansible.builtin.template:
    src: "vault.service.j2"
    dest: "/etc/systemd/system/vault.service"
    owner: root
    group: root
    mode: '0644'
  notify:
    - "systemctl-daemon-reload"

View File

@ -1,11 +0,0 @@
---
# task/main file for hashicorp_vault
#
# Entry point: always run prerequisites (user/group/dirs), optionally install
# the package (gated by hashi_vault_install), then render configuration.
- name: "Import prerequisites.yml"
  ansible.builtin.include_tasks: prerequisites.yml
- name: "Import install.yml"
  ansible.builtin.include_tasks: install.yml
  when: hashi_vault_install
- name: "Import configure.yml"
  ansible.builtin.include_tasks: configure.yml

View File

@ -1,29 +0,0 @@
---
# task/prerequisites file for hashicorp_vault
#
# Creates the service account, its primary group, and the config/data
# directories that the vault service runs under.
- name: "Create group {{ hashi_vault_group }}"
  # BUGFIX: previously passed hashi_vault_user as the group name, creating a
  # group named after the user variable; the user task below then referenced
  # hashi_vault_group, which would not exist whenever the two variables differ.
  ansible.builtin.group:
    name: "{{ hashi_vault_group }}"
    state: present
- name: "Create user {{ hashi_vault_user }}"
  # System-style service account: no login shell.
  ansible.builtin.user:
    name: "{{ hashi_vault_user }}"
    group: "{{ hashi_vault_group }}"
    shell: /bin/false
    state: present
- name: "Create directory {{ hashi_vault_config_dir }}"
  ansible.builtin.file:
    path: "{{ hashi_vault_config_dir }}"
    state: directory
    owner: "{{ hashi_vault_user }}"
    group: "{{ hashi_vault_group }}"
    mode: '0755'
- name: "Create directory {{ hashi_vault_data_dir }}"
  ansible.builtin.file:
    path: "{{ hashi_vault_data_dir }}"
    state: directory
    owner: "{{ hashi_vault_user }}"
    group: "{{ hashi_vault_group }}"
    mode: '0755'

View File

@ -1,4 +0,0 @@
# {{ ansible_managed }}
{# Emit one VAR="value" line per entry; keys are upper-cased for the env. -#}
{% for key, value in hashi_vault_env_variables.items() %}
{{ key | upper }}="{{ value }}"
{% endfor %}

View File

@ -1 +0,0 @@
{{ hashi_vault_configuration|to_nice_json }}

View File

@ -1,38 +0,0 @@
{# systemd unit for vault, templated by the hashicorp_vault role.
   All comments here are Jinja comments with whitespace control so the
   rendered unit file is byte-identical to the untemplated original. -#}
[Unit]
Description="HashiCorp Vault - A tool for managing secrets"
Documentation=https://www.vaultproject.io/docs/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty={{ hashi_vault_config_dir }}/vault.json
StartLimitIntervalSec=60
StartLimitBurst=3
{# Order after consul when vault uses consul for storage or service
   registration, so the backend is up before vault starts. -#}
{% if hashi_vault_configuration.storage.consul is defined or hashi_vault_configuration.service_registration.consul is defined %}
Wants=consul.service
After=consul.service
{% endif %}
[Service]
Type=notify
{# "-" prefix: missing env file is not an error. -#}
EnvironmentFile=-{{ hashi_vault_config_dir }}/vault.env
User={{ hashi_vault_user }}
Group={{ hashi_vault_group }}
ProtectSystem=full
ProtectHome=read-only
PrivateTmp=yes
PrivateDevices=yes
SecureBits=keep-caps
{# CAP_IPC_LOCK lets vault mlock memory without running as root. -#}
AmbientCapabilities=CAP_IPC_LOCK
CapabilityBoundingSet=CAP_SYSLOG CAP_IPC_LOCK
NoNewPrivileges=yes
ExecStart=/usr/bin/vault server -config={{ hashi_vault_config_dir }}
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGINT
Restart=on-failure
RestartSec=5
TimeoutStopSec=30
LimitNOFILE=65536
LimitMEMLOCK=infinity
[Install]
WantedBy=multi-user.target

Some files were not shown because too many files have changed in this diff Show More