diff --git a/ansible/playbooks/paas/main.yml b/ansible/playbooks/paas/main.yml index b62bf7ae..85c0b7f3 100644 --- a/ansible/playbooks/paas/main.yml +++ b/ansible/playbooks/paas/main.yml @@ -6,13 +6,13 @@ become: true pre_tasks: - name: Create ansible facts.d directory - become: yes - file: + become: true + ansible.builtin.file: path: /etc/ansible/facts.d state: directory owner: "root" group: "root" - mode: 0755 + mode: '0755' - name: Get ipinfo.io ansible.builtin.uri: @@ -32,13 +32,8 @@ ansible.builtin.apt: pkg: - python3-debian - - python3-passlib - - libxml2-utils - - ruby-rubygems - - binutils - unzip - make - - python3-pymysql - jq state: present @@ -49,4 +44,3 @@ retries: 10 roles: - unattended-upgrades - - restic diff --git a/ansible/playbooks/paas/nomad.yml b/ansible/playbooks/paas/nomad.yml index 18299451..20760e0d 100644 --- a/ansible/playbooks/paas/nomad.yml +++ b/ansible/playbooks/paas/nomad.yml @@ -1,10 +1,10 @@ --- - name: Create CA TLS any_errors_fatal: true - hosts: "{{ hosts_limit | default('infrastructure') }}" + hosts: localhost gather_facts: false strategy: linear - become: true + become: false tasks: - name: "Create TLS CA" ansible.builtin.include_role: diff --git a/ansible/playbooks/paas/roles/certificate/defaults/main.yml b/ansible/playbooks/paas/roles/certificate/defaults/main.yml index 67b69d2d..a7d47e9e 100644 --- a/ansible/playbooks/paas/roles/certificate/defaults/main.yml +++ b/ansible/playbooks/paas/roles/certificate/defaults/main.yml @@ -2,8 +2,8 @@ # defaults file for certificate certificate_ca_host: localhost -certificate_ca_host_dir: "~/.simple-stack/tls/{{ inventory_hostname }}" +certificate_ca_host_dir: "~/.simple-stack/tls" certificate_ca_pubkey: simplestack-ca.pem certificate_ca_privatekey: simplestack-ca-key.pem certificate_host_certificate_dir: /etc/ssl/simplestack -certificate_common_name: simplestack +certificate_common_name: nomad diff --git a/ansible/playbooks/paas/roles/certificate/tasks/ca.yml 
b/ansible/playbooks/paas/roles/certificate/tasks/ca.yml index 576e363e..2c634e6a 100644 --- a/ansible/playbooks/paas/roles/certificate/tasks/ca.yml +++ b/ansible/playbooks/paas/roles/certificate/tasks/ca.yml @@ -15,7 +15,7 @@ become: false register: cert_ca_tls_priv_present -- name: "Certificate | Create private key for CA (simplestack-ca-key.pem)" +- name: "Certificate | Create private key for CA" community.crypto.openssl_privatekey: path: "{{ certificate_ca_host_dir }}/{{ certificate_ca_privatekey }}" delegate_to: "{{ certificate_ca_host }}" @@ -54,19 +54,3 @@ become: false when: not cert_ca_tls_priv_present.stat.exists -- name: "Certificate | Create TLS directory on target" - ansible.builtin.file: - path: "{{ certificate_host_certificate_dir }}" - state: directory - mode: '0755' - recurse: true - when: not ca_pubkey_present.stat.exists - -- name: "Certificate | Copy Public certs on nodes - {{ certificate_ca_pubkey }}" - ansible.builtin.copy: - src: "{{ certificate_ca_host_dir }}/{{ certificate_ca_pubkey }}" - dest: "{{ certificate_host_certificate_dir }}/{{ certificate_ca_pubkey }}" - owner: "root" - group: "root" - mode: "0640" - when: not ca_pubkey_present.stat.exists diff --git a/ansible/playbooks/paas/roles/nomad/defaults/main.yml b/ansible/playbooks/paas/roles/nomad/defaults/main.yml index ec6ba2e4..b4945a05 100644 --- a/ansible/playbooks/paas/roles/nomad/defaults/main.yml +++ b/ansible/playbooks/paas/roles/nomad/defaults/main.yml @@ -2,23 +2,29 @@ # defaults file for install # General -nomad_mode: "{% if (ansible_play_hosts | length) == 1 %}single{% elif (ansible_play_hosts | length) > 1 %}cluster{% endif %}" -nomad_node_role: "{% if nomad_mode == 'single' %}both{% elif nomad_mode == 'cluster' %}{{ nomad_node_role | default('both') }}{% endif %}" + +## single / cluster +nomad_mode: single + +## client / server / both +nomad_node_role: both nomad_deploy_cluster_name: "{{ nomad_cluster_name | default('default') }}" -nomad_firewall: true nomad_timezone: 
"Europe/Paris" -nomad_group: "simplestack" +nomad_group: simplestack # Configuration nomad_dc_name: "dc1" -nomad_node_name: "{{ inventory_hostname }}" +nomad_project: "{{ fact_instance.project }}" nomad_region: "{{ fact_instance.region }}" +nomad_location: "{{ fact_instance.region }}" + +nomad_node_name: "{{ inventory_hostname }}" -nomad_system_user: "{% if nomad_node_role == 'server' %}nomad{% elif nomad_node_role == 'client' or nomad_node_role == 'both' %}root{% endif %}" -nomad_system_group: "{% if nomad_node_role == 'server' %}nomad{% elif nomad_node_role == 'client' or nomad_node_role == 'both' %}root{% endif %}" +nomad_system_user: "{{ (nomad_node_role == 'server') | ternary('nomad', 'root') }}" +nomad_system_group: "{{ (nomad_node_role == 'server') | ternary('nomad', 'root') }}" nomad_config_dir: "/etc/nomad.d" nomad_config_file: "nomad.hcl" @@ -28,97 +34,93 @@ nomad_data_dir_server: "/opt/nomad/server" nomad_state_dir_client: "/opt/nomad/client" nomad_job_files_dir: "/var/tmp" -nomad_disable_anonymous_signature: "false" -nomad_disable_update_check: "false" +nomad_disable_anonymous_signature: false +nomad_disable_update_check: false nomad_leave_on_terminate: true nomad_leave_on_interrupt: true -nomad_client_auto_join: 'true' -nomad_server_auto_join: 'true' +nomad_client_auto_join: true +nomad_server_auto_join: true nomad_s3_storage_enabled: true # Network -nomad_http_scheme: "https" +nomad_http_scheme: https nomad_http_ip: "127.0.0.1" nomad_http_port: 4646 -nomad_cluster_bridge: "br0" -nomad_iface: "{{ ansible_default_ipv4.interface }}" +nomad_cluster_bridge: "ens3" +nomad_iface: "ens3" -nomad_list_node_ip: "\ - {% set nomad_list_node_ip = [] %}\ - {% for host in groups[consul_deploy_cluster_name] %}\ - {% if nomad_list_node_ip.append(hostvars[host]['ansible_' + hostvars[host].nomad_cluster_bridge].ipv4.address | default(nomad_cluster_bridge)) %}{% endif %}\ - {% endfor %}\ - {{ nomad_list_node_ip }}" +nomad_bind_address: "0.0.0.0" +nomad_advertise_address: 
"{{ hostvars[inventory_hostname]['ansible_' + nomad_iface]['ipv4']['address'] }}" -nomad_bind_address: "{{ hostvars[inventory_hostname]['ansible_' + nomad_iface | replace('-', '_')]['ipv4']['address'] }}" +nomad_ports: + http: "{{ nomad_ports_http | default('4646', true) }}" + rpc: "{{ nomad_ports_rpc | default('4647', true) }}" + serf: "{{ nomad_ports_serf | default('4648', true) }}" -nomad_bind_addr: "0.0.0.0" - -nomad_address_http: "0.0.0.0" -nomad_address_rpc: "{% if nomad_mode == 'single' %}0.0.0.0{% else %}{{ hostvars[inventory_hostname]['ansible_' + nomad_cluster_bridge | replace('-', '_')]['ipv4']['address'] }}{% endif %}" -nomad_address_serf: "{% if nomad_mode == 'single' %}0.0.0.0{% else %}{{ hostvars[inventory_hostname]['ansible_' + nomad_cluster_bridge | replace('-', '_')]['ipv4']['address'] }}{% endif %}" - -nomad_advertise_http: "{% if nomad_mode == 'single' %}127.0.0.1{% else %}{{ hostvars[inventory_hostname]['ansible_' + nomad_cluster_bridge | replace('-', '_')]['ipv4']['address'] }}{% endif %}" -nomad_advertise_rpc: "{% if nomad_mode == 'single' %}127.0.0.1{% else %}{{ hostvars[inventory_hostname]['ansible_' + nomad_cluster_bridge | replace('-', '_')]['ipv4']['address'] }}{% endif %}" -nomad_advertise_serf: "{% if nomad_mode == 'single' %}127.0.0.1{% else %}{{ hostvars[inventory_hostname]['ansible_' + nomad_cluster_bridge | replace('-', '_')]['ipv4']['address'] }}{% endif %}" - -nomad_port_http: 4646 -nomad_port_rpc: 4647 -nomad_port_serf: 4648 - -# Network -nomad_bridge: true - -nomad_bridge_list: - - { name: "internal", interface: "br-internal", ip_range: "172.18.0.1/16" } - - { name: "metrics", interface: "br-metrics", ip_range: "172.19.0.1/16" } # Log -nomad_debug: "false" -nomad_log_file: "/var/log/nomad/nomad.log" -nomad_log_level: "WARN" +nomad_debug: false +nomad_log_file: /var/log/nomad/nomad.log +nomad_log_level: WARN nomad_log_rotate_bytes: 0 -nomad_log_rotate_duration: "24h" +nomad_log_rotate_duration: 24h nomad_log_rotate_max_files: 0 
# Server -nomad_server_enabled: "true" +nomad_server_enabled: true -nomad_server_bootstrap_expect: "\ - {% set nomad_server_bootstrap_expect = [] %}\ +nomad_servers: "\ + {% set nomad_servers = [] %}\ {% if nomad_mode == 'single' %}\ - {% set _ = nomad_server_bootstrap_expect.append(1) %}\ + {% set _ = nomad_servers.append(inventory_hostname) %}\ {% else %}\ {% for host in groups[nomad_deploy_cluster_name] %}\ - {% if hostvars[host].consul_node_role in ['server', 'both'] %}\ - {% set _ = nomad_server_bootstrap_expect.append(1) %}\ + {% if hostvars[host].nomad_node_role in ['server', 'both'] %}\ + {% set _ = nomad_servers.append(host) %}\ {% endif %}\ {% endfor %}\ {% endif %}\ - {{ nomad_server_bootstrap_expect | length }}" + {{ nomad_servers }}" -nomad_server_rejoin_after_leave: "true" +nomad_servers_advertise_address: "\ + {% set nomad_servers_advertise_address = [] %}\ + {% if nomad_mode == 'single' %}\ + {% set _ = nomad_servers_advertise_address.append(hostvars[inventory_hostname]['ansible_' + nomad_iface]['ipv4']['address']) %}\ + {% else %}\ + {% for host in groups[nomad_deploy_cluster_name] %}\ + {% if hostvars[host].nomad_node_role in ['server', 'both'] %}\ + {% set _ = nomad_servers_advertise_address.append(hostvars[host]['ansible_' + hostvars[host].nomad_iface]['ipv4']['address']) %}\ + {% endif %}\ + {% endfor %}\ + {% endif %}\ + {{ nomad_servers_advertise_address }}" + +nomad_server_retry_max: 0 +nomad_server_retry_join: false +nomad_server_retry_interval: 30s +nomad_server_rejoin_after_leave: true nomad_server_enabled_schedulers: - service - batch - system +nomad_num_schedulers: "{{ ansible_processor_vcpus }}" -nomad_server_num_schedulers: 2 +nomad_server_num_schedulers: 1 nomad_server_event_buffer_size: 100 -nomad_server_node_gc_threshold: "24h" -nomad_server_eval_gc_threshold: "1h" -nomad_server_job_gc_threshold: "4h" -nomad_server_deployment_gc_threshold: "1h" - -nomad_server_heartbeat_grace: "10s" -nomad_server_min_heartbeat_ttl: "10s" 
-nomad_server_failover_heartbeat_ttl: "5m" +nomad_server_node_gc_threshold: 24h +nomad_server_eval_gc_threshold: 1h +nomad_server_job_gc_threshold: 4h +nomad_server_deployment_gc_threshold: 1h + +nomad_server_heartbeat_grace: 10s +nomad_server_min_heartbeat_ttl: 10s +nomad_server_failover_heartbeat_ttl: 5m nomad_server_max_heartbeats_per_second: 50.0 # nomad_server_encrypt: "" @@ -126,22 +128,22 @@ nomad_server_max_heartbeats_per_second: 50.0 nomad_server_raft_protocol: 3 # Client -nomad_client_enabled: "true" +nomad_client_enabled: true nomad_client_node_class: "{{ inventory_hostname }}-client" -nomad_client_node_pool: "default" +nomad_client_node_pool: default -nomad_client_no_host_uuid: "false" -nomad_client_max_kill_timeout: "30s" +nomad_client_no_host_uuid: false +nomad_client_max_kill_timeout: 30s -nomad_client_network_interface: docker0 +nomad_client_network_interface: "{{ nomad_iface }}" nomad_client_host_network_default: - name: "public" + name: public interface: "{{ ansible_default_ipv4.interface }}" nomad_client_host_network_cluster: - name: "cluster" + name: cluster interface: "{{ nomad_cluster_bridge }}" nomad_client_meta_list: {"arch": "{{ architecture_map[ansible_facts.architecture] }}", "location": "{{ fact_instance.location }}", "instance": "{{ inventory_hostname }}"} @@ -173,7 +175,7 @@ nomad_client_reserved_disk: 0 # TLS nomad_tls_ca_host: localhost -nomad_tls_ca_host_dir: "~/.simple-stack/tls/{{ inventory_hostname }}" +nomad_tls_ca_host_dir: "~/.simple-stack/tls" nomad_tls_ca_pubkey: "simplestack-ca.pem" nomad_tls_ca_privatekey: "simplestack-ca-key.pem" nomad_tls_ca_provider: "ownca" @@ -181,31 +183,33 @@ nomad_tls_host_certificate_dir: "/etc/ssl/simplestack" nomad_tls_common_name: "nomad" nomad_tls_check_delay: "+2w" -nomad_tls_http: "true" -nomad_tls_rpc: "true" - # TLS Server nomad_tls_cert_server: "{{ nomad_dc_name }}-server-nomad.pem" nomad_tls_privatekey_server: "{{ nomad_dc_name }}-server-nomad.key" -nomad_tls_common_name_server: "*.{{ 
nomad_dc_name }}.nomad" -nomad_tls_subject_alt_name_server: "DNS:localhost,IP:127.0.0.1,DNS:server.global.nomad,DNS:server.{{ nomad_region }}.nomad,DNS:server.{{ nomad_dc_name }}.nomad,DNS:*.{{ nomad_dc_name }}.nomad,IP:172.26.64.1,IP:172.17.0.1,IP:172.18.0.1" +nomad_tls_common_name_server: "*.{{ nomad_dc_name }}.{{ nomad_tls_common_name }}" +# nomad_tls_subject_alt_name_server: "DNS:localhost,IP:127.0.0.1,DNS:server.global.{{ certificate_subject_alt_name }},DNS:server.{{ nomad_region }}.{{ certificate_subject_alt_name }},DNS:server.{{ nomad_dc_name }}.{{ certificate_subject_alt_name }},DNS:*.{{ nomad_dc_name }}.{{ certificate_subject_alt_name }},IP:172.26.64.1,IP:172.17.0.1,IP:172.18.0.1" +# nomad_tls_subject_alt_name_server: "DNS:localhost,IP:127.0.0.1,DNS:server.global.nomad,DNS:server.{{ nomad_region }}.nomad,DNS:server.{{ nomad_dc_name }}.nomad,DNS:*.{{ nomad_dc_name }}.nomad,IP:172.26.64.1,IP:172.17.0.1,IP:172.18.0.1" +nomad_tls_subject_alt_name_server: "DNS:localhost,IP:127.0.0.1,IP:172.17.0.1,DNS:server.global.nomad,DNS:server.{{ nomad_region }}.nomad,DNS:server.{{ nomad_dc_name }}.nomad,DNS:*.{{ nomad_dc_name }}.nomad" # TLS client -nomad_tls_cert_client: "{{ nomad_dc_name }}-client-nomad.pem" -nomad_tls_privatekey_client: "{{ nomad_dc_name }}-client-nomad.key" +nomad_tls_cert_client: "{{ inventory_hostname }}-{{ nomad_dc_name }}-client-nomad.pem" +nomad_tls_privatekey_client: "{{ inventory_hostname }}-{{ nomad_dc_name }}-client-nomad.key" -nomad_tls_common_name_client: "*.{{ nomad_dc_name }}.nomad" -nomad_tls_subject_alt_name_client: "DNS:localhost,IP:127.0.0.1,DNS:client.global.nomad,DNS:client.{{ nomad_region }}.nomad,DNS:client.{{ nomad_dc_name }}.nomad,DNS:*.{{ nomad_dc_name }}.nomad,IP:172.26.64.1,IP:172.17.0.1,IP:172.18.0.1" +nomad_tls_common_name_client: "*.{{ nomad_dc_name }}.{{ nomad_tls_common_name }}" +# nomad_tls_subject_alt_name_client: "DNS:localhost,IP:127.0.0.1,DNS:client.global.{{ certificate_subject_alt_name }},DNS:client.{{ nomad_region 
}}.{{ nomad_tls_common_name }},DNS:client.{{ nomad_dc_name }}.{{ nomad_tls_common_name }},DNS:*.{{ nomad_dc_name }}.{{ nomad_tls_common_name }},IP:172.26.64.1,IP:172.17.0.1,IP:172.18.0.1" +# nomad_tls_subject_alt_name_client: "DNS:localhost,IP:127.0.0.1,DNS:client.global.nomad,DNS:client.{{ nomad_region }}.nomad,DNS:client.{{ nomad_dc_name }}.nomad,DNS:*.{{ nomad_dc_name }}.nomad,IP:172.26.64.1,IP:172.17.0.1,IP:172.18.0.1" +nomad_tls_subject_alt_name_client: "DNS:localhost,IP:127.0.0.1,IP:172.17.0.1,DNS:client.global.nomad,DNS:client.{{ nomad_region }}.nomad,DNS:client.{{ nomad_dc_name }}.nomad,DNS:*.{{ nomad_dc_name }}.nomad" nomad_tls_rpc_upgrade_mode: "false" nomad_tls_verify_server_hostname: "true" nomad_tls_verify_https_client: "false" # ACL -nomad_acl_enabled: "true" -nomad_acl_token_ttl: "30s" -nomad_acl_policy_ttl: "30s" +nomad_acl_enabled: true +nomad_acl_token_ttl: 30s +nomad_acl_policy_ttl: 30s +nomad_acl_replication_token: "" # Docker nomad_docker_client_dc_name: "dc1" @@ -279,13 +283,13 @@ nomad_telemetry_circonus_check_force_metric_activation: "false" # nomad_telemetry_circonus_broker_select_tag: "" # Autopilot -nomad_autopilot_cleanup_dead_servers: "true" -nomad_autopilot_last_contact_threshold: "200ms" +nomad_autopilot_cleanup_dead_servers: true +nomad_autopilot_last_contact_threshold: 200ms nomad_autopilot_max_trailing_logs: 250 -nomad_autopilot_server_stabilization_time: "10s" +nomad_autopilot_server_stabilization_time: 10s # UI -nomad_ui_enabled: "true" +nomad_ui_enabled: true nomad_ui_content_security_policy_connect_src: "*" nomad_ui_content_security_policy_default_src: "'none'" nomad_ui_content_security_policy_form_action: "'none'" diff --git a/ansible/playbooks/paas/roles/nomad/handlers/main.yml b/ansible/playbooks/paas/roles/nomad/handlers/main.yml index 9ea93323..7ce58f2c 100644 --- a/ansible/playbooks/paas/roles/nomad/handlers/main.yml +++ b/ansible/playbooks/paas/roles/nomad/handlers/main.yml @@ -5,13 +5,6 @@ ansible.builtin.apt: 
update_cache: true -- name: Netplan_apply - ansible.builtin.command: "netplan apply" - async: 45 - poll: 0 - register: netplan_apply - changed_when: netplan_apply.ansible_job_id != 0 - - name: Nomad_restart ansible.builtin.systemd_service: name: nomad diff --git a/ansible/playbooks/paas/roles/nomad/tasks/01_debug.yml b/ansible/playbooks/paas/roles/nomad/tasks/01_debug.yml new file mode 100644 index 00000000..b3a46a7a --- /dev/null +++ b/ansible/playbooks/paas/roles/nomad/tasks/01_debug.yml @@ -0,0 +1,35 @@ +--- +- name: "Nomad | debug | Check number of hosts" + ansible.builtin.debug: + msg: "Nomad will be deployed on {{ (ansible_play_hosts | length) }} host{% if (ansible_play_hosts | length) > 1 %}s{% endif %}" + verbosity: 1 + +- name: "Nomad | debug | nomad_node_role" + ansible.builtin.debug: + msg: "{{ nomad_node_role }}" + verbosity: 1 + +- name: "Nomad | debug | nomad_bind_address" + ansible.builtin.debug: + msg: "{{ nomad_bind_address }}" + verbosity: 1 + +- name: "Nomad | debug | nomad_advertise_address" + ansible.builtin.debug: + msg: "{{ nomad_advertise_address }}" + verbosity: 1 + +- name: "Nomad | debug | nomad_servers" + ansible.builtin.debug: + msg: "{{ nomad_servers }}" + verbosity: 1 + +- name: "Nomad | debug | nomad_client_meta_list" + ansible.builtin.debug: + msg: "{{ nomad_client_meta_list }}" + verbosity: 1 + +- name: "Nomad | debug | nomad_servers_advertise_address" + ansible.builtin.debug: + msg: "{{ nomad_servers_advertise_address }}" + verbosity: 1 diff --git a/ansible/playbooks/paas/roles/nomad/tasks/01_nodes_roles.yml b/ansible/playbooks/paas/roles/nomad/tasks/01_nodes_roles.yml deleted file mode 100644 index 9ec5412d..00000000 --- a/ansible/playbooks/paas/roles/nomad/tasks/01_nodes_roles.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- -- name: Check number of host in play - ansible.builtin.debug: - msg: "Nomad will be deployed on {{ (ansible_play_hosts | length) }} host{% if (ansible_play_hosts | length) > 1 %}s{% endif %}" - verbosity: 1 - 
run_once: true - -- name: "Nomad | Server Quorum" - ansible.builtin.debug: - msg: "Server quorum: {{ nomad_servers_quorum }}" - verbosity: 1 - run_once: true - when: - - nomad_mode == 'cluster' - - nomad_roles_auto_assign - -- name: "Nomad | Clients available" - ansible.builtin.debug: - msg: "Clients available: {{ nomad_clients_available }}" - verbosity: 1 - run_once: true - when: - - nomad_mode == 'cluster' - - nomad_roles_auto_assign - -- name: "Nomad | Assign server role" - ansible.builtin.set_fact: - nomad_node_role: "server" - when: - - nomad_mode == 'cluster' - - inventory_hostname in groups[nomad_cluster_name][0:(nomad_servers_quorum | int )] - - nomad_roles_auto_assign - -- name: "Nomad | Assign client role" - ansible.builtin.set_fact: - nomad_node_role: "client" - when: - - nomad_mode == 'cluster' - - inventory_hostname in groups[nomad_cluster_name][(nomad_servers_quorum | int ):] - - nomad_roles_auto_assign - -- name: "Nomad | Set role to nodes" - ansible.builtin.set_fact: - nomad_node_role: "{{ nomad_node_role }}" - when: - - nomad_mode == 'cluster' - - nomad_node_role is defined - -- name: "Nomad | Set role to node" - ansible.builtin.set_fact: - nomad_node_role: "both" - when: nomad_mode == 'single' - -- name: "Nomad | Insert Node role in local facts" - ansible.builtin.copy: - dest: /etc/ansible/facts.d/nomad_node_role.fact - content: "{{ nomad_node_role | to_nice_json }}" - mode: "0600" - when: - - ansible_local.nomad_node_role.nomad_node_role is not defined - -- name: "Nomad | Roles Status" - ansible.builtin.debug: - msg: | - {{ inventory_hostname }} - Nomad role: {{ nomad_node_role }} - verbosity: 1 - -- name: "Nomad | Commons tasks | Set-Timezone" - community.general.timezone: - name: "{{ nomad_timezone }}" - hwclock: local diff --git a/ansible/playbooks/paas/roles/nomad/tasks/02_network.yml b/ansible/playbooks/paas/roles/nomad/tasks/02_network.yml index f6d53e9c..263a9373 100644 --- a/ansible/playbooks/paas/roles/nomad/tasks/02_network.yml +++ 
b/ansible/playbooks/paas/roles/nomad/tasks/02_network.yml @@ -35,9 +35,9 @@ verbosity: 1 when: not cni_install.stat.exists -- name: "Nomad | CNI | Unarchive plugin CNI for {{ architecture | upper }}" +- name: "Nomad | CNI | Unarchive plugin CNI" ansible.builtin.unarchive: - src: https://github.com/containernetworking/plugins/releases/download/{{ tag.json.tag_name }}/cni-plugins-linux-{{ architecture }}-{{ tag.json.tag_name }}.tgz + src: https://github.com/containernetworking/plugins/releases/download/{{ tag.json.tag_name }}/cni-plugins-linux-{{ nomad_architecture }}-{{ tag.json.tag_name }}.tgz dest: /opt/cni/bin remote_src: true when: not cni_install.stat.exists @@ -56,13 +56,3 @@ - { name: "net.bridge.bridge-nf-call-arptables", value: "1" } - { name: "net.bridge.bridge-nf-call-ip6tables", value: "1" } - { name: "net.bridge.bridge-nf-call-iptables", value: "1" } - -- name: "Nomad | Network | Conf Netplan for bridge(s)" - ansible.builtin.template: - src: 50-bridge.yaml.j2 - dest: /etc/netplan/50-bridge.yaml - mode: "0600" - notify: Netplan_apply - -- name: "Nomad | Network | Flush handlers" - ansible.builtin.meta: flush_handlers diff --git a/ansible/playbooks/paas/roles/nomad/tasks/04_tls_certs.yml b/ansible/playbooks/paas/roles/nomad/tasks/04_tls_certs.yml index d231059b..54f88757 100644 --- a/ansible/playbooks/paas/roles/nomad/tasks/04_tls_certs.yml +++ b/ansible/playbooks/paas/roles/nomad/tasks/04_tls_certs.yml @@ -1,4 +1,18 @@ --- +- name: "Certificate | Create TLS directory on target" + ansible.builtin.file: + path: "{{ nomad_tls_host_certificate_dir }}" + state: directory + mode: '0755' + +- name: "Certificate | Copy Public certs on nodes - {{ nomad_tls_ca_pubkey }}" + ansible.builtin.copy: + src: "{{ nomad_tls_ca_host_dir }}/{{ nomad_tls_ca_pubkey }}" + dest: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" + owner: "root" + group: "root" + mode: "0640" + - name: Nomad | Copy certificate on server (or both) nodes when: nomad_node_role in 
['server', 'both'] block: @@ -20,7 +34,6 @@ name: certificate tasks_from: client vars: - certificate_ca_host_dir: "{{ nomad_tls_ca_host_dir }}" certificate_ca_pubkey: "{{ nomad_tls_ca_pubkey }}" certificate_ca_privatekey: "{{ nomad_tls_ca_privatekey }}" certificate_ca_provider: "{{ nomad_tls_ca_provider }}" @@ -29,6 +42,7 @@ certificate_client_privatekey: "{{ nomad_tls_privatekey_server }}" certificate_common_name: "{{ nomad_tls_common_name_server }}" certificate_subject_alt_name: "{{ nomad_tls_subject_alt_name_server }}" + run_once: true when: not cert_tls_server_present.stat.exists or (cert_tls_server_present.stat.exists and not tls_check_server.valid_at.delay) - name: "Nomad | Copy cert private server key on nodes" @@ -52,7 +66,7 @@ block: - name: "Nomad | Check if TLS cert exists for Client" ansible.builtin.stat: - path: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_client }}" + path: "{{ nomad_tls_ca_host_dir }}/{{ nomad_tls_cert_client }}" register: cert_tls_client_present - name: "Nomad | Get information on generated certificate for Clients" @@ -68,7 +82,6 @@ name: certificate tasks_from: client vars: - certificate_ca_host_dir: "{{ nomad_tls_ca_host_dir }}" certificate_ca_pubkey: "{{ nomad_tls_ca_pubkey }}" certificate_ca_privatekey: "{{ nomad_tls_ca_privatekey }}" certificate_ca_provider: "{{ nomad_tls_ca_provider }}" @@ -77,11 +90,7 @@ certificate_client_privatekey: "{{ nomad_tls_privatekey_client }}" certificate_common_name: "{{ nomad_tls_common_name_client }}" certificate_subject_alt_name: "{{ nomad_tls_subject_alt_name_client }}" - when: - - nomad_mode == 'cluster' - - ( groups[nomad_deploy_cluster_name] | map('extract', hostvars) | selectattr('nomad_node_role', 'equalto', 'client') | map(attribute='inventory_hostname') | length ) >= 1 - - ( not ( groups[nomad_deploy_cluster_name] | map('extract', hostvars) | selectattr('nomad_node_role', 'equalto', 'client') | map(attribute='cert_tls_client_present.stat.exists') | list | first ) ) or - (( 
groups[nomad_deploy_cluster_name] | map('extract', hostvars) | selectattr('nomad_node_role', 'equalto', 'client') | map(attribute='cert_tls_client_present.stat.exists') | list | first ) and not (groups[nomad_deploy_cluster_name] | map('extract', hostvars) | selectattr('nomad_node_role', 'equalto', 'client') | map(attribute='tls_check_client.valid_at.delay') | list | first ) ) + when: nomad_mode == 'cluster' - name: "Nomad | Copy cert client key on nodes" ansible.builtin.copy: diff --git a/ansible/playbooks/paas/roles/nomad/tasks/05_install.yml b/ansible/playbooks/paas/roles/nomad/tasks/05_install.yml index dc2cda04..8970d792 100644 --- a/ansible/playbooks/paas/roles/nomad/tasks/05_install.yml +++ b/ansible/playbooks/paas/roles/nomad/tasks/05_install.yml @@ -1,6 +1,9 @@ +--- - name: "Nomad Install | Install binary" ansible.builtin.apt: name: nomad + state: latest + allow_change_held_packages: true update_cache: true when: nomad_version is not defined diff --git a/ansible/playbooks/paas/roles/nomad/tasks/06_configuration.yml b/ansible/playbooks/paas/roles/nomad/tasks/06_configuration.yml index bd6e5ed5..fc3057c1 100644 --- a/ansible/playbooks/paas/roles/nomad/tasks/06_configuration.yml +++ b/ansible/playbooks/paas/roles/nomad/tasks/06_configuration.yml @@ -15,26 +15,16 @@ ansible.builtin.set_fact: nomad_encrypt_key: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_encrypt_key', missing='create', userpass=nomad_encrypt_key_out.stdout) }}" -- name: "Nomad Install | Set Gossip Encryption Key init done local facts" - ansible.builtin.copy: - dest: /etc/ansible/facts.d/nomad_encrypt_key.fact - content: | - { - "nomad_encrypt_key": "init_done" - } - mode: "0600" - when: ansible_local.nomad_encrypt_key.nomad_encrypt_key is not defined - - name: "Nomad Configuration | Add user nomad to docker group" ansible.builtin.user: - name: "nomad" + name: nomad groups: docker append: true when: nomad_node_role == 'client' or nomad_node_role == 'both' - 
name: "Nomad Configuration | Insert Nomad docker configuration" ansible.builtin.template: - src: "docker.hcl.j2" + src: docker.hcl.j2 dest: "{{ nomad_config_dir }}/docker.hcl" owner: nomad group: nomad @@ -43,7 +33,7 @@ - name: "Nomad Install | Copy configurations files" ansible.builtin.template: - src: "nomad.hcl.j2" + src: nomad.hcl.j2 dest: "{{ nomad_config_dir }}/nomad.hcl" owner: nomad group: nomad @@ -51,23 +41,23 @@ - name: "Nomad Install | Copy configurations files for servers" ansible.builtin.template: - src: "server.hcl.j2" + src: server.hcl.j2 dest: "{{ nomad_config_dir }}/server.hcl" owner: nomad group: nomad mode: '0644' notify: Nomad_restart - when: nomad_node_role == 'server' or nomad_node_role == 'both' + when: nomad_node_role in ['server', 'both'] - name: "Nomad Install | Copy configurations files for clients" ansible.builtin.template: - src: "client.hcl.j2" + src: client.hcl.j2 dest: "{{ nomad_config_dir }}/client.hcl" owner: nomad group: nomad mode: '0644' notify: Nomad_restart - when: nomad_node_role == 'client' or nomad_node_role == 'both' + when: nomad_node_role in ['client', 'both'] - name: "Nomad | Configuration | Flush handlers" ansible.builtin.meta: flush_handlers @@ -79,7 +69,7 @@ - name: Block block: - - name: "Nomad Install | Read Nomad management token from PasswordStore" + - name: "Nomad Install | Read Nomad management token from UI" ansible.builtin.set_fact: nomad_management_token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_management_token', missing='error') }}" rescue: @@ -96,20 +86,10 @@ register: nomad_management_token_result run_once: true - - name: "Nomad Install | Set Nomad management token and insert in PasswordStore" + - name: "Nomad Install | Set Nomad management token and insert in UI" ansible.builtin.set_fact: nomad_management_token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_management_token', missing='create', 
userpass=nomad_management_token_result.json.SecretID) }}" -- name: "Nomad Install | Set Nomad management token init done local facts" - ansible.builtin.copy: - dest: /etc/ansible/facts.d/nomad_management_token.fact - content: | - { - "nomad_management_token": "init_done" - } - mode: "0600" - when: ansible_local.nomad_management_token.nomad_management_token is not defined - - name: "Nomad Configuration | Enable MemoryOversubscription" ansible.builtin.uri: url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/operator/scheduler/configuration" @@ -125,7 +105,7 @@ return_content: true status_code: - 200 - when: nomad_node_role == 'server' or nomad_node_role == 'both' + when: nomad_node_role in ['server', 'both'] register: nomad_memoryoversubscription ignore_errors: true @@ -139,7 +119,7 @@ - "plugin-s3-node.hcl" when: - nomad_s3_storage_enabled - - nomad_node_role == 'client' or nomad_node_role == 'both' + - nomad_node_role in ['client', 'both'] notify: Nomad_s3_jobs - name: "Nomad Configuration | Flush handlers" diff --git a/ansible/playbooks/paas/roles/nomad/tasks/07_autoeligibility.yml b/ansible/playbooks/paas/roles/nomad/tasks/07_autoeligibility.yml index 824b1b67..08c70aa2 100644 --- a/ansible/playbooks/paas/roles/nomad/tasks/07_autoeligibility.yml +++ b/ansible/playbooks/paas/roles/nomad/tasks/07_autoeligibility.yml @@ -1,103 +1,110 @@ --- -- name: "Nomad Policy | Get policies list" - ansible.builtin.uri: - url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/policies" - ca_path: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" - client_cert: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" - client_key: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" - method: GET - headers: - X-Nomad-Token: "{{ nomad_management_token | default(ansible_local.nomad_management_token.nomad_management_token) }}" - status_code: - - 200 - - 404 - return_content: true - 
register: nomad_policies_list_raw - -- name: "Nomad Policy | Set policies list fact" - ansible.builtin.set_fact: - nomad_policies_list: "{{ nomad_policies_list_raw.json | community.general.json_query('[*].Name') | string }}" - -- name: "Nomad Policy | Create policy for Nomad access autoeligibility" - ansible.builtin.uri: - url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/policy/autoeligibility" - ca_path: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" - client_cert: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" - client_key: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" - method: POST - headers: - X-Nomad-Token: "{{ nomad_management_token | default(ansible_local.nomad_management_token.nomad_management_token) }}" - body: | - { - "Name": "autoeligibility", - "Description": "Nomad policy for single mode install operations (drain)", - "Rules": "node {\n policy = \"write\"\n}\n\nagent {\n policy = \"write\"\n}" - } - body_format: json - status_code: - - 200 - - 201 - when: '"autoeligibility" not in nomad_policies_list' - -- name: "Nomad Policy | Warning policy already created" - ansible.builtin.debug: - msg: "Policy already created" - verbosity: 1 - when: '"autoeligibility" in nomad_policies_list' - -- name: "Nomad Token | Get tokens list" - ansible.builtin.uri: - url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/tokens" - ca_path: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" - client_cert: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" - client_key: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" - method: GET - headers: - X-Nomad-Token: "{{ nomad_management_token | default(ansible_local.nomad_management_token.nomad_management_token) }}" - status_code: - - 200 - - 404 - return_content: true - register: nomad_tokens_list_raw - -- name: "Nomad Token | Set tokens list fact" - 
ansible.builtin.set_fact: - nomad_tokens_list: "{{ nomad_tokens_list_raw.json | community.general.json_query('[*].Name') | string }}" +- name: Nomad | Copy certificate on server (or both) nodes + when: nomad_node_role in ['server', 'both'] + block: + - name: "Nomad Policy | Get policies list" + ansible.builtin.uri: + url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/policies" + ca_path: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" + client_cert: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" + client_key: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" + method: GET + headers: + X-Nomad-Token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_management_token', missing='error') }}" + status_code: + - 200 + - 404 + return_content: true + register: nomad_policies_list_raw -- name: "Nomad Token | Debug nomad_tokens_list" - ansible.builtin.debug: - msg: "{{ nomad_tokens_list }}" - verbosity: 1 + - name: Debug nomad_policies_list_raw (for auto eligibility) + ansible.builtin.debug: + msg: "{{ nomad_policies_list_raw.json }}" -- name: Block - block: - - name: "Nomad Install | Read Nomad nomad autoeligibility token" + - name: "Nomad Policy | Set policies list fact" ansible.builtin.set_fact: - nomad_autoeligibility_token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_autoeligibility_token', missing='error') }}" + nomad_policies_list: "{{ nomad_policies_list_raw.json | community.general.json_query('[*].Name') | string }}" - rescue: - - name: "Nomad Token | Create token for Nomad access autoeligibility" + - name: "Nomad Policy | Create policy for Nomad access autoeligibility" ansible.builtin.uri: - url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/token" + url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/policy/autoeligibility" ca_path: "{{ 
nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" client_cert: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" client_key: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" - method: PUT + method: POST headers: - X-Nomad-Token: "{{ nomad_management_token | default(ansible_local.nomad_management_token.nomad_management_token) }}" + X-Nomad-Token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_management_token', missing='error') }}" body: | { "Name": "autoeligibility", - "Type": "client", - "Policies": ["autoeligibility"], - "Global": false + "Description": "Nomad policy for single mode install operations (drain)", + "Rules": "node {\n policy = \"write\"\n}\n\nagent {\n policy = \"write\"\n}" } body_format: json status_code: - 200 - register: nomad_new_token_name + - 201 + when: '"autoeligibility" not in nomad_policies_list' - - name: "Nomad Install | Set Nomad Autoeligibility token and insert in PasswordStore" + - name: "Nomad Policy | Warning policy already created" + ansible.builtin.debug: + msg: "Policy already created" + verbosity: 1 + when: '"autoeligibility" in nomad_policies_list' + + - name: "Nomad Token | Get tokens list" + ansible.builtin.uri: + url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/tokens" + ca_path: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" + client_cert: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" + client_key: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" + method: GET + headers: + X-Nomad-Token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_management_token', missing='error') }}" + status_code: + - 200 + - 404 + return_content: true + register: nomad_tokens_list_raw + + - name: "Nomad Token | Set tokens list fact" ansible.builtin.set_fact: - nomad_autoeligibility_token: "{{ lookup('simple-stack-ui', type='secret', 
key=inventory_hostname, subkey='nomad_autoeligibility_token', missing='create', userpass=nomad_new_token_name.json.SecretID) }}" + nomad_tokens_list: "{{ nomad_tokens_list_raw.json | community.general.json_query('[*].Name') | string }}" + + - name: "Nomad Token | Debug nomad_tokens_list" + ansible.builtin.debug: + msg: "{{ nomad_tokens_list }}" + verbosity: 1 + + - name: "Nomad Token | Read or create autoeligibility token" + block: + - name: "Nomad Install | Read Nomad autoeligibility token" + ansible.builtin.set_fact: + nomad_autoeligibility_token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_autoeligibility_token', missing='error') }}" + + rescue: + - name: "Nomad Token | Create token for Nomad access autoeligibility" + ansible.builtin.uri: + url: "{{ nomad_http_scheme }}://{{ nomad_http_ip }}:{{ nomad_http_port }}/v1/acl/token" + ca_path: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" + client_cert: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" + client_key: "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" + method: PUT + headers: + X-Nomad-Token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_management_token', missing='error') }}" + body: | + { + "Name": "autoeligibility", + "Type": "client", + "Policies": ["autoeligibility"], + "Global": false + } + body_format: json + status_code: + - 200 + register: nomad_new_token_name + + - name: "Nomad Install | Set Nomad Autoeligibility token and insert in PasswordStore" + ansible.builtin.set_fact: + nomad_autoeligibility_token: "{{ lookup('simple-stack-ui', type='secret', key=inventory_hostname, subkey='nomad_autoeligibility_token', missing='create', userpass=nomad_new_token_name.json.SecretID) }}" diff --git a/ansible/playbooks/paas/roles/nomad/tasks/main.yml b/ansible/playbooks/paas/roles/nomad/tasks/main.yml index 77f05d64..75a72cd8 100644 --- a/ansible/playbooks/paas/roles/nomad/tasks/main.yml +++ 
b/ansible/playbooks/paas/roles/nomad/tasks/main.yml @@ -1,6 +1,11 @@ --- -- name: "Nomad | Commons tasks" - ansible.builtin.include_tasks: "01_nodes_roles.yml" +- name: "Nomad | Debug" + ansible.builtin.include_tasks: "01_debug.yml" + +- name: "Nomad | Set timezone" + community.general.timezone: + name: "{{ nomad_timezone }}" + hwclock: local - name: "Nomad | Install CNI" ansible.builtin.include_tasks: "02_network.yml" @@ -19,11 +24,7 @@ - name: "Nomad | Install Nomad Auto Eligibility Node" ansible.builtin.include_tasks: "07_autoeligibility.yml" - when: nomad_mode == 'single' - name: "Nomad | Change SystemD configuration" ansible.builtin.include_tasks: "08_systemd_tuning.yml" -- name: "Nomad | Firewall configuration" - ansible.builtin.include_tasks: "09_firewall.yml" - when: nomad_firewall diff --git a/ansible/playbooks/paas/roles/nomad/templates/50-bridge.yaml.j2 b/ansible/playbooks/paas/roles/nomad/templates/50-bridge.yaml.j2 deleted file mode 100644 index 711ab642..00000000 --- a/ansible/playbooks/paas/roles/nomad/templates/50-bridge.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -network: - version: 2 - bridges: -{% for item in nomad_bridge_list %} - {{ item.interface }}: - addresses: [{{ item.ip_range }}] - dhcp4: no - dhcp6: no -{% endfor %} diff --git a/ansible/playbooks/paas/roles/nomad/templates/client.hcl.j2 b/ansible/playbooks/paas/roles/nomad/templates/client.hcl.j2 index 5eb0041d..184a5b9d 100644 --- a/ansible/playbooks/paas/roles/nomad/templates/client.hcl.j2 +++ b/ansible/playbooks/paas/roles/nomad/templates/client.hcl.j2 @@ -1,14 +1,19 @@ client { - enabled = {{ nomad_client_enabled }} - + enabled = {{ nomad_client_enabled | bool | lower }} state_dir = "{{ nomad_state_dir_client }}" node_class = "{{ nomad_client_node_class }}" node_pool = "{{ nomad_client_node_pool }}" - no_host_uuid = {{ nomad_client_no_host_uuid }} + no_host_uuid = {{ nomad_client_no_host_uuid | bool | lower }} + servers = [ + {%- set comma = joiner(",") -%} + {%- for server in 
nomad_servers_advertise_address -%} + {{ comma() }}"{{ server }}:{{ nomad_ports.rpc }}" + {%- endfor -%} ] + {% if nomad_client_network_interface is defined %} network_interface = "{{ nomad_client_network_interface }}" {% endif %} @@ -22,6 +27,7 @@ client { reserved_ports = "{{ nomad_client_host_network_default.reserved_ports }}" {% endif %} } + {% if nomad_mode == "cluster" %} host_network "{{ nomad_client_host_network_cluster.name }}" { interface = "{{ nomad_client_host_network_cluster.interface }}" @@ -33,21 +39,6 @@ client { {% endif %} } {% endif %} -{% if nomad_bridge %} -{% for item in nomad_bridge_list %} - host_network "{{ item.name }}" { -{% if nomad_bridge_list is defined and item.interface is defined %} - interface = "{{ item.interface }}" -{% endif %} -{% if nomad_bridge_list is defined and item.cidr is defined %} - cidr = "{{ item.cidr }}" -{% endif %} -{% if nomad_bridge_list is defined and item.reserved_ports is defined %} - reserved_ports = "{{ item.reserved_ports }}" -{% endif %} - } -{% endfor %} -{% endif %} {% if nomad_client_meta_list%} meta = { diff --git a/ansible/playbooks/paas/roles/nomad/templates/docker.hcl.j2 b/ansible/playbooks/paas/roles/nomad/templates/docker.hcl.j2 index 467a8256..14043718 100644 --- a/ansible/playbooks/paas/roles/nomad/templates/docker.hcl.j2 +++ b/ansible/playbooks/paas/roles/nomad/templates/docker.hcl.j2 @@ -26,15 +26,15 @@ plugin "docker" { allow_caps = [{% for item in nomad_docker_client_allow_caps %}"{{ item }}"{% if not loop.last %}, {% endif %}{% endfor %}] gc { - image = {{ nomad_docker_gc_image }} - image_delay = "{{ nomad_docker_gc_image_delay }}" - container = {{ nomad_docker_gc_container }} + image = {{ nomad_docker_gc_image }} + image_delay = "{{ nomad_docker_gc_image_delay }}" + container = {{ nomad_docker_gc_container }} - dangling_containers { - enabled = {{ nomad_docker_gc_dangling_containers_enabled }} - dry_run = {{ nomad_docker_gc_dangling_containers_dry_run }} - period = "{{ 
nomad_docker_gc_dangling_containers_period }}" - creation_grace = "{{ nomad_docker_gc_dangling_containers_creation_grace }}" + dangling_containers { + enabled = {{ nomad_docker_gc_dangling_containers_enabled }} + dry_run = {{ nomad_docker_gc_dangling_containers_dry_run }} + period = "{{ nomad_docker_gc_dangling_containers_period }}" + creation_grace = "{{ nomad_docker_gc_dangling_containers_creation_grace }}" } } } diff --git a/ansible/playbooks/paas/roles/nomad/templates/nomad.hcl.j2 b/ansible/playbooks/paas/roles/nomad/templates/nomad.hcl.j2 index 2d493d4b..7d027483 100644 --- a/ansible/playbooks/paas/roles/nomad/templates/nomad.hcl.j2 +++ b/ansible/playbooks/paas/roles/nomad/templates/nomad.hcl.j2 @@ -2,76 +2,50 @@ name = "{{ nomad_node_name }}" region = "{{ nomad_region }}" datacenter = "{{ nomad_dc_name }}" -disable_anonymous_signature = {{ nomad_disable_anonymous_signature }} -disable_update_check = {{ nomad_disable_update_check }} +disable_anonymous_signature = {{ nomad_disable_anonymous_signature | bool | lower }} +disable_update_check = {{ nomad_disable_update_check | bool | lower }} data_dir = "{{ nomad_data_dir }}" -{% if nomad_mode == 'single'%} -bind_addr = "{{ nomad_bind_addr }}" -{% else %} -addresses { - http = "{{ nomad_address_http }}" - rpc = "{{ nomad_address_rpc }}" - serf = "{{ nomad_address_serf }}" -} -{% endif %} +bind_addr = "{{ nomad_bind_address }}" advertise { - http = "{{ nomad_advertise_http }}" - rpc = "{{ nomad_advertise_rpc }}" - serf = "{{ nomad_advertise_serf }}" + http = "{{ nomad_advertise_address }}:{{ nomad_ports.http }}" + rpc = "{{ nomad_advertise_address }}:{{ nomad_ports.rpc }}" + serf = "{{ nomad_advertise_address }}:{{ nomad_ports.serf }}" } ports { - http = {{ nomad_port_http }} - rpc = {{ nomad_port_rpc }} - serf = {{ nomad_port_serf }} + http = {{ nomad_ports.http }} + rpc = {{ nomad_ports.rpc }} + serf = {{ nomad_ports.serf }} } -enable_debug = {{ nomad_debug }} +enable_debug = {{ nomad_debug | bool | lower }} 
log_file = "{{ nomad_log_file }}" log_level = "{{ nomad_log_level }}" log_rotate_bytes = {{ nomad_log_rotate_bytes }} log_rotate_duration = "{{ nomad_log_rotate_duration }}" log_rotate_max_files = {{ nomad_log_rotate_max_files }} -leave_on_terminate = {{ nomad_leave_on_terminate | lower }} -leave_on_interrupt = {{ nomad_leave_on_interrupt | lower }} - - -{% if nomad_node_role == 'server' or nomad_node_role == 'both' %} - -tls { - http = {{ nomad_tls_http }} - rpc = {{ nomad_tls_rpc }} - ca_file = "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" - cert_file = "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_server }}" - key_file = "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_server }}" - rpc_upgrade_mode = {{ nomad_tls_rpc_upgrade_mode }} - verify_server_hostname = "{{ nomad_tls_verify_server_hostname }}" - verify_https_client = "{{ nomad_tls_verify_https_client }}" -} - -{% elif nomad_node_role == 'client' %} +leave_on_terminate = {{ nomad_leave_on_terminate | bool | lower }} +leave_on_interrupt = {{ nomad_leave_on_interrupt | bool | lower }} tls { - http = {{ nomad_tls_http }} - rpc = {{ nomad_tls_rpc }} + http = true + rpc = true ca_file = "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_ca_pubkey }}" - cert_file = "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_cert_client }}" - key_file = "{{ nomad_tls_host_certificate_dir }}/{{ nomad_tls_privatekey_client }}" + cert_file = "{{ nomad_tls_host_certificate_dir }}/{{ (nomad_node_role == 'client') | ternary(nomad_tls_cert_client, nomad_tls_cert_server) }}" + key_file = "{{ nomad_tls_host_certificate_dir }}/{{ (nomad_node_role == 'client') | ternary(nomad_tls_privatekey_client, nomad_tls_privatekey_server) }}" rpc_upgrade_mode = {{ nomad_tls_rpc_upgrade_mode }} verify_server_hostname = "{{ nomad_tls_verify_server_hostname }}" verify_https_client = "{{ nomad_tls_verify_https_client }}" } -{% endif %} - acl { - enabled = {{ nomad_acl_enabled }} + enabled = {{ 
nomad_acl_enabled | bool | lower }} token_ttl = "{{ nomad_acl_token_ttl }}" policy_ttl = "{{ nomad_acl_policy_ttl }}" - replication_token = "" + replication_token = "{{ nomad_acl_replication_token }}" } telemetry { @@ -104,14 +78,19 @@ telemetry { } autopilot { - cleanup_dead_servers = {{ nomad_autopilot_cleanup_dead_servers }} + cleanup_dead_servers = {{ nomad_autopilot_cleanup_dead_servers | bool | lower }} last_contact_threshold = "{{ nomad_autopilot_last_contact_threshold }}" max_trailing_logs = {{ nomad_autopilot_max_trailing_logs }} server_stabilization_time = "{{ nomad_autopilot_server_stabilization_time }}" } +limits { + http_max_conns_per_client = 300 + rpc_max_conns_per_client = 300 +} + ui { - enabled = {{ nomad_ui_enabled }} + enabled = {{ nomad_ui_enabled | bool | lower }} content_security_policy { connect_src = ["{{ nomad_ui_content_security_policy_connect_src }}"] diff --git a/ansible/playbooks/paas/roles/nomad/templates/server.hcl.j2 b/ansible/playbooks/paas/roles/nomad/templates/server.hcl.j2 index 486e6a01..e9edfca8 100644 --- a/ansible/playbooks/paas/roles/nomad/templates/server.hcl.j2 +++ b/ansible/playbooks/paas/roles/nomad/templates/server.hcl.j2 @@ -1,17 +1,34 @@ server { - enabled = {{ nomad_server_enabled }} - - bootstrap_expect = {{ nomad_server_bootstrap_expect }} - + enabled = {{ nomad_server_enabled | bool | lower }} + bootstrap_expect = {{ nomad_servers | length }} data_dir = "{{ nomad_data_dir_server }}" - rejoin_after_leave = {{ nomad_server_rejoin_after_leave }} - - # enabled_schedulers = [{% for item in nomad_server_enabled_schedulers %}"{{ item }}"{% if not loop.last %},{% endif %}{% endfor %}] - - # num_schedulers = {{ nomad_server_num_schedulers }} + {% if nomad_server_retry_join | bool -%} + retry_join = [ + {%- set comma = joiner(",") -%} + {% for server in nomad_servers_advertise_address -%} + {{ comma() }}"{{ server }}" + {%- endfor -%} ] + retry_max = {{ nomad_server_retry_max }} + retry_interval = "{{ 
nomad_server_retry_interval }}" + {% else -%} + start_join = [ + {%- set comma = joiner(",") -%} + {% for server in nomad_servers_advertise_address -%} + {{ comma() }}"{{ server }}" + {%- endfor -%} ] + {%- endif %} + + rejoin_after_leave = {{ nomad_server_rejoin_after_leave | bool | lower }} + + enabled_schedulers = [ + {%- set comma = joiner(",") -%} + {% for scheduler in nomad_server_enabled_schedulers -%} + {{ comma() }}"{{ scheduler }}" + {%- endfor -%} ] + num_schedulers = {{ nomad_server_num_schedulers }} heartbeat_grace = "{{ nomad_server_heartbeat_grace }}" min_heartbeat_ttl = "{{ nomad_server_min_heartbeat_ttl }}" diff --git a/ansible/playbooks/paas/roles/nomad/vars/main.yml b/ansible/playbooks/paas/roles/nomad/vars/main.yml index ee5acfd6..524bbba6 100644 --- a/ansible/playbooks/paas/roles/nomad/vars/main.yml +++ b/ansible/playbooks/paas/roles/nomad/vars/main.yml @@ -2,44 +2,11 @@ # vars file for install # Architecture replacement -architecture_map: - i386: '386' - x86_64: 'amd64' - aarch64: 'arm64' - armv7l: 'armv7' - armv6l: 'armv6' +nomad_architecture_map: + i386: "386" + x86_64: amd64 + aarch64: arm64 + armv7l: armv7 + armv6l: armv6 -architecture: "{{ architecture_map[ansible_architecture] | default(ansible_architecture) }}" - -nomad_ufw_rules: - client: - - { proto: "tcp", port: "4646", direction: "in" } - - { proto: "tcp", port: "4646", direction: "out" } - - { proto: "tcp", port: "4647", direction: "in" } - - { proto: "tcp", port: "4647", direction: "out" } - - { proto: "tcp", port: "20000:32000", direction: "in" } - - { proto: "tcp", port: "20000:32000", direction: "out" } - - { proto: "udp", port: "20000:32000", direction: "in" } - - { proto: "udp", port: "20000:32000", direction: "out" } - server: - - { proto: "tcp", port: "4646", direction: "in" } - - { proto: "tcp", port: "4646", direction: "out" } - - { proto: "tcp", port: "4647", direction: "in" } - - { proto: "tcp", port: "4647", direction: "out" } - - { proto: "tcp", port: "4648", 
direction: "in" } - - { proto: "tcp", port: "4648", direction: "out" } - - { proto: "udp", port: "4648", direction: "in" } - - { proto: "udp", port: "4648", direction: "out" } - both: - - { proto: "tcp", port: "4646", direction: "in" } - - { proto: "tcp", port: "4646", direction: "out" } - - { proto: "tcp", port: "4647", direction: "in" } - - { proto: "tcp", port: "4647", direction: "out" } - - { proto: "tcp", port: "4648", direction: "in" } - - { proto: "tcp", port: "4648", direction: "out" } - - { proto: "udp", port: "4648", direction: "in" } - - { proto: "udp", port: "4648", direction: "out" } - - { proto: "tcp", port: "20000:32000", direction: "in" } - - { proto: "tcp", port: "20000:32000", direction: "out" } - - { proto: "udp", port: "20000:32000", direction: "in" } - - { proto: "udp", port: "20000:32000", direction: "out" } +nomad_architecture: "{{ nomad_architecture_map[ansible_architecture] }}" diff --git a/ansible/playbooks/paas/systemd-resolved.yml b/ansible/playbooks/paas/systemd-resolved.yml new file mode 100644 index 00000000..f07ac0bb --- /dev/null +++ b/ansible/playbooks/paas/systemd-resolved.yml @@ -0,0 +1,28 @@ +--- +- name: Configure systemd-resolved + any_errors_fatal: true + hosts: "{{ hosts_limit | default('infrastructure') }}" + gather_facts: true + become: true + tasks: + - name: Systemd-resolved | Create resolved directory + ansible.builtin.file: + path: /etc/systemd/resolved.conf.d + state: directory + mode: '0755' + + - name: Systemd-resolved | Copy systemd resolved config + ansible.builtin.copy: + content: | + [Resolve] + DNSStubListener=yes + dest: /etc/systemd/resolved.conf.d/systemd-resolved.conf + mode: '0644' + notify: Restart systemd-resolved + + handlers: + - name: Restart systemd-resolved + ansible.builtin.systemd: + state: restarted + daemon_reload: true + name: systemd-resolved diff --git a/ansible/playbooks/paas/timesyncd.yml b/ansible/playbooks/paas/timesyncd.yml index 27494e08..e96b42aa 100644 --- 
a/ansible/playbooks/paas/timesyncd.yml +++ b/ansible/playbooks/paas/timesyncd.yml @@ -6,13 +6,13 @@ become: true pre_tasks: - name: Create ansible facts.d directory - become: yes - file: + become: true + ansible.builtin.file: path: /etc/systemd/timesyncd.conf.d state: directory owner: "root" group: "root" - mode: 0755 + mode: '0755' - name: Update timesyncd.conf ansible.builtin.copy: diff --git a/ansible/playbooks/saas/image.yml b/ansible/playbooks/saas/image.yml index bedbb887..9f819d3d 100644 --- a/ansible/playbooks/saas/image.yml +++ b/ansible/playbooks/saas/image.yml @@ -19,6 +19,17 @@ path: "/tmp/{{ catalog }}" pre_tasks: + - name: Create temporary build directory + ansible.builtin.file: + path: "{{ item }}" + recurse: true + state: directory + mode: '0755' + loop: + - /root/.docker + - "{{ build_work_dir }}/download" + - "{{ build_work_dir }}/{{ upstream_default_arch }}" + - name: Copy docker config file ansible.builtin.copy: content: | @@ -34,16 +45,6 @@ group: root mode: '0600' - - name: Create temporary build directory - ansible.builtin.file: - path: "{{ item }}" - recurse: true - state: directory - mode: '0755' - loop: - - "{{ build_work_dir }}/download" - - "{{ build_work_dir }}/{{ upstream_default_arch }}" - tasks: - name: Install dependencies ansible.builtin.include_role: diff --git a/ansible/playbooks/saas/roles/adguard/templates/nomad.hcl b/ansible/playbooks/saas/roles/adguard/templates/nomad.hcl index 25f6db97..c3a315a5 100644 --- a/ansible/playbooks/saas/roles/adguard/templates/nomad.hcl +++ b/ansible/playbooks/saas/roles/adguard/templates/nomad.hcl @@ -3,14 +3,16 @@ job "{{ domain }}" { datacenters = ["{{ fact_instance.datacenter }}"] type = "service" +{% if software.constraints.location %} constraint { attribute = "${meta.location}" - set_contains = "{{ fact_instance.location }}" + set_contains = "{{ software.constraints.location }}" } +{% endif %} constraint { attribute = "${meta.instance}" - set_contains = "{{ inventory_hostname }}" + 
set_contains = "{{ software.instance }}" } group "{{ domain }}" { diff --git a/ansible/playbooks/saas/roles/arangodb/templates/nomad.hcl b/ansible/playbooks/saas/roles/arangodb/templates/nomad.hcl index 7d76e5b3..e6956e43 100644 --- a/ansible/playbooks/saas/roles/arangodb/templates/nomad.hcl +++ b/ansible/playbooks/saas/roles/arangodb/templates/nomad.hcl @@ -3,14 +3,16 @@ job "{{ domain }}" { datacenters = ["{{ fact_instance.datacenter }}"] type = "service" +{% if software.constraints.location %} constraint { attribute = "${meta.location}" - set_contains = "{{ fact_instance.location }}" + set_contains = "{{ software.constraints.location }}" } +{% endif %} constraint { attribute = "${meta.instance}" - set_contains = "{{ inventory_hostname }}" + set_contains = "{{ software.instance }}" } group "{{ domain }}" { diff --git a/ansible/playbooks/saas/roles/caddy/tasks/main.yml b/ansible/playbooks/saas/roles/caddy/tasks/main.yml index bd65ce9a..b45d0b3e 100644 --- a/ansible/playbooks/saas/roles/caddy/tasks/main.yml +++ b/ansible/playbooks/saas/roles/caddy/tasks/main.yml @@ -8,14 +8,7 @@ mode: "0755" loop: - "{{ software_path }}/etc/caddy" - -- name: Copy default config file - ansible.builtin.template: - src: Caddyfile - dest: "{{ software_path }}/etc/caddy/Caddyfile" - owner: root - group: root - mode: "0644" + delegate_to: "{{ software.instance }}" - name: Copy nomad job to destination ansible.builtin.template: diff --git a/ansible/playbooks/saas/roles/caddy/templates/nomad.hcl b/ansible/playbooks/saas/roles/caddy/templates/nomad.hcl index f1126c2a..edbe32a0 100644 --- a/ansible/playbooks/saas/roles/caddy/templates/nomad.hcl +++ b/ansible/playbooks/saas/roles/caddy/templates/nomad.hcl @@ -3,14 +3,16 @@ job "{{ domain }}" { datacenters = ["{{ fact_instance.datacenter }}"] type = "service" +{% if software.constraints.location %} constraint { attribute = "${meta.location}" - set_contains = "{{ fact_instance.location }}" + set_contains = "{{ software.constraints.location }}" 
} +{% endif %} constraint { attribute = "${meta.instance}" - set_contains = "{{ inventory_hostname }}" + set_contains = "{{ software.instance }}" } group "{{ domain }}" { @@ -55,6 +57,14 @@ job "{{ domain }}" { ports = ["caddy", "metrics"] } + template { + change_mode = "noop" + destination = "{{ software_path }}/etc/caddy/Caddyfile" + data = < { const LEVELS = Object.freeze(['project', 'provider', 'region', 'location', 'instance']); for (const { index_key } of dataset) { + if (!index_key) continue; // guard against missing data const parts = index_key.split('.'); // e.g. ["instance001","frontends","region1","provider1","project1"] @@ -62,15 +63,15 @@ NEWSCHEMA('Graphs', schema => { // Flatten the tfstate resources → instances → index_key const dataset = (infraResult?.items ?? []) - .flatMap(item => (item.tfstate?.resources ?? [])) - .flatMap(resource => (resource?.instances ?? [])) - .map(inst => ({ index_key: inst.index_key })); + .flatMap(item => (item.tfstate?.resources ?? [])) + .filter(resource => resource.type === "ansible_host") + .flatMap(resource => (resource?.instances ?? [])) + .map(inst => ({ index_key: inst.attributes.name })); // Load software definitions (they are displayed as separate nodes) const softResult = await DATA .find('nosql/softwares') .where('uid', $.user.id) - .error('@(Error)') .promise($); const softwareNodes = (softResult ?? 
[]).map(s => ({ diff --git a/ui/schemas/infrastructures.js b/ui/schemas/infrastructures.js index 8642aab6..3327c72c 100644 --- a/ui/schemas/infrastructures.js +++ b/ui/schemas/infrastructures.js @@ -49,11 +49,15 @@ NEWSCHEMA('Infrastructures', function (schema) { if (!infra.tfstate?.resources) continue; for (const resource of infra.tfstate.resources) { if (!resource.instances) continue; + if (resource.type !== "ansible_host") continue; for (const instance of resource.instances) { - instances.push({ id: instance.index_key, name: instance.index_key }); + // instances.push({ id: instance.index_key, name: instance.index_key }); + instances.push({ id: instance.attributes.name, name: instance.attributes.name }); } } } + + console.log(instances); $.callback(instances); } }); diff --git a/ui/schemas/inventory.js b/ui/schemas/inventory.js index db3d19b2..ee6ff867 100644 --- a/ui/schemas/inventory.js +++ b/ui/schemas/inventory.js @@ -64,7 +64,7 @@ NEWSCHEMA('Inventory', function (schema) { const softwares = await DATA.find('nosql/catalogs') .fields('name,version') - .error('@(Error)') + // .error('@(Error)') .promise(); inventory.infrastructure.vars.softwares = softwares.reduce((acc, cur) => { @@ -88,12 +88,12 @@ NEWSCHEMA('Inventory', function (schema) { for (const item of result.items) { if(!item.tfstate.resources) continue; for (const resource of item.tfstate.resources) { + if(resource.type !== "ansible_host") continue; for (const instance of resource.instances) { - dataset.push({ id: item.id, hostname: instance.index_key }); + dataset.push({ id: item.id, hostname: instance.attributes.name }); } } } - $.callback(await buildInventory(dataset), null, 2); } }); diff --git a/ui/schemas/softwares.js b/ui/schemas/softwares.js index 7eb6d90f..7cbf9e16 100644 --- a/ui/schemas/softwares.js +++ b/ui/schemas/softwares.js @@ -124,8 +124,8 @@ NEWSCHEMA('Softwares', function (schema) { input: '*instance:String, *software:UID, *size:String, *domain:String, domain_alias:String, 
*exposition:String', action: async function ($, model) { const rules = { - instance: { regex: REGEX_SOFTWARES.instance, comment: REGEX_SOFTWARES.instance.comment }, - software: { regex: REGEX_SOFTWARES.software, comment: REGEX_SOFTWARES.software.comment }, + // instance: { regex: REGEX_SOFTWARES.instance, comment: REGEX_SOFTWARES.instance.comment }, + // software: { regex: REGEX_SOFTWARES.software, comment: REGEX_SOFTWARES.software.comment }, size: { regex: REGEX_SOFTWARES.size, comment: REGEX_SOFTWARES.size.comment }, domain: { regex: REGEX_SOFTWARES.domain, comment: REGEX_SOFTWARES.domain.comment }, domain_alias: { regex: REGEX_SOFTWARES.domain_alias, comment: REGEX_SOFTWARES.domain_alias.comment, optional: true }, diff --git a/ui/schemas/variables.js b/ui/schemas/variables.js index b6937730..471a051a 100644 --- a/ui/schemas/variables.js +++ b/ui/schemas/variables.js @@ -60,7 +60,7 @@ NEWSCHEMA('Variables', function (schema) { const key2 = model.key2.replace(/\./g, '_'); const variables = await DATA.find('nosql/variables') .where('key2', key2) - .in('type', ['project', 'provider', 'region', 'instance']) + .in('type', ['project', 'provider', 'location', 'region', 'instance']) .promise($); if (!variables?.length) { @@ -204,7 +204,7 @@ NEWSCHEMA('Variables', function (schema) { stored[model.subkey] = generatePassword(model.userpass, model.nosymbols, model.length); await DATA.update('nosql/variables', { value: ENCRYPT(stored, CONF.auth_secret), dtupdated: NOW }) .where('id', result.id) - .error('@(Error)') + // .error('@(Error)') .promise($); $.callback(stored[model.subkey]); return; @@ -214,7 +214,7 @@ NEWSCHEMA('Variables', function (schema) { stored[model.subkey] = generatePassword(model.userpass, model.nosymbols, model.length); await DATA.update('nosql/variables', { value: ENCRYPT(stored, CONF.auth_secret), dtupdated: NOW }) .where('id', result.id) - .error('@(Error)') + // .error('@(Error)') .promise($); $.success(stored[model.subkey]); return; @@ -224,7 +224,7 
@@ NEWSCHEMA('Variables', function (schema) { delete stored[model.subkey]; await DATA.update('nosql/variables', { value: ENCRYPT(stored, CONF.auth_secret), dtupdated: NOW }) .where('id', result.id) - .error('@(Error)') + // .error('@(Error)') .promise($); $.success(); return;