commit 101fc6e79106fc64ad29ebea1babf7c947fcf49f Author: dave Date: Thu Nov 24 10:02:48 2022 +0300 init diff --git a/all.yml b/all.yml new file mode 100644 index 0000000..d899437 --- /dev/null +++ b/all.yml @@ -0,0 +1,68 @@ +- hosts: all + gather_facts: no + serial: "{{ hosts_per_batch | d(1) | int }}" + strategy: "{{ hosts_strategy | d('linear') }}" + tasks: + - name: get primary role + set_fact: + host_primary_role: "{%- if primary_role is defined -%}{{ primary_role }}\ + {%- elif hostvars[inventory_hostname]['primary_role'] is defined -%}{{ hostvars[inventory_hostname]['primary_role'] }}\ + {%- else -%}{{ inventory_hostname }}\ + {%- endif -%}" + + + - name: import role mappings + import_tasks: mappings.yml + + + - name: fail if mappings are missing + fail: + msg: role mappings are missing or invalid + when: (common_roles is not defined) or (common_roles | type_debug != 'list') + + + - name: warn if current role mapping is missing + debug: + msg: "mapping for role \"{{ host_primary_role }}\" is missing - using default role mapping at stage 6" + when: (extra_roles | d({}))[host_primary_role] is not defined + + + - name: build role mapping + set_fact: + role_mapping: "{{ (((extra_roles | d({}))[host_primary_role] | d([{ 'stage': 6, 'role': host_primary_role }])) + + ([] if host_primary_role in (no_common_roles | d([])) else common_roles) + + ([{ 'stage': 1, 'role': 'container' }] if 'containers' in group_names else []) + + ([{ 'stage': 3, 'role': 'postgres', 'function': 'integrate' }] if host_primary_role in (database_roles | d([])) else []) + ) | sort(attribute='stage') }}" + + + - name: remember selected stages + set_fact: + selected_stages: "{%- if stage is defined and ((stage | string) is search(',')) -%}{{ stage | string | split(',') | list | map('int') | list }}\ + {%- elif (stage is not defined) or ((stage | int) == 0) -%}{{ [1,2,3,4,5,6,7,8,9] }}\ + {%- else -%}{{ [stage | int] }}\ + {%- endif -%}" + no_log: yes + + + - name: show deployment info + debug: + msg: "deploying primary role \"{{ host_primary_role }}\" on host \"{{ inventory_hostname }}\", {{ + (('stages ' if (selected_stages | length > 1) else 'stage ') ~ (selected_stages | join(', '))) + if ([1,2,3,4,5,6,7,8,9] | symmetric_difference(selected_stages) | list | length > 0) else 'all stages' }}" + + + - name: run pre_tasks + include_tasks: tasks/pre_tasks.yml + + + - name: run stages + include_tasks: tasks/includes/stage.yml + loop: "{{ selected_stages }}" + loop_control: + loop_var: this_stage + + + - name: show deployment info + debug: + msg: "ok: deployment completed on host \"{{ inventory_hostname }}\"" diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..a83d8df --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,13 @@ +[defaults] +interpreter_python = auto_silent +stdout_callback = debug +use_persistent_connections = true +forks = 6 +internal_poll_interval = 0.01 +jinja2_native = true + +[ssh_connection] +pipelining = true +transfer_method = scp +scp_if_ssh = smart +ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o PreferredAuthentications=publickey,password diff --git a/group_vars/all.yml b/group_vars/all.yml new file mode 100644 index 0000000..adf36e1 --- /dev/null +++ b/group_vars/all.yml @@ -0,0 +1,36 @@ +ansible_user: root +ansible_dir: /etc/ansible +ansible_key_dir: keys +alpine_version: "3.16" + +mac_prefix: 02:FF + +default_container_hardware: + cores: 1 + cpus: 1 + cpuunits: 1024 + memory: 128 + swap: 128 + disk: 0.4 + +known_external_ca: + - url: letsencrypt.org + wildcard: no + 
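# example invocations of the staged deployment playbook (all.yml) above; the host
# names and batch sizes here are purely hypothetical:
#   ansible-playbook all.yml -l ns1 -e stage=3          # run only stage 3 on one host
#   ansible-playbook all.yml -l web1 -e stage=2,4       # run stages 2 and 4
#   ansible-playbook all.yml -e hosts_per_batch=3 -e hosts_strategy=free
# leaving "stage" unset (or passing stage=0) runs all stages 1-9
+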
validation_methods: + - dns-01 + - url: ';' + wildcard: yes + +bogons: + - 0.0.0.0/8 + - 127.0.0.0/8 + - 169.254.0.0/16 + - 192.0.0.0/24 + - 192.0.2.0/24 + - 198.18.0.0/15 + - 198.51.100.0/24 + - 203.0.113.0/24 + - 240.0.0.0/4 + +services: {} +mail_server: {} diff --git a/group_vars/infra.yml b/group_vars/infra.yml new file mode 100644 index 0000000..4bf9af0 --- /dev/null +++ b/group_vars/infra.yml @@ -0,0 +1,126 @@ +timezone: Europe/Kirov +org: Organization Name +org_localized: Название организации +tld: org.local +int_net: 10.0.0.0/8 + +int_tld: "corp.{{ tld }}" +maintainer_email: "admin@{{ tld }}" + +timezone_win: Russian Standard Time + +container_default_nameserver: 10.40.0.1 + +networks: + srv: + gw: 10.41.0.1/16 + tag: 11 + priv: + gw: 10.42.0.1/16 + tag: 12 + dmz: + gw: 10.43.0.1/16 + tag: 13 + + +services: + db: + hostname: postgres + vault: + hostname: vault + backup: + hostname: rest-server + port: 443 + internal_ns: + hostname: ns + recursive_ns: + hostname: ns-rec + filtering_ns: + - hostname: blocky1 + - hostname: blocky2 + acme_dns: + hostname: acme-dns + rest_server: + hostname: rest-server + mariadb: + hostname: mariadb + smb: + hostname: smb + +use_alternative_apk_repo: yes + +mail_server: + tld: "{{ tld }}" + max_mail_size_bytes: 75000000 + admin_email: "admin@{{ tld }}" + + db_server_hostname: postgres + db_name: mail + db_user: mail + db_pass: pass + + mta_hostname: postfix + mua_hostname: dovecot + rspamd_hostname: rspamd + webmail_hostname: mail + clamav_hostname: clamav + + mua_lmtp_port: 11001 + mua_quota_port: 11002 + mua_auth_port: 11003 + mua_managesieve_port: 4190 + rspamd_port: 11332 + mta_sts_port: 11000 + clamav_port: 7357 + + mta_actual_hostname: smtp + mua_actual_hostname: imap + + allowed_spf: + - 1.1.1.1 + + domains: + - "{{ tld }}" + + aliases: + - { source: 'postmaster', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + - { source: 'hostmaster', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + - { source: 'webmaster', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + - { source: 'abuse', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + - { source: 'caa-report', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + - { source: 'dkim-report', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + - { source: 'dmarc-report', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + - { source: 'smtp-tls-report', source_domain: "{{ tld }}", target: 'admin', target_domain: "{{ tld }}" } + + + + +acme_preferred_chain: ISRG Root X1 + +winrm_remote_user: remote-admin +winrm_bootstrap_password: bootstrap123 + + +backup_filters: + none: + - "*" + - "!*/" + + office: + - "!*.doc" + - "!*.docx" + - "!*.xls" + - "!*.xlsx" + - "!*.ppt" + - "!*.pptx" + - "!*.txt" + - "!*.ods" + - "!*.odt" + - "!*.odp" + - "!*.pdf" + + images: + - "!*.jpg" + - "!*.jpeg" + - "!*.png" + - "!*.tiff" diff --git a/group_vars/windows.yml b/group_vars/windows.yml new file mode 100644 index 0000000..827dd6c --- /dev/null +++ b/group_vars/windows.yml @@ -0,0 +1,10 @@ +is_windows: true + +ansible_connection: winrm +ansible_user: "{{ winrm_remote_user }}" + +ansible_winrm_transport: credssp +ansible_winrm_scheme: http +ansible_port: 5985 + +primary_role: workstation diff --git a/hosts b/hosts new file mode 100644 index 0000000..7d43589 --- /dev/null +++ b/hosts @@ -0,0 +1,36 @@ +all: + children: + containers: + hosts: + ansible: + 
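# each container host carries its connection settings plus container parameters
# (container_id, container_network, container_password), presumably consumed when the
# proxmox node provisions the container; a second, purely illustrative entry might look like:
#   blocky1:
#     ansible_host: 10.43.0.10
#     container_id: 101
#     container_network: dmz
+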
ansible_host: 10.0.0.3 + ansible_ssh_private_key_file: /etc/ansible/keys/ansible + container_password: --- + container_id: 100 + container_network: srv + database: {user: 'test', name: 'test', pass: 'test'} + + + nodes: + hosts: + node1: + ansible_host: 10.0.0.2 + ansible_password: --- + ansible_ssh_extra_args: -o StrictHostKeyChecking=no + external_ipv4: 1.1.1.1 + primary_role: proxmox + container_mtu: 1390 + + + windows: + children: + workstations: + + + infra: + vars: + ansible_group_priority: 1000 + children: + containers: + nodes: + windows: diff --git a/mappings.yml b/mappings.yml new file mode 100644 index 0000000..3699a98 --- /dev/null +++ b/mappings.yml @@ -0,0 +1,138 @@ +- name: define role list + set_fact: + # common roles for all primary roles + common_roles: + - {stage: 2, role: 'common'} + - {stage: 3, role: 'ns', function: 'add_records'} + - {stage: 5, role: 'mail-user'} + - {stage: 8, role: 'iptables'} + - {stage: 9, role: 'backup', function: 'setup'} + + # these primary roles do not inherit common roles + no_common_roles: + - mikrotik + - workstation + + # these primary roles will always inherit postgres integration + database_roles: + - acme-dns + - asterisk + - gitea + - roundcube + - shop + - wikijs + - vault + + # additional roles for specific primary roles + extra_roles: + ca: + - {stage: 2, role: 'ca', function: 'install'} + coredns: + - {stage: 2, role: 'coredns', function: 'install'} + - {stage: 4, role: 'coredns', function: 'install_tls'} + mariadb: + - {stage: 4, role: 'mariadb', function: 'install'} + mikrotik: + - {stage: 3, role: 'ns', function: 'add_records'} + - {stage: 5, role: 'mikrotik'} + nsd: + - {stage: 4, role: 'nsd', function: 'install'} + - {stage: 4, role: 'nsd', function: 'populate'} + - {stage: 5, role: 'nsd', function: 'install_dnssec'} + - {stage: 5, role: 'nsd', function: 'install_tls'} + postfix: + - {stage: 3, role: 'mail-db'} + - {stage: 4, role: 'postfix'} + postgres: + - {stage: 2, role: 'postgres', function: 'install'} + - {stage: 3, role: 'postgres', function: 'install_tls'} + powerdns: + - {stage: 2, role: 'postgres', function: 'integrate'} + - {stage: 2, role: 'powerdns', function: 'install'} + - {stage: 3, role: 'ca', function: 'certs'} + proxmox: + - {stage: 1, role: 'common'} + - {stage: 1, role: 'proxmox', function: 'install'} + - {stage: 5, role: 'mail-user'} + - {stage: 5, role: 'proxmox', function: 'tls'} + - {stage: 6, role: 'proxmox', function: 'mail'} + rest-server: + - {stage: 6, role: 'rest-server', function: 'install'} + workstation: + - {stage: 3, role: 'ns', function: 'add_records'} + - {stage: 5, role: 'workstation'} + + # recommended hardware parameters for each primary role + role_hardware: + acme-dns: {cores: 2, memory: 96, swap: 64, disk: 0.15} + ansible: {cores: 4, memory: 256, swap: 384, disk: 1.5} + asterisk: {cores: 4, memory: 192, swap: 96, disk: 0.6, cpuunits: 2048} + blocky: {cores: 4, memory: 384, swap: 128, disk: 0.15} + ca: {cores: 2, memory: 128, swap: 64, disk: 0.15, cpuunits: 512} + clamav: {cores: 4, memory: 2048, swap: 256, disk: 0.75} + coredns: {cores: 4, memory: 128, swap: 64, disk: 0.15} + crl: {cores: 2, memory: 128, swap: 48, disk: 0.15} + dovecot: {cores: 4, memory: 256, swap: 64, disk: 0.15} + gitea: {cores: 4, memory: 512, swap: 256, disk: 1} + grafana: {cores: 4, memory: 512, swap: 256, disk: 0.4} + mariadb: {cores: 4, memory: 256, swap: 128, disk: 0.4} + mc: {cores: 4, memory: 2048, swap: 512, disk: 0.5} + nsd: {cores: 2, memory: 256, swap: 256, disk: 0.15} + ntp: {cores: 2, memory: 64, 
swap: 128, disk: 0.15} + postfix: {cores: 4, memory: 256, swap: 48, disk: 0.15} + postgres: {cores: 4, memory: 256, swap: 256, disk: 0.5} + powerdns: {cores: 2, memory: 96, swap: 64, disk: 0.15} + prometheus: {cores: 4, memory: 512, swap: 256, disk: 0.3} + rclone: {cores: 4, memory: 192, swap: 96, disk: 0.2, cpuunits: 768} + rest-server: {cores: 4, memory: 256, swap: 192, disk: 0.2, cpuunits: 512} + roundcube: {cores: 4, memory: 384, swap: 256, disk: 0.5} + rspamd: {cores: 4, memory: 768, swap: 128, disk: 0.3} + seafile: {cores: 4, memory: 1024, swap: 1024, disk: 5} + shop: {cores: 4, memory: 192, swap: 128, disk: 0.4} + smb: {cores: 2, memory: 128, swap: 64, disk: 0.15} + strongswan: {cores: 4, memory: 128, swap: 48, disk: 0.15} + unbound: {cores: 2, memory: 128, swap: 64, disk: 0.15} + uptime-kuma: {cores: 4, memory: 384, swap: 128, disk: 0.5} + vault: {cores: 4, memory: 128, swap: 64, disk: 0.3} + web: {cores: 4, memory: 128, swap: 64, disk: 0.2} + wikijs: {cores: 4, memory: 256, swap: 256, disk: 0.75} + + # role dependency table + # 0 - DNS ok + # 1 - DB ok + role_dependency: + acme-dns: 0 + ansible: 0 + asterisk: 2 + blocky: 0 + ca: 0 + clamav: 1 + coredns: 0 + crl: 1 + dovecot: 2 + gitea: 2 + grafana: 2 + mariadb: 0 + mc: 3 + nsd: 0 + ntp: 0 + postfix: 2 + postgres: 0 + powerdns: 1 + prometheus: 1 + rclone: 1 + rest-server: 0 + roundcube: 2 + rspamd: 2 + seafile: 3 + shop: 2 + smb: 1 + strongswan: 1 + unbound: 0 + uptime-kuma: 3 + vault: 2 + web: 1 + wikijs: 3 + + run_once: yes + no_log: yes diff --git a/roles/acme-dns/defaults/main.yml b/roles/acme-dns/defaults/main.yml new file mode 100644 index 0000000..4bc76a0 --- /dev/null +++ b/roles/acme-dns/defaults/main.yml @@ -0,0 +1,44 @@ +acme_dns_user: acmedns +acme_dns_group: acmedns +acme_dns_dir: /opt/acmedns + +acme_dns_tld: "acme-dns.{{ acme_tld | d(tld) }}" +acme_dns_ns: "ns.acme-dns.{{ acme_tld | d(tld) }}" +acme_dns_admin: "{{ maintainer_email | d('admin@' ~ (acme_tld | d(tld))) }}" + +acme_dns_api_port: 8080 + + +acme_dns_default_config: + general: + listen: ":53" + protocol: both4 + domain: "{{ acme_dns_tld }}" + nsname: "{{ acme_dns_ns | d(acme_dns_tld) }}" + nsadmin: "{{ acme_dns_admin | replace('@', '.') }}" + + records: + - "{{ acme_dns_tld ~ '. A ' ~ acme_dns_external_ipv4 }}" + - "{{ (acme_dns_ns | d(acme_dns_tld)) ~ '. A ' ~ acme_dns_external_ipv4 }}" + - "{{ acme_dns_tld ~ '. NS ' ~ (acme_dns_ns | d(acme_dns_tld)) ~ '.' 
}}" + + database: + engine: postgres + connection: "{{ 'postgresql://' ~ database_user ~ ':' ~ database_pass ~ '@' ~ database_host ~ '/' ~ database_name ~ '?sslmode=disable' }}" + + api: + ip: "0.0.0.0" + autocert_port: 80 + port: "{{ acme_dns_api_port }}" + disable_registration: no + tls: none + use_header: no + + notification_email: "{{ letsencrypt_email | d(maintainer_email) }}" + corsorigins: + - "*" + + logconfig: + loglevel: debug + logtype: stdout + logformat: text diff --git a/roles/acme-dns/handlers/main.yml b/roles/acme-dns/handlers/main.yml new file mode 100644 index 0000000..75d2a1a --- /dev/null +++ b/roles/acme-dns/handlers/main.yml @@ -0,0 +1,5 @@ +- name: restart acme-dns + service: + name: acme-dns + state: restarted + \ No newline at end of file diff --git a/roles/acme-dns/tasks/main.yml b/roles/acme-dns/tasks/main.yml new file mode 100644 index 0000000..5308e5f --- /dev/null +++ b/roles/acme-dns/tasks/main.yml @@ -0,0 +1,113 @@ +- name: set acme_dns_cfg + set_fact: + acme_dns_cfg: "{{ acme_dns_default_config | d({}) | combine(acme_dns_config | d({}), recursive=true) }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - libcap + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ acme_dns_user }}" + group: "{{ acme_dns_group }}" + dir: "{{ acme_dns_dir }}" + + +- name: get and extract latest version of acme-dns + include_tasks: tasks/get_lastversion.yml + vars: + package: + name: fritterhoff/acme-dns + location: github + assets: yes + asset_filter: 'Linux_amd64.tar.gz$' + file: "{{ acme_dns_dir }}/last_version" + extract: "{{ acme_dns_dir }}" + user: "{{ acme_dns_user }}" + group: "{{ acme_dns_group }}" + notify: restart acme-dns + + +- name: delete unnecessary files + file: + path: "{{ acme_dns_dir }}/{{ item }}" + state: absent + loop: + - CHANGELOG.md + - LICENSE + - README.md + + +- name: template acme-dns config + template: + src: config.j2 + dest: "{{ acme_dns_dir }}/config.cfg" + force: yes + mode: 0400 + owner: "{{ acme_dns_user }}" + group: "{{ acme_dns_group }}" + lstrip_blocks: yes + notify: restart acme-dns + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/acme-dns + force: yes + mode: "+x" + notify: restart acme-dns + + +- name: ensure acme-dns binary has executable bit set + file: + path: "{{ acme_dns_dir }}/acme-dns" + mode: "+x" + + +- name: add cap_net_bind_service to acme-dns executable + community.general.capabilities: + path: "{{ acme_dns_dir }}/acme-dns" + capability: cap_net_bind_service+ep + changed_when: no + + +- name: set acme server address + set_fact: + acme_server: "http://127.0.0.1:{{ acme_dns_api_port }}" + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ acme_dns_dir }}" + + +- name: enable and start acme-dns + service: + name: acme-dns + state: started + enabled: yes diff --git a/roles/acme-dns/templates/config.j2 b/roles/acme-dns/templates/config.j2 new file mode 100644 index 0000000..4e16508 --- /dev/null +++ b/roles/acme-dns/templates/config.j2 @@ -0,0 +1,26 @@ +{% macro acme_dns_option(option) -%} + {% if option.value is boolean -%} + {{ option.key }} = {{ 'true' if option.value else 'false' }} + {% elif option.value | type_debug == 'list' 
-%} + {{ option.key }} = [ + {%- for s in option.value -%} + "{{- s -}}", + {%- endfor -%} + ] + {% elif option.value != None -%} + {{ option.key }} = "{{ option.value }}" + {% endif -%} +{% endmacro -%} + + + +{% for section in (acme_dns_cfg | d({}) | dict2items) -%} + [{{ section.key | lower }}] + {% for option in (section.value | d({}) | dict2items) -%} + {{ acme_dns_option(option) -}} + {% endfor %} + + {%- if not loop.last %} + + {% endif -%} +{% endfor %} diff --git a/roles/acme-dns/templates/init.j2 b/roles/acme-dns/templates/init.j2 new file mode 100644 index 0000000..683e567 --- /dev/null +++ b/roles/acme-dns/templates/init.j2 @@ -0,0 +1,18 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +command="{{ acme_dns_dir }}/$SVCNAME" +directory="{{ acme_dns_dir }}" +command_user="{{ acme_dns_user }}:{{ acme_dns_group }}" +pidfile="/var/run/$SVCNAME.pid" +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + +depend() { + need net + use dns +} + +start_pre() { + setcap 'cap_net_bind_service=+ep' {{ acme_dns_dir }}/$SVCNAME +} diff --git a/roles/acme-dns/templates/nginx_server.j2 b/roles/acme-dns/templates/nginx_server.j2 new file mode 100644 index 0000000..1eba50f --- /dev/null +++ b/roles/acme-dns/templates/nginx_server.j2 @@ -0,0 +1,8 @@ +location / { + proxy_pass http://127.0.0.1:{{ acme_dns_api_port }}; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; +} diff --git a/roles/acme/defaults/main.yml b/roles/acme/defaults/main.yml new file mode 100644 index 0000000..7738dd9 --- /dev/null +++ b/roles/acme/defaults/main.yml @@ -0,0 +1,2 @@ +acme_directory: /etc/letsencrypt +acme_max_log_backups: 5 diff --git a/roles/acme/tasks/main.yml b/roles/acme/tasks/main.yml new file mode 100644 index 0000000..20d2e30 --- /dev/null +++ b/roles/acme/tasks/main.yml @@ -0,0 +1,202 @@ +- name: install certbot + include_tasks: tasks/install_packages.yml + vars: + package: certbot + + +- name: create certbot directories + file: + path: "{{ item }}" + state: directory + loop: + - "{{ acme_directory }}" + - "{{ acme_directory }}/cron" + + +- name: change certbot directory permissions + file: + path: "{{ acme_directory ~ '/' ~ item }}" + state: directory + mode: "g+rx,o+rx" + loop: + - archive + - live + + +- name: check if acme-dns auth hook already exists + stat: + path: "{{ acme_directory }}/acme-dns-auth.py" + register: result + + +- name: download acme-dns auth hook + get_url: + url: "https://raw.githubusercontent.com/RangeForce/acme-dns-certbot-joohoi/master/acme-dns-auth.py" + dest: "{{ acme_directory }}/acme-dns-auth.py" + force: no + mode: "+x" + when: result.stat.exists == false + + +- name: update python interpreter in acme-dns-auth to python3 + lineinfile: + path: "{{ acme_directory }}/acme-dns-auth.py" + regexp: '^#!\/usr\/bin\/env python\s*$' + line: '#!/usr/bin/env python3' + + +- name: clear acme fqdn list + set_fact: + acme_domain_list: "{{ [] }}" + + +- name: build acme fqdn list + set_fact: + acme_domain_list: "{{ (acme_domain_list | d([])) + + ([item.fqdn | d((item.hostname | d(host_name)) ~ '.' 
~ (item.tld | d(host_tld)))] if item is mapping else + [item]) }}" + loop: "{{ acme_hosts if (acme_hosts | type_debug == 'list') else [] }}" + + +- name: build single acme fqdn + set_fact: + acme_domain_list: "{%- if acme_fqdn is defined and acme_fqdn != None -%}\ + {{ [ acme_fqdn ] }}\ + {%- elif (acme_hostname is defined and acme_hostname != None) or (acme_tld is defined and acme_tld != None) -%}\ + {{ [((acme_hostname is defined and acme_hostname != None) | ternary(acme_hostname, host_name)) ~ '.' ~ + ((acme_tld is defined and acme_tld != None) | ternary(acme_tld, host_tld))] }}\ + {%- else -%}\ + {{ [ host_fqdn ] }}\ + {%- endif -%}" + when: (acme_hosts is not defined) or (acme_hosts | type_debug != 'list') + + +- name: set acme parameters + set_fact: + acme_cert_name: "{{ acme_id if (acme_id is defined) and (acme_id != None) else (host_name ~ ('-ecc' if (acme_ecc | d(false) == true) else '')) }}" + acme_target_server: "{%- if (acme_server is defined) and (acme_server != None) -%}\ + {{ acme_server }}\ + {%- else -%}\ + {{ (services.acme_dns.protocol | d('https')) ~ '://' ~ services.acme_dns.hostname ~ '.' ~ (services.acme_dns.tld | d(int_tld)) ~ + ((':' ~ services.acme_dns.port) if services.acme_dns.port is defined else '') }}\ + {%- endif -%}" + + +- name: set certbot parameters + set_fact: + acme_params: "{{ ['--manual', + '--manual-auth-hook ' ~ ((acme_directory ~ '/acme-dns-auth.py') | quote), + '--preferred-challenges dns', + '--debug-challenges', + ('--key-type ecdsa' if (acme_ecc | d(false) == true) else ''), + ('--staging' if (acme_staging | d(false) == true) else ''), + ('--force-renewal' if (acme_force | d(false) == true) else ''), + ('--must-staple' if (acme_stapling | d(false) == true) else ''), + '--cert-name ' ~ (acme_cert_name | quote), + '--non-interactive', + '--agree-tos', + '--email ' ~ ((acme_email | d(maintainer_email)) | quote), + '--no-eff-email', + (('--preferred-chain ' ~ (acme_preferred_chain | quote)) if acme_preferred_chain is defined else ''), + '--max-log-backups ' ~ (acme_max_log_backups | quote) + ] | select() | list | join(' ') }}" + + +- block: + - name: issue cert with dns mode + shell: + cmd: "certbot certonly {{ acme_params }} -d {{ acme_domain_list | map('quote') | join(' -d ') }}" + chdir: /usr/bin + environment: + ACMEDNS_URL: "{{ acme_target_server }}" + register: result + changed_when: ('Successfully received certificate' in result.stdout) + notify: "{{ acme_notify if (acme_notify is defined) and (acme_notify != None) else omit }}" + + rescue: + - name: wait for user interaction (CNAME record must be set manually) + pause: + prompt: "{{ result.stdout }}" + + - name: try again to issue cert with dns mode + shell: + cmd: "certbot certonly {{ acme_params }} -d {{ acme_domain_list | map('quote') | join(' -d ') }}" + chdir: /usr/bin + environment: + ACMEDNS_URL: "{{ acme_target_server }}" + register: result + changed_when: ('Successfully received certificate' in result.stdout) + notify: "{{ acme_notify if (acme_notify is defined) and (acme_notify != None) else omit }}" + + +- name: create symlinks + file: + path: "{{ item.dest }}" + src: "{{ acme_directory ~ '/live/' ~ acme_cert_name ~ '/' ~ item.src }}" + state: link + force: yes + when: (item.dest is string) and (item.dest | length > 0) and (acme_use_symlinks | d(true) == true) + loop: + - { src: 'fullchain.pem', dest: "{{ acme_cert | d(None) }}" } + - { src: 'privkey.pem', dest: "{{ acme_key | d(None) }}" } + - { src: 'cert.pem', dest: "{{ acme_cert_single | d(None) }}" } + - { src: 'chain.pem', 
dest: "{{ acme_chain | d(None) }}" } + notify: "{{ acme_notify if (acme_notify is defined) and (acme_notify != None) else omit }}" + + +- name: fix ownership on archive dir + file: + path: "{{ acme_directory ~ '/archive/' ~ acme_cert_name }}" + follow: no + recurse: yes + owner: "{{ acme_owner if (acme_owner is defined) and (acme_owner != None) else omit }}" + group: "{{ acme_group if (acme_group is defined) and (acme_group != None) else omit }}" + + +- name: copy certs + copy: + src: "{{ acme_directory ~ '/live/' ~ acme_cert_name ~ '/' ~ item.src }}" + dest: "{{ item.dest }}" + remote_src: yes + mode: 0600 + owner: "{{ acme_owner if (acme_owner is defined) and (acme_owner != None) else omit }}" + group: "{{ acme_group if (acme_group is defined) and (acme_group != None) else omit }}" + when: (item.dest is string) and (item.dest | length > 0) and (acme_use_symlinks | d(true) == false) + loop: + - { src: 'fullchain.pem', dest: "{{ acme_cert | d(None) }}" } + - { src: 'privkey.pem', dest: "{{ acme_key | d(None) }}" } + - { src: 'cert.pem', dest: "{{ acme_cert_single | d(None) }}" } + - { src: 'chain.pem', dest: "{{ acme_chain | d(None) }}" } + notify: "{{ acme_notify | d(omit) }}" + + +- name: edit renewal file + lineinfile: + path: "{{ acme_directory ~ '/renewal/' ~ acme_cert_name ~ '.conf' }}" + regexp: '^{{ item.name | regex_escape }}(\s+)=' + line: '{{ item.name }} = {{ item.value }}' + insertafter: '^\[renewalparams\]' + create: no + firstmatch: yes + when: (item.value is string) and (item.value | length > 0) and + ((item.extra_condition is not defined) or (item.extra_condition | d(true))) + loop: + - { name: 'renew_hook', value: "{{ acme_directory ~ '/cron/' ~ acme_cert_name ~ '.sh' }}" } + + +- name: create custom renewal hook file + template: + src: renewal.j2 + dest: "{{ acme_directory ~ '/cron/' ~ acme_cert_name ~ '.sh' }}" + force: yes + mode: 0500 + lstrip_blocks: yes + + +- name: add certbot to crontab + cron: + name: "certbot renewal ({{ acme_cert_name ~ ' on ' ~ acme_target_server }})" + job: "ACMEDNS_URL={{ acme_target_server | quote }} \ + /usr/bin/certbot renew --cert-name {{ acme_cert_name | quote }} --max-log-backups {{ acme_max_log_backups | quote }}" + hour: "{{ 4 | random(start=1, seed=(host_name ~ acme_cert_name)) }}" + minute: "{{ 59 | random(seed=(host_name ~ acme_cert_name)) }}" diff --git a/roles/acme/templates/renewal.j2 b/roles/acme/templates/renewal.j2 new file mode 100644 index 0000000..19414e3 --- /dev/null +++ b/roles/acme/templates/renewal.j2 @@ -0,0 +1,49 @@ +#!/bin/sh + +{% if (acme_owner is string) and (acme_group is string) and (acme_owner | length > 0) and (acme_group | length > 0) and (acme_use_symlinks | d(true) == true) -%} + chown -R {{ acme_owner ~ ':' ~ acme_group }} {{ (acme_directory ~ '/archive/' ~ acme_cert_name ~ '/') | quote }} +{% endif -%} + + +{{ acme_before_copy_hook | d('') }} + + +{% if (acme_cert is string) and (acme_cert | length > 0) and (acme_use_symlinks | d(true) == false) -%} + cp -fpT {{ (acme_directory ~ '/live/' ~ acme_cert_name ~ '/fullchain.pem') | quote }} {{ acme_cert | quote }} + {% if (acme_owner is not string) and (acme_group is string) -%} + chgrp -f {{ acme_group }} {{ acme_cert | quote }} + {% elif acme_owner is defined -%} + chown -f {{ acme_owner ~ ((':' ~ acme_group) if acme_group is string else '') }} {{ acme_cert | quote }} + {% endif -%} +{% endif -%} + +{% if (acme_key is string) and (acme_key | length > 0) and (acme_use_symlinks | d(true) == false) -%} + cp -fpT {{ (acme_directory ~ '/live/' ~ acme_cert_name 
~ '/privkey.pem') | quote }} {{ acme_key | quote }} + {% if (acme_owner is not string) and (acme_group is string) -%} + chgrp -f {{ acme_group }} {{ acme_key | quote }} + {% elif acme_owner is defined -%} + chown -f {{ acme_owner ~ ((':' ~ acme_group) if acme_group is string else '') }} {{ acme_key | quote }} + {% endif -%} +{% endif -%} + +{% if (acme_cert_single is string) and (acme_cert_single | length > 0) and (acme_use_symlinks | d(true) == false) -%} + cp -fpT {{ (acme_directory ~ '/live/' ~ acme_cert_name ~ '/cert.pem') | quote }} {{ acme_cert_single | quote }} + {% if (acme_owner is not string) and (acme_group is string) -%} + chgrp -f {{ acme_group }} {{ acme_cert_single | quote }} + {% elif acme_owner is defined -%} + chown -f {{ acme_owner ~ ((':' ~ acme_group) if acme_group is string else '') }} {{ acme_cert_single | quote }} + {% endif -%} +{% endif -%} + +{% if (acme_chain is string) and (acme_chain | length > 0) and (acme_use_symlinks | d(true) == false) -%} + cp -fpT {{ (acme_directory ~ '/live/' ~ acme_cert_name ~ '/chain.pem') | quote }} {{ acme_chain | quote }} + {% if (acme_owner is not string) and (acme_group is string) -%} + chgrp -f {{ acme_group }} {{ acme_chain | quote }} + {% elif acme_owner is defined -%} + chown -f {{ acme_owner ~ ((':' ~ acme_group) if acme_group is string else '') }} {{ acme_chain | quote }} + {% endif -%} +{% endif -%} + + +{{ (acme_post_hook ~ ' &>/dev/null &') if acme_post_hook is defined else '' }} + diff --git a/roles/ansible/defaults/main.yml b/roles/ansible/defaults/main.yml new file mode 100644 index 0000000..02fede6 --- /dev/null +++ b/roles/ansible/defaults/main.yml @@ -0,0 +1 @@ +ansible_dir: /etc/ansible diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml new file mode 100644 index 0000000..169dfbf --- /dev/null +++ b/roles/ansible/tasks/main.yml @@ -0,0 +1,31 @@ +- name: install ansible and dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - ansible + - py3-lxml + - py3-pip + - py3-requests + - py3-netaddr + + +- name: install python dependencies + pip: + name: + - pywinrm + - pywinrm[credssp] + + +- name: create ansible directory + file: + path: "{{ ansible_dir }}" + state: directory + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ ansible_dir }}" diff --git a/roles/asterisk/defaults/main.yml b/roles/asterisk/defaults/main.yml new file mode 100644 index 0000000..df8200c --- /dev/null +++ b/roles/asterisk/defaults/main.yml @@ -0,0 +1,620 @@ +asterisk_user: asterisk +asterisk_group: asterisk + +asterisk_dir: /var/lib/asterisk +asterisk_conf_dir: /etc/asterisk +asterisk_tls_dir: "{{ asterisk_conf_dir }}/tls" +asterisk_recordings_dir: /opt/recordings +asterisk_data_dir: "{{ asterisk_dir }}" + +asterisk_users: {} +asterisk_trunks: {} + +asterisk_language: ru + +asterisk_pjsip_ciphers: + - ECDHE-ECDSA-CHACHA20-POLY1305 + - ECDHE-ECDSA-AES256-GCM-SHA384 + - ECDHE-ECDSA-AES128-GCM-SHA256 + - ECDHE-RSA-CHACHA20-POLY1305 + - ECDHE-RSA-AES256-GCM-SHA384 + - ECDHE-RSA-AES128-GCM-SHA256 + - DHE-RSA-AES128-SHA256 + + +# meta definitions: +# __template__ (bool) (section): this section is a template +# __template_from__ (string/list) (section): templates to inherit from +# __comment__ (string) (section): specify a comment before the section definition +# __inner_objects__ (boolean) (config/section): use object syntax when enumerating section members + +asterisk_default_config: + acl: + acl_lan_clients: + deny: + - 
0.0.0.0/0.0.0.0 + permit: + - "{{ int_net | ansible.utils.ipaddr('network') }}/{{ int_net | ansible.utils.ipaddr('netmask') }}" + acl_inet_clients: + deny: + - "{{ int_net | ansible.utils.ipaddr('network') }}/{{ int_net | ansible.utils.ipaddr('netmask') }}" + permit: + - 0.0.0.0/0.0.0.0 + + asterisk: + directories: + __template__: yes + __inner_objects__: yes + astetcdir: "{{ asterisk_conf_dir }}" + astvarlibdir: "{{ asterisk_dir }}" + astdatadir: "{{ asterisk_data_dir }}" + astdbdir: "{{ asterisk_db_dir | d(asterisk_dir) }}" + astkeydir: "{{ asterisk_key_dir | d(asterisk_dir) }}" + astagidir: "{{ asterisk_agi_dir | d(asterisk_dir ~ '/agi-bin') }}" + astspooldir: "{{ asterisk_spool_dir | d('/var/spool/asterisk') }}" + astrundir: "{{ asterisk_run_dir | d('/var/run/asterisk') }}" + astlogdir: "{{ asterisk_log_dir | d('/var/log/asterisk') }}" + astsbindir: /usr/sbin + astmoddir: /usr/lib/asterisk/modules + + options: + verbose: 0 + debug: no + trace: 0 + + execincludes: no + highpriority: yes + initcrypto: yes + nocolor: yes + dumpcore: no + runuser: "{{ asterisk_user }}" + rungroup: "{{ asterisk_group }}" + autosystemname: yes + maxcalls: 200 + maxload: "100.0" + minmemfree: 1 + languageprefix: yes + transmit_silence: no + + defaultlanguage: en + documentation_language: en_US + + ccss: + general: + cc_max_requests: 15 + + cdr: + general: + enable: yes + unanswered: yes + congestion: yes + + cel: + general: + enable: no + + cdr_pgsql: + global: + hostname: "{{ database_host }}" + port: 5432 + user: "{{ database_user | d('cdr') }}" + dbname: "{{ database_name | d('cdr') }}" + table: "{{ database_table | d('cdr') }}" + password: "{{ database_pass }}" + encoding: UNICODE + + cli_aliases: + general: + template: friendly + friendly: + "hangup request": channel request hangup + "originate": channel originate + "help": core show help + "pri intense debug span": pri set debug intense span + "reload": module reload + "pjsip reload": module reload res_pjsip.so res_pjsip_authenticator_digest.so res_pjsip_endpoint_identifier_ip.so res_pjsip_mwi.so res_pjsip_notify.so res_pjsip_outbound_publish.so res_pjsip_publish_asterisk.so res_pjsip_outbound_registration.so + + cli_permissions: + general: + default_perm: permit + + codecs: + plc: + __inner_objects__: yes + genericplc: "true" + genericplc_on_equal_codecs: "false" + opus: + type: opus + packet_loss: 2 + signal: voice + + confbridge: + default_user: + type: user + dsp_drop_silence: yes + jitterbuffer: yes + default_bridge: + type: bridge + max_members: 30 + language: "{{ asterisk_language }}" + + features: + __inner_objects__: yes + featuremap: + blindxfer: "**" + atxfer: "*#" + applicationmap: + volume-up-tx: "#1,self/caller,Gosub(volume-up-tx,s,1)" + volume-up-rx: "#2,self/caller,Gosub(volume-up-rx,s,1)" + volume-down-tx: "#3,self/caller,Gosub(volume-down-tx,s,1)" + volume-down-rx: "#4,self/caller,Gosub(volume-down-rx,s,1)" + volume-increase-all: "#5,self/caller,Gosub(volume-increase-all,s,1)" + call-controls: + volume-up-tx: "" + volume-up-rx: "" + volume-down-tx: "" + volume-down-rx: "" + volume-increase-all: "" + + followme: + __inner_objects__: yes + general: + featuredigittimeout: 3500 + enable_callee_prompt: "true" + takecall: 1 + declinecall: 2 + call_from_prompt: followme/call-from + norecording_prompt: followme/no-recording + options_prompt: followme/options + pls_hold_prompt: followme/pls-hold-while-try + status_prompt: followme/status + sorry_prompt: followme/sorry + connecting_prompt: "" + default: + musicclass: default + context: default + 
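# note: the call-control entries in features.applicationmap above are activated per call
# by the volume-setup subroutine in ext_utils.j2 (Set(__DYNAMIC_FEATURES=call-controls)),
# so during a bridged call the #1..#5 feature codes run the matching volume Gosub contexts
+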
enable_callee_prompt: "true" + takecall: 1 + declinecall: 2 + call_from_prompt: followme/call-from + norecording_prompt: followme/no-recording + options_prompt: followme/options + pls_hold_prompt: followme/pls-hold-while-try + status_prompt: followme/status + sorry_prompt: followme/sorry + connecting_prompt: "" + + indications: + general: + country: ru + ru: + description: Russian Federation / ex Soviet Union + ringcadence: "1000,4000" + dial: "425" + busy: "425/350,0/350" + ring: "425/1000,0/4000" + congestion: "425/175,0/175" + callwaiting: "425/200,0/5000" + record: "1400/400,0/15000" + info: "950/330,1400/330,1800/330,0/1000" + dialrecall: "425/400,0/40" + stutter: "!425/100,!0/100,!425/100,!0/100,!425/100,!0/100,!425/100,!0/100,!425/100,!0/100,!425/100,!0/100,425" + + logger: + general: + queue_log: no + logfiles: + __inner_objects__: yes + console: notice,warning,error,verbose,dtmf + "syslog.local0": "[plain]notice,warning,error" + + manager: + general: + enabled: yes + webenabled: no + port: 5038 + bindaddr: 0.0.0.0 + debug: "off" + allowmultiplelogin: yes + displayconnects: yes + timestampevents: yes + authtimeout: 10 + + musiconhold: + default: + mode: files + directory: moh + + pjproject: + startup: + cache_pools: yes + + + pjsip: + system: + type: system + threadpool_auto_increment: 3 + timer_t1: 250 + timer_b: 16000 + global: + type: global + max_forwards: 40 + keep_alive_interval: 15 + user_agent: "{{ org }} Asterisk PBX" + endpoint_identifier_order: username,ip + default_from_user: pbx + default_realm: "{{ host_fqdn }}" + + transport-common: + __template__: yes + type: transport + tos: cs3 + cos: 3 + allow_reload: no + local_net: "{{ int_net | ansible.utils.ipaddr('network') }}/{{ int_net | ansible.utils.ipaddr('netmask') }}" + + transport-ext: + __template__: yes + __template_from__: transport-common + external_media_address: "{{ asterisk_external_ipv4 | d(hostvars[selected_node]['external_ipv4']) }}" + external_signaling_address: "{{ asterisk_external_ipv4 | d(hostvars[selected_node]['external_ipv4']) }}" + + transport-udp: + __template__: yes + __template_from__: transport-common + protocol: udp + + transport-tcp: + __template__: yes + __template_from__: transport-common + protocol: tcp + + transport-lan: + __template_from__: transport-udp + bind: 0.0.0.0:5060 + + transport-lan-tcp: + __template_from__: transport-tcp + bind: 0.0.0.0:5060 + + transport-lan-tls: + __template_from__: transport-common + protocol: tls + bind: 0.0.0.0:5061 + cert_file: "{{ asterisk_tls_dir }}/asterisk.crt" + priv_key_file: "{{ asterisk_tls_dir }}/asterisk.key" + cipher: "{{ asterisk_pjsip_ciphers | join(',') }}" + method: tlsv1_2 + require_client_cert: no + verify_client: no + verify_server: no + + endpoint-common: + __template__: yes + type: endpoint + allow: "!all,opus,g722,alaw,ulaw,g726,ilbc,gsm" + allow_overlap: no + send_connected_line: yes + trust_connected_line: yes + direct_media: no + dtmf_mode: auto_info + force_rport: yes + ice_support: no + identify_by: username + rewrite_contact: yes + rtp_symmetric: yes + send_diversion: yes + send_history_info: yes + send_pai: no + send_rpid: no + use_ptime: yes + t38_udptl: no + tone_zone: ru + language: ru + tos_audio: ef + cos_audio: 5 + rtp_keepalive: 5 + rtp_timeout: 360 + rtp_timeout_hold: 720 + rtcp_mux: yes + max_video_streams: 0 + max_audio_streams: 1 + bundle: no + sdp_session: "{{ org }} Asterisk PBX" + sdp_owner: PBX + suppress_q850_reason_headers: yes + + endpoint-trunk: + __template__: yes + __template_from__: endpoint-common + 
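# sections flagged with __template__ / __template_from__ (a convention of this role,
# rendered by templates/_macros.j2) become Asterisk config templates; for example a
# hypothetical section
#   my-endpoint:
#     __template__: yes
#     __template_from__: endpoint-common
# would be emitted as "[my-endpoint](!,endpoint-common)" in the generated pjsip.conf
+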
identify_by: ip,username + trust_id_inbound: yes + acl: acl_inet_clients + contact_acl: acl_inet_clients + + endpoint-lan: + __template__: yes + __template_from__: endpoint-common + identify_by: username + trust_id_inbound: no + trust_id_outbound: yes + acl: acl_lan_clients + contact_acl: acl_lan_clients + context: outbound + allow_subscribe: yes + device_state_busy_at: 1 + sub_min_expiry: 15 + media_encryption: sdes + media_encryption_optimistic: yes + + auth-common: + __template__: yes + type: auth + auth_type: userpass + + registration-common: + __template__: yes + type: registration + expiration: 1800 + auth_rejection_permanent: no + max_retries: 10000 + retry_interval: 20 + forbidden_retry_interval: 60 + fatal_retry_interval: 60 + + aor-common: + __template__: yes + type: aor + qualify_frequency: 30 + max_contacts: 2 # https://asterisk.org/pjsip-mis-configuration-can-cause-loss-sip-registrations + + __include__: custom_pjsip.conf + + + pjsip_notify: + __inner_objects__: yes + clear-mwi: + Event: message-summary + Content-type: application/simple-message-summary + Content: + - "Messages-Waiting: no" + - "Message-Account: sip:asterisk@127.0.0.1" + - "Voice-Message: 0/0 (0/0)" + - "" + polycom-check-cfg: + Event: check-sync + yealink-reboot: + Event: check-sync + + queues: + general: + persistentmembers: no + autofill: yes + monitor-type: MixMonitor + updatecdr: yes + log_membername_as_agent: yes + shared_lastcall: yes + + queue-template: + __template__: yes + musicclass: default + strategy: ringall + servicelevel: 30 + maxlen: 128 + timeoutpriority: conf + timeout: 300 + wrapuptime: 5 + announce-frequency: 0 + periodic-announce-frequency: 0 + announce-position: no + autopause: yes + autopausedelay: 60 + autopausebusy: yes + joinempty: unavailable + leavewhenempty: unavailable + ringinuse: no + + queue-single: + __template__: yes + __template_from__: queue-template + weight: 1 + autopause: no + context: inbound-queued-inqueue-busy + + queue-le: + __template__: yes + __template_from__: queue-template + weight: 1 + autopause: no + + __include__: custom_queues.conf + + queuerules: + general: + + rtp: + general: + rtpstart: 15000 + rtpend: 19000 + strictrtp: yes + icesupport: "false" + + udptl: + general: + + modules: + modules: + autoload: no + load: + - app_attended_transfer.so + - app_blind_transfer.so + - app_bridgeaddchan.so + - app_bridgewait.so + - app_cdr.so + - app_celgenuserevent.so + - app_chanisavail.so + - app_channelredirect.so + - app_chanspy.so + - app_confbridge.so + - app_controlplayback.so + - app_dial.so + - app_directed_pickup.so + - app_dumpchan.so + - app_echo.so + - app_exec.so + - app_followme.so + - app_forkcdr.so + - app_mixmonitor.so + - app_originate.so + - app_playback.so + - app_queue.so + - app_read.so + - app_readexten.so + - app_senddtmf.so + - app_softhangup.so + - app_stack.so + - app_stream_echo.so + - app_talkdetect.so + - app_transfer.so + - app_verbose.so + - app_waitforring.so + - app_waitforsilence.so + - app_waituntil.so + - app_while.so + + - bridge_builtin_features.so + - bridge_builtin_interval_features.so + - bridge_holding.so + - bridge_native_rtp.so + - bridge_simple.so + - bridge_softmix.so + + - cdr_pgsql.so + + - chan_bridge_media.so + - chan_pjsip.so + - chan_rtp.so + + - codec_a_mu.so + - codec_adpcm.so + - codec_alaw.so + - codec_g722.so + - codec_g726.so + - codec_gsm.so + - codec_ilbc.so + - codec_opus_open_source.so + - codec_resample.so + - codec_ulaw.so + + - format_g719.so + - format_g723.so + - format_g726.so + - format_gsm.so + - 
format_ilbc.so + - format_pcm.so + - format_sln.so + - format_vox.so + - format_wav.so + - format_wav_gsm.so + + - func_blacklist.so + - func_callcompletion.so + - func_callerid.so + - func_cdr.so + - func_channel.so + - func_config.so + - func_cut.so + - func_devstate.so + - func_dialplan.so + - func_global.so + - func_hangupcause.so + - func_holdintercept.so + - func_jitterbuffer.so + - func_logic.so + - func_module.so + - func_pjsip_aor.so + - func_pjsip_contact.so + - func_pjsip_endpoint.so + - func_rand.so + - func_sorcery.so + - func_strings.so + - func_talkdetect.so + - func_timeout.so + - func_volume.so + + - pbx_config.so + - pbx_loopback.so + - pbx_realtime.so + - pbx_spool.so + + - res_audiosocket.so + - res_clialiases.so + - res_clioriginate.so + - res_convert.so + - res_crypto.so + - res_format_attr_celt.so + - res_format_attr_g729.so + - res_format_attr_ilbc.so + - res_format_attr_opus.so + - res_format_attr_silk.so + - res_format_attr_siren14.so + - res_format_attr_siren7.so + - res_musiconhold.so + - res_mutestream.so + - res_pjproject.so + + - res_pjsip.so + - res_pjsip_acl.so + - res_pjsip_authenticator_digest.so + - res_pjsip_caller_id.so + - res_pjsip_dialog_info_body_generator.so + - res_pjsip_diversion.so + - res_pjsip_dlg_options.so + - res_pjsip_dtmf_info.so + - res_pjsip_empty_info.so + - res_pjsip_endpoint_identifier_ip.so + - res_pjsip_endpoint_identifier_user.so + - res_pjsip_exten_state.so + - res_pjsip_header_funcs.so + - res_pjsip_history.so + - res_pjsip_logger.so + - res_pjsip_messaging.so + - res_pjsip_mwi.so + - res_pjsip_mwi_body_generator.so + - res_pjsip_nat.so + - res_pjsip_notify.so + - res_pjsip_outbound_authenticator_digest.so + - res_pjsip_outbound_publish.so + - res_pjsip_outbound_registration.so + - res_pjsip_path.so + - res_pjsip_pidf_body_generator.so + - res_pjsip_publish_asterisk.so + - res_pjsip_pubsub.so + - res_pjsip_refer.so + - res_pjsip_registrar.so + - res_pjsip_rfc3326.so + - res_pjsip_sdp_rtp.so + - res_pjsip_send_to_voicemail.so + - res_pjsip_session.so + - res_pjsip_sips_contact.so + - res_pjsip_xpidf_body_generator.so + + - res_rtp_asterisk.so + - res_rtp_multicast.so + - res_security_log.so + - res_sorcery_astdb.so + - res_sorcery_config.so + - res_sorcery_memory.so + - res_sorcery_memory_cache.so + - res_srtp.so + - res_stasis.so + - res_stasis_answer.so + - res_stasis_device_state.so + - res_stasis_playback.so + - res_stasis_recording.so + - res_timing_pthread.so + - res_timing_timerfd.so + + - res_pjsip_header_funcs.so + - res_pjsip_history.so + - res_pjsip_sdp_rtp.so diff --git a/roles/asterisk/handlers/main.yml b/roles/asterisk/handlers/main.yml new file mode 100644 index 0000000..a5a5957 --- /dev/null +++ b/roles/asterisk/handlers/main.yml @@ -0,0 +1,8 @@ +- name: handle config change + import_tasks: asterisk_handlers.yml + + +- name: restart asterisk + service: + name: asterisk + state: restarted diff --git a/roles/asterisk/tasks/asterisk_handlers.yml b/roles/asterisk/tasks/asterisk_handlers.yml new file mode 100644 index 0000000..3545dd3 --- /dev/null +++ b/roles/asterisk/tasks/asterisk_handlers.yml @@ -0,0 +1,19 @@ +- block: + - name: restart asterisk + service: + name: asterisk + state: restarted + when: item.item.action is not defined + + + - name: reload dialplan + command: + cmd: 'asterisk -rx "dialplan reload"' + when: item.item.action == 'reload dialplan' + + + - name: reload configs + command: + cmd: 'asterisk -rx "core reload"' + when: item.item.action == 'reload configs' + when: item is defined \ No newline at 
end of file diff --git a/roles/asterisk/tasks/main.yml b/roles/asterisk/tasks/main.yml new file mode 100644 index 0000000..977eae5 --- /dev/null +++ b/roles/asterisk/tasks/main.yml @@ -0,0 +1,194 @@ +- name: set asterisk_cfg + set_fact: + asterisk_cfg: "{{ asterisk_default_config | d({}) | combine(asterisk_config | d({}), recursive=true) }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - asterisk + - asterisk-pgsql + - asterisk-openrc + - asterisk-opus + - asterisk-srtp + - tar + - vorbis-tools + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ asterisk_user }}" + group: "{{ asterisk_group }}" + dir: "{{ asterisk_dir }}" + + +- name: ensure asterisk directories exist + file: + path: "{{ item }}" + state: directory + owner: "{{ asterisk_user }}" + group: "{{ asterisk_group }}" + loop: + - "{{ asterisk_dir }}" + - "{{ asterisk_conf_dir }}" + - "{{ asterisk_tls_dir }}" + - "{{ asterisk_data_dir }}" + - "{{ asterisk_data_dir }}/moh" + - "{{ asterisk_data_dir }}/sounds" + - "{{ asterisk_data_dir }}/sounds/{{ asterisk_language }}" + - "{{ asterisk_data_dir }}/sounds/{{ asterisk_language }}/custom" + - "{{ asterisk_recordings_dir }}" + + +- name: template custom asterisk configs + template: + src: "{{ item }}.j2" + dest: "{{ asterisk_conf_dir }}/{{ item }}.conf" + force: yes + mode: 0400 + owner: "{{ asterisk_user }}" + group: "{{ asterisk_group }}" + lstrip_blocks: yes + notify: restart asterisk + loop: + - custom_pjsip + - custom_queues + - ext_ivr + - ext_utils + - extensions + + +- name: template asterisk configs + template: + src: "{{ 'config' if item is string else (item.config | d('config')) }}.j2" + dest: "{{ asterisk_conf_dir }}/{{ item if item is string else (item.dest | d(item.config) | d(item.name)) }}.conf" + force: yes + mode: 0400 + owner: "{{ asterisk_user }}" + group: "{{ asterisk_group }}" + lstrip_blocks: yes + notify: restart asterisk + loop: + - acl + - asterisk + - ccss + - cdr + - cdr_pgsql + - cli_aliases + - cli_permissions + - codecs + - confbridge + - features + - followme + - indications + - logger + - manager + - musiconhold + - pjproject + - pjsip + - pjsip_notify + - queues + - rtp + - modules + - queuerules + - cel + - udptl + + +- name: edit service config + lineinfile: + path: /etc/conf.d/asterisk + regexp: "^{{ item.name | upper }}=" + line: "{{ item.name | upper }}=\"{{ item.value }}\"" + when: item.when | d(true) + notify: restart asterisk + loop: + - name: asterisk_opts + value: "-C {{ (asterisk_conf_dir ~ '/asterisk.conf') | quote }}" + when: "{{ asterisk_conf_dir != '/etc/asterisk' }}" + - name: asterisk_user + value: "{{ asterisk_user }}" + - name: asterisk_nice + value: "{{ asterisk_niceness | d(None) }}" + when: "{{ asterisk_niceness is defined }}" + + +- name: download asterisk sound pack + get_url: + url: "https://downloads.asterisk.org/pub/telephony/sounds/asterisk-core-sounds-{{ asterisk_language }}-{{ item }}-current.tar.gz" + dest: "{{ asterisk_data_dir }}/{{ asterisk_language }}-{{ item }}.tar.gz" + owner: "{{ asterisk_user }}" + group: "{{ asterisk_group }}" + register: result + loop: + - sln16 + - wav + + +- name: extract sound pack + unarchive: + src: "{{ item }}" + dest: "{{ asterisk_data_dir }}/sounds/{{ asterisk_language }}" + remote_src: yes + owner: "{{ asterisk_user }}" + group: "{{ asterisk_group }}" + loop: "{{ result.results | d([]) | selectattr('dest', 'defined') | selectattr('changed', 'defined') | selectattr('changed', 'equalto', 
true) | map(attribute='dest') | list }}" + + +- name: deploy RSA cert for SIP TLS + include_role: + name: certs + vars: + certs: + id: ast-tls + cert: "{{ asterisk_tls_dir }}/asterisk.crt" + key: "{{ asterisk_tls_dir }}/asterisk.key" + chain: "{{ asterisk_tls_dir }}/chain.crt" + owner: "{{ asterisk_user }}" + group: "{{ asterisk_group }}" + post_hook: service asterisk restart + notify: restart asterisk + + +- name: install and configure cdr + include_role: + name: cdr + vars: + cdr_group: "{{ asterisk_group }}" + cdr_config: + db_host: "{{ asterisk_cfg.cdr_pgsql.global.hostname }}" + db_user: "{{ asterisk_cfg.cdr_pgsql.global.user }}" + db_pass: "{{ asterisk_cfg.cdr_pgsql.global.password }}" + db_database: "{{ asterisk_cfg.cdr_pgsql.global.dbname }}" + db_table: "{{ asterisk_cfg.cdr_pgsql.global.table }}" + record_dir: "{{ asterisk_recordings_dir }}" + ami_user: "{{ asterisk_ami_cdr_user }}" + ami_pass: "{{ asterisk_ami_cdr_secret }}" + when: asterisk_use_cdr | d(true) == true + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ asterisk_conf_dir }}" + - "{{ asterisk_tls_dir }}" + - "{{ asterisk_data_dir }}/moh" + - "{{ asterisk_data_dir }}/sounds/{{ asterisk_language }}/custom" + - "{{ asterisk_dir }}/astdb.sqlite3" + + +- name: enable and start asterisk + service: + name: asterisk + enabled: yes + state: started diff --git a/roles/asterisk/templates/_macros.j2 b/roles/asterisk/templates/_macros.j2 new file mode 100644 index 0000000..1117b16 --- /dev/null +++ b/roles/asterisk/templates/_macros.j2 @@ -0,0 +1,85 @@ +{% macro config_template(config_name, asterisk_cfg) -%} + {% set ns = namespace(objects=false) -%} + + {% if config_name is string and asterisk_cfg[config_name] is mapping -%} + {% for section in (asterisk_cfg[config_name] | dict2items) -%} + {% if section.value is mapping -%} + {% set template_parts = [] -%} + + {% if (section.value['__template__'] is boolean) and (section.value['__template__'] == true) -%} + {% set template_parts = template_parts + ['!'] -%} + {% endif -%} + + {% if section.value['__template_from__'] is string -%} + {% set template_parts = template_parts + [section.value['__template_from__']] -%} + {% elif section.value['__template_from__'] | type_debug == 'list' -%} + {% set template_parts = template_parts + section.value['__template_from__'] -%} + {% endif -%} + + {% if section.value['__comment__'] is string -%} + ; {{ section.value['__comment__'] }} + {% endif -%} + + + {% if template_parts | length == 0 -%} + [{{ section.key }}] + {% else -%} + [{{ section.key }}]({{ template_parts | join(',') }}) + {% endif -%} + + {% set ns.objects = (section.value['__inner_objects__'] | d(asterisk_cfg[config_name]['__inner_objects__'] | d(false))) -%} + + {% for option in (section.value | dict2items) -%} + {% if not option.key.startswith('__') and not option.key.endswith('__') -%} + + {% if option.value | type_debug == 'list' -%} + {% if option.value | length == 0 -%} + {{ option.key }} => + {% else -%} + {% for option_element in option.value -%} + {% set option_value = 'yes' if (option_element is boolean and option_element == true) else ('no' if (option_element is boolean and option_element == false) else option_element ) -%} + {{ option.key }} => {{ option_value }} + {% endfor -%} + {% endif -%} + {% elif option.value is mapping -%} + {% set option_is_object = option.value['__inner_objects__'] | d(ns.objects) -%} + + {% if 
option.value['__comment__'] is string -%} + ; {{ option.value['__comment__'] }} + {% endif -%} + {% if option.value['__include_before__'] is string -%} + #include {{ option.value['__include_before__'] }} + {% endif -%} + {% if option.value['__try_include_before__'] is string -%} + #tryinclude {{ option.value['__try_include_before__'] }} + {% endif -%} + + {% set option_value = 'yes' if (option.value['__value__'] is boolean and option.value['__value__'] == true) else ('no' if (option.value['__value__'] is boolean and option.value['__value__'] == false) else option.value['__value__'] ) -%} + {{ option.key }} {{ '=>' if option_is_object else '=' }} {{ option_value }} + + {% if option.value['__include_after__'] is string -%} + #include {{ option.value['__include_after__'] }} + {% endif -%} + {% if option.value['__try_include_after__'] is string -%} + #tryinclude {{ option.value['__try_include_after__'] }} + {% endif -%} + {% else -%} + {% set option_value = 'yes' if (option.value is boolean and option.value == true) else ('no' if (option.value is boolean and option.value == false) else option.value ) -%} + {{ option.key }} {{ '=>' if ns.objects else '=' }} {{ option_value }} + {% endif -%} + + {% endif -%} + {% endfor -%} + {% if not loop.last %} + + {% endif -%} + {% elif (section.key == '__include__') and (section.value is string) -%} + #include {{ section.value }} + {% elif (section.key == '__try_include__') and (section.value is string) -%} + #tryinclude {{ section.value }} + {% endif -%} + {% endfor -%} + {% endif -%} +{% endmacro -%} + + diff --git a/roles/asterisk/templates/config.j2 b/roles/asterisk/templates/config.j2 new file mode 100644 index 0000000..b248709 --- /dev/null +++ b/roles/asterisk/templates/config.j2 @@ -0,0 +1,3 @@ +{%- from '_macros.j2' import config_template -%} + +{{- config_template(item if (item is string) else (item.config | d(item.name)), asterisk_cfg) -}} diff --git a/roles/asterisk/templates/custom_pjsip.j2 b/roles/asterisk/templates/custom_pjsip.j2 new file mode 100644 index 0000000..04ec694 --- /dev/null +++ b/roles/asterisk/templates/custom_pjsip.j2 @@ -0,0 +1,75 @@ +{% macro trunk_options(opts) -%} + {% for opt in (opts | d({}) | dict2items) -%} + {{ opt.key }} = {{ 'yes' if (opt.value is boolean and opt.value == true) else ('no' if (opt.value is boolean and opt.value == false) else opt.value ) }} + {% endfor -%} +{% endmacro -%} + + + +{% for user in asterisk_users | d({}) | dict2items -%} + {% if user.value is mapping -%} + {% if user.value['__comment__'] is string -%} + ; {{ user.value['__comment__'] }} + {% endif -%} + +[auth-{{ user.key }}](auth-common) +username = {{ user.value['login'] | d(user.key) }} +password = {{ user.value['password'] }} + +[{{ user.key }}](aor-common) + +[{{ user.key }}](endpoint-lan) +auth = auth-{{ user.key }} +aors = {{ user.key }} +callerid = {{ user.value['callerid'] | d(user.key) }} <{{ user.key }}> + + + {% endif -%} +{% endfor %} + +{% for trunk in asterisk_trunks | d({}) | dict2items -%} + {% if trunk.value is mapping -%} + {% if trunk.value['__comment__'] is string -%} + ; {{ trunk.value['__comment__'] }} + {% endif -%} + +[transport-{{ trunk.key }}](transport-udp,transport-ext) +{{ trunk_options(trunk.value['transport']) }} +{# #} +[registration-{{ trunk.key }}](registration-common) +outbound_auth = auth-{{ trunk.key }} +endpoint = endpoint-{{ trunk.key }} +transport = transport-{{ trunk.key }} +{{ trunk_options(trunk.value['registration']) }} +{# #} +[auth-{{ trunk.key }}](auth-common) +{{ 
trunk_options(trunk.value['auth']) }} +{# #} +[aor-{{ trunk.key }}](aor-common) +{{ trunk_options(trunk.value['aor']) }} +{# #} +[endpoint-{{ trunk.key }}](endpoint-trunk) +transport = transport-{{ trunk.key }} +context = inbound-{{ trunk.key }} +outbound_auth = auth-{{ trunk.key }} +aors = aor-{{ trunk.key }} +{{ trunk_options(trunk.value['endpoint']) }} +{# #} +[identify-{{ trunk.key }}] +type = identify +endpoint = endpoint-{{ trunk.key }} +{{ trunk_options(trunk.value['identify']) }} + + {%- if not loop.last %} + + + {% endif -%} + {% endif -%} +{% endfor %} + + + +[reslist-all] +type=resource_list +event=presence +list_item={{ asterisk_users | d({}) | dict2items | map(attribute='key') | list | join(',') }} diff --git a/roles/asterisk/templates/custom_queues.j2 b/roles/asterisk/templates/custom_queues.j2 new file mode 100644 index 0000000..86b62ce --- /dev/null +++ b/roles/asterisk/templates/custom_queues.j2 @@ -0,0 +1,26 @@ +{% for user in asterisk_users | d({}) | dict2items -%} + {% if user.value is mapping -%} +[queue-{{ user.key }}]({{ user.value['self_queue_type'] | d('queue-single') }}) +member => PJSIP/{{ user.key }},0,{{ user.value['callerid'] | d(user.key) }} + {% endif -%} +{% endfor %} + + +{% set defined_queues = (asterisk_users | d({}) | dict2items | map(attribute='value') | list | selectattr('queues', 'defined') | map(attribute='queues') | list | flatten | unique | list) -%} +{% set auto_queues = (asterisk_users | d({}) | dict2items | rejectattr('value.queues', 'defined') | map(attribute='key') | list) -%} +{% set all_queues = ((defined_queues | d([])) + (auto_queues | d([])) | unique | list) -%} + +{% for queue in defined_queues -%} + {% if asterisk_users[queue] is not defined -%} + {% set queue_users = (asterisk_users | d({}) | dict2items | selectattr('value.queues', 'defined') | selectattr('value.queues', 'contains', queue) | list) -%} + {% if queue_users | length > 1 -%} + [queue-{{ queue }}](queue-template) + {% for user in queue_users -%} + member => PJSIP/{{ user.key }},0,{{ user.value['callerid'] | d(user.key) }} + {% endfor -%} + {%- if not loop.last %} + + {% endif -%} + {% endif -%} + {% endif -%} +{% endfor -%} diff --git a/roles/asterisk/templates/ext_ivr.j2 b/roles/asterisk/templates/ext_ivr.j2 new file mode 100644 index 0000000..5a82d24 --- /dev/null +++ b/roles/asterisk/templates/ext_ivr.j2 @@ -0,0 +1,58 @@ +; IVR +; 1 - went to IVR +; 2 - pressed a button +; 3 - did not press anything + + +[ivr-dial] +exten => s,1,Set(CDR(ivr)=2) + same => n,Gosub(inbound-queued,s,1(${ARG1})) + same => n,Hangup() + + +[ivr-dial-all] +exten => s,1,Set(CDR(ivr)=3) + same => n,Queue(queue-all,inrt,,,,,,pre-call) + + +[ivr-select] +exten => 1,1,Gosub(ivr-dial,s,1(1)) +exten => 2,1,Gosub(ivr-dial,s,1(3)) +exten => 3,1,Gosub(ivr-dial,s,1(2)) +exten => 4,1,Gosub(ivr-dial,s,1(11)) +exten => 5,1,Gosub(ivr-dial,s,1(9)) + + +[ivr] +exten => s,1,Answer(250) + same => n,Set(CDR(ivr)=1) + same => n,Set(TIMEOUT(digit)=3) + same => n,Set(TIMEOUT(response)=3) + same => n,Background(custom/ivr-intro-12-2021,m,,ivr-select) + same => n,WaitExten(3) + same => n,Gosub(ivr-dial-all,s,1) + same => n,Hangup() + + + + + + + +[ivr-select-spb] +exten => 1,1,Gosub(ivr-dial,s,1(6)) +exten => 2,1,Gosub(ivr-dial,s,1(8)) + +[ivr-dial-all-spb] +exten => s,1,Set(CDR(ivr)=3) + same => n,Queue(queue-spb,inrt,,,,,,pre-call) + +[ivr-spb] +exten => s,1,Answer(250) + same => n,Set(CDR(ivr)=1) + same => n,Set(TIMEOUT(digit)=3) + same => n,Set(TIMEOUT(response)=3) + same => 
n,Background(custom/ivr-intro-spb,m,,ivr-select-spb) + same => n,WaitExten(3) + same => n,Gosub(ivr-dial-all-spb,s,1) + same => n,Hangup() diff --git a/roles/asterisk/templates/ext_utils.j2 b/roles/asterisk/templates/ext_utils.j2 new file mode 100644 index 0000000..7877a09 --- /dev/null +++ b/roles/asterisk/templates/ext_utils.j2 @@ -0,0 +1,88 @@ +; Extension utilities + + +[record-start] +exten => s,1,ExecIf($["${IS_RECORDING}"="1"]?Return()) + same => n,Set(UID=${UNIQUEID}.${RAND(1,100000)}) + same => n,Set(CDR(actualuniqueid)=${UID}) + same => n,MixMonitor({{ asterisk_recordings_dir }}/${UID}.wav,b,oggenc -q 5 -o {{ asterisk_recordings_dir }}/${UID}.ogg {{ asterisk_recordings_dir }}/${UID}.wav && rm {{ asterisk_recordings_dir }}/${UID}.wav) + same => n,Set(__IS_RECORDING=1) + same => n,Return() + +[record-stop] +exten => s,1,StopMixMonitor() + same => n,Return() + + +; Filtering CallerID +[clear-callerid] +exten => s,1,Verbose(Filtering CallerID) + same => n,Set(CALLERID(num)=${FILTER(0-9,${CALLERID(num)})}) + same => n,Set(CALLERID(name)=) + same => n,Return() + + +; Setting up volume control +[volume-setup] +exten => s,1,Set(CURRENT_VOLUME_TX=1) + same => n,Set(CURRENT_VOLUME_RX=1) + same => n,Set(__DYNAMIC_FEATURES=call-controls) + same => n,Return() + +[volume-up-tx] +exten => s,1,Set(CURRENT_VOLUME_TX=$[${CURRENT_VOLUME_TX}*1.25]) + same => n,Set(VOLUME(TX)=${CURRENT_VOLUME_TX}) + same => n,Return() + +[volume-up-rx] +exten => s,1,Set(CURRENT_VOLUME_RX=$[${CURRENT_VOLUME_RX}*1.25]) + same => n,Set(VOLUME(RX)=${CURRENT_VOLUME_RX}) + same => n,Return() + +[volume-down-tx] +exten => s,1,Set(CURRENT_VOLUME_TX=$[${CURRENT_VOLUME_TX}*0.75]) + same => n,Set(VOLUME(TX)=${CURRENT_VOLUME_TX}) + same => n,Return() + +[volume-down-rx] +exten => s,1,Set(CURRENT_VOLUME_RX=$[${CURRENT_VOLUME_RX}*0.75]) + same => n,Set(VOLUME(RX)=${CURRENT_VOLUME_RX}) + same => n,Return() + +[volume-increase-all] +exten => s,1,Set(CURRENT_VOLUME_RX=2) + same => n,Set(CURRENT_VOLUME_TX=2) + same => n,Set(VOLUME(RX)=2) + same => n,Set(VOLUME(TX)=2) + same => n,Return() + + +; An invalid extension has been dialed +[invalid-ext] +exten => s,1,Answer(250) + same => n,Playback(custom/invalid-ext) + same => n,Wait(0.5) + same => n,Hangup() + + +; An extension has been dialed, but it is currently offline +[offline-ext] +exten => s,1,Answer(250) + same => n,Playback(custom/this-offline) + same => n,Wait(0.5) + same => n,Hangup() + + + + +; Output "Busy" signal +[busy] +exten => s,1,Busy(10) + same => n,Wait(1) + same => n,Hangup() + +; Output "Congestion" signal +[congestion] +exten => s,1,Congestion(10) + same => n,Wait(1) + same => n,Hangup() diff --git a/roles/asterisk/templates/extensions.j2 b/roles/asterisk/templates/extensions.j2 new file mode 100644 index 0000000..972da56 --- /dev/null +++ b/roles/asterisk/templates/extensions.j2 @@ -0,0 +1,303 @@ +[general] +static=yes ; never rewrite this file +writeprotect=yes +autofallthrough=yes ; hang up if end of dialplan is reached +clearglobalvars=yes ; clear global vars on dialplan reload + + +[globals] +#include ext_utils.conf ; include utilities +#include ext_ivr.conf ; include IVR + +TRANSFER_CONTEXT=transfer + + + +[transfer] +exten => 0,1,Verbose(TRANSFER IVR) + same => n,Set(__IS_RECORDING=0) + same => n,StopMixMonitor() + same => n,ForkCDR(erv) + same => n,Gosub(pre-any,s,1(IVR,TRANSFER)) + same => n,Gosub(ivr,s,1) + same => n,Wait(0.5) + same => n,Hangup() + +exten => _Z,1,Verbose(TRANSFER) + same => n,Set(__IS_RECORDING=0) + same => n,StopMixMonitor() + same => 
n,ForkCDR(erv)
+ same => n,Gosub(pre-any,s,1(${EXTEN},TRANSFER))
+ same => n,Gosub(inbound-queued,s,1(${EXTEN}))
+ same => n,Wait(0.5)
+ same => n,Hangup()
+
+include => catchall
+
+
+
+[pre-any]
+exten => s,1,Gosub(clear-callerid,s,1)
+ same => n,Set(__CALLER=${CALLERID(num)})
+ same => n,Set(__CALLEE=${ARG1})
+ same => n,Set(__CALL_OPERATION=${ARG2})
+ same => n,Set(CDR(actualsrc)=${CALLER})
+ same => n,Set(CDR(actualdst)=${CALLEE})
+ same => n,Set(CDR(realcall)=1)
+ same => n,Verbose(${CALL_OPERATION}: ${CALLER} -> ${CALLEE})
+ same => n,MSet(LIMIT_PLAYAUDIO_CALLER=no,LIMIT_PLAYAUDIO_CALLEE=yes)
+ same => n,Set(LIMIT_TIMEOUT_FILE=custom/call-expired)
+ same => n,Set(LIMIT_WARNING_FILE=custom/call-expiring-soon)
+ same => n,Return()
+
+
+[pre-call]
+exten => s,1,Gosub(volume-setup,s,1)
+ same => n,Gosub(record-start,s,1)
+ same => n,Set(CDR(realcall)=2)
+ same => n,Set(CDR(startedat)=${EPOCH})
+ same => n,Set(CDR(actualdisposition)=ANSWERED)
+ same => n,Set(CDR(actualdst2)=${CALLERID(num)})
+ same => n,Return()
+
+
+[pre-out-call]
+exten => s,1,Gosub(volume-setup,s,1)
+ same => n,Gosub(record-start,s,1)
+ same => n,Set(CDR(realcall)=2)
+ same => n,Set(CDR(startedat)=${EPOCH})
+ same => n,Return()
+
+
+
+
+
+
+
+; 1.1. Place an inbound call into a single queue
+[inbound-queued]
+exten => s,1,Gosub(pre-any,s,1(${ARG1},INBOUND-QUEUED))
+ same => n,Verbose(DS ${DEVICE_STATE(PJSIP/${CALLEE})})
+ same => n,GosubIf($["${DEVICE_STATE(PJSIP/${CALLEE})}" = "BUSY"]?inbound-queued-busy,s,1)
+ same => n,GosubIf($["${DEVICE_STATE(PJSIP/${CALLEE})}" = "INUSE"]?inbound-queued-busy,s,1)
+ same => n,GosubIf($["${DEVICE_STATE(PJSIP/${CALLEE})}" = "RINGINUSE"]?inbound-queued-busy,s,1)
+ same => n,GosubIf($["${DEVICE_STATE(PJSIP/${CALLEE})}" = "RINGING"]?inbound-queued-busy,s,1)
+ same => n,GosubIf($["${DEVICE_STATE(PJSIP/${CALLEE})}" = "UNAVAILABLE"]?inbound-queued-unavail,s,1)
+ same => n,Queue(queue-${CALLEE},inrt,,,,,,pre-call)
+ same => n,Wait(0.5)
+ same => n,Hangup()
+
+
+; 1.2. Callee is busy, play a message (if appropriate) and place it into a single queue
+[inbound-queued-busy]
+exten => s,1,Verbose(QUEUED BUSY)
+ same => n,GotoIf($["${CALLEE}"="9"]?busy-le)
+ same => n,GotoIf($[ $["${CALLEE}"="6"] | $["${CALLEE}"="8"] | $["${CALLEE}"="10"] | $["${CALLEE}"="12"] ]?busy-spb)
+ same => n,Background(custom/this-busy-ask-redirect,m,,inbound-queued-select-busy)
+ same => n,Queue(queue-${CALLEE},inrt,,,,,,pre-call)
+ same => n,Wait(0.5)
+ same => n,Hangup()
+
+ same => n(busy-le),Playback(custom/this-busy-le)
+ same => n,Queue(queue-${CALLEE},inrt,,,,,,pre-call)
+ same => n,Wait(0.5)
+ same => n,Hangup()
+
+ same => n(busy-spb),Playback(custom/this-busy-spb)
+ same => n,Queue(queue-${CALLEE},inrt,,,,,,pre-call)
+ same => n,Wait(0.5)
+ same => n,Hangup()
+
+
+
+; 1.25. 
Callee is not available, play a message and place it into a single queue +;same => n,GosubIf($[ $["${CALLEE}"="6"] | $["${CALLEE}"="8"] | $["${CALLEE}"="10"] | $["${CALLEE}"="12"] ]?inbound-queued-unavail-spb,s,1) + +[inbound-queued-unavail] +exten => s,1,Verbose(QUEUED UNAVAIL) + same => n,GosubIf($["${CALLEE}"="9"]?inbound-queued-unavail-le,s,1) + same => n,Playback(custom/this-unavail-will-redirect) + same => n,Set(CDR(ivr)=3) + same => n,Queue(queue-some-${CALLEE},inrt,,,,,,pre-call) + same => n,Wait(0.5) + same => n,Hangup() + + +; 1.25.1 LE callee is not available, play a message and hang up +[inbound-queued-unavail-le] +exten => s,1,Playback(custom/this-unavail-le) + same => n,Wait(0.5) + same => n,Hangup() + + +; 1.25.2 SPB callee is not available, play a message and hang up +;[inbound-queued-unavail-spb] +;exten => s,1,Playback(custom/this-unavail-spb) +; same => n,Wait(0.5) +; same => n,Hangup() + + + + + +; 1.3. Caller has requested to join a "some" queue, place it there +[inbound-queued-to-some] +exten => s,1,Verbose(QUEUE TO SOME) + same => n,Set(CDR(ivr)=3) + same => n,Queue(queue-some-${CALLEE},inrt,,,,,,pre-call) + same => n,Wait(0.5) + same => n,Hangup() + + +; 1.1.1. Allow callers to exit from a background playback to dial some +[inbound-queued-select-busy] +exten => 1,1,Gosub(inbound-queued-to-some,s,1) + + +; 1.2.1. Allow callers to exit from a queue to dial some +; Invalid DTMF keypresses get redirected back to inbound queue +[inbound-queued-inqueue-busy] +exten => 1,1,Gosub(inbound-queued-to-some,s,1) +exten => i,1,Gosub(inbound-queued,s,1(${CALLEE})) + + + + + +; Inbound calls from Multifon trunk to LE endpoint (9) +[inbound-multifon] +exten => _Z.,1,Gosub(inbound-queued,s,1(9)) + same => n,Hangup() + + +; Inbound calls from Dom.ru 222003 endpoint directly to ext 1 +[inbound-domru-3] +exten => _Z.,1,Gosub(inbound-queued,s,1(1)) + same => n,Hangup() + + +; Inbound calls from Dom.ru 222004 endpoint to IVR +[inbound-domru-4] +exten => _Z.,1,Gosub(pre-any,s,1(IVR,INBOUND)) + same => n,Gosub(ivr,s,1) + same => n,Wait(0.5) + same => n,Hangup() + + +; Inbound calls from Smart SPB trunk to SPB IVR +[inbound-smart-spb] +exten => _Z.,1,Gosub(pre-any,s,1(IVR,INBOUND)) + same => n,Gosub(ivr-spb,s,1) + same => n,Wait(0.5) + same => n,Hangup() + + + + + + + +; Outbound calls from all local endpoints +[outbound] +exten => _Z,hint,PJSIP/${EXTEN} +exten => _ZX,hint,PJSIP/${EXTEN} +exten => _Z,1,Gosub(outbound-internal,s,1(${EXTEN})) +exten => _ZX,1,Gosub(outbound-internal,s,1(${EXTEN})) + + +exten => _7XXXXXXXXXX,1,GosubIf($[ $["${CALLERID(num)}"="6"] | $["${CALLERID(num)}"="8"] | $["${CALLERID(num)}"="10"] | $["${CALLERID(num)}"="12"] ]?outbound-external,s,1(8${EXTEN:1}):outbound-external,s,1(+${EXTEN})) +exten => _8XXXXXXXXXX,1,GosubIf($[ $["${CALLERID(num)}"="6"] | $["${CALLERID(num)}"="8"] | $["${CALLERID(num)}"="10"] | $["${CALLERID(num)}"="12"] ]?outbound-external,s,1(${EXTEN}):outbound-external,s,1(+7${EXTEN:1})) +exten => _+7XXXXXXXXXX,1,GosubIf($[ $["${CALLERID(num)}"="6"] | $["${CALLERID(num)}"="8"] | $["${CALLERID(num)}"="10"] | $["${CALLERID(num)}"="12"] ]?outbound-external,s,1(8${EXTEN:2}):outbound-external,s,1(${EXTEN})) +exten => _9XXXXXXXXX,1,GosubIf($[ $["${CALLERID(num)}"="6"] | $["${CALLERID(num)}"="8"] | $["${CALLERID(num)}"="10"] | $["${CALLERID(num)}"="12"] ]?outbound-external,s,1(8${EXTEN}):outbound-external,s,1(+7${EXTEN})) +exten => _XXXXXX,1,Gosub(outbound-external,s,1(+78332${EXTEN})) +exten => _XXXXXXX,1,GosubIf($[ $["${CALLERID(num)}"="6"] | 
$["${CALLERID(num)}"="8"] | $["${CALLERID(num)}"="10"] | $["${CALLERID(num)}"="12"] ]?outbound-external,s,1(${EXTEN})) +include => service +include => catchall + + + + + + +; Internal calls +[outbound-internal] +exten => s,1,Gosub(pre-any,s,1(${ARG1},INTERNAL)) + + same => n,GotoIf($["${CALLERID(number)}" = "${ARG1}"]?busy,s,1) ; dialing the same extension as caller + same => n,GotoIf($["${DEVICE_STATE(PJSIP/${ARG1})}" = "INVALID"]?invalid-ext,s,1) ; extension is invalid + same => n,GotoIf($["${DEVICE_STATE(PJSIP/${ARG1})}" = "UNAVAILABLE"]?offline-ext,s,1) ; extension is valid but offline + + same => n,Dial(PJSIP/${ARG1},900,girtTL(3600000:60000)U(pre-out-call)) + same => n,Wait(0.5) + same => n,Hangup() + + + +[outbound-external] +exten => s,1,Gosub(pre-any,s,1(${ARG1},OUTBOUND)) + + same => n,GosubIf($[${CALLERID(num)} = 9]?outbound-multifon,s,1(${ARG1})) + same => n,GosubIf($[${CALLERID(num)} = 6]?outbound-smart-spb,s,1(${ARG1})) + same => n,GosubIf($[${CALLERID(num)} = 8]?outbound-smart-spb,s,1(${ARG1})) + same => n,GosubIf($[${CALLERID(num)} = 10]?outbound-smart-spb,s,1(${ARG1})) + same => n,GosubIf($[${CALLERID(num)} = 12]?outbound-smart-spb,s,1(${ARG1})) + same => n,Gosub(outbound-domru,s,1(${ARG1})) + same => n,Wait(0.5) + same => n,Hangup() + + + +[outbound-multifon] +exten => s,1,Dial(PJSIP/${ARG1}@endpoint-multifon,900,irTL(3600000:60000)U(pre-out-call)) + same => n,Wait(0.5) + same => n,Hangup() + +[outbound-domru] +exten => s,1,Dial(PJSIP/${ARG1}@endpoint-domru-4,900,irTL(3600000:60000)U(pre-out-call)) + same => n,Wait(0.5) + same => n,Hangup() + +[outbound-smart-spb] +exten => s,1,Dial(PJSIP/${ARG1}@endpoint-smart-spb,900,irTL(3600000:60000)U(pre-out-call)) + same => n,Wait(0.5) + same => n,Hangup() + + + +[service] +; Simple ring test +exten => 001,1,Ringing() + same => n,Wait(20) + same => n,Hangup() + +; Hello World playback +exten => 002,1,Answer(250) + same => n,Playback(hello-world) + same => n,Wait(0.5) + same => n,Hangup() + +; Echo test +exten => 003,1,Answer(250) + same => n,Playback(demo-echotest) + same => n,Echo + same => n,Playback(demo-echodone) + same => n,Wait(0.5) + same => n,Hangup() + +; Internal IVR +exten => 004,1,Answer(250) + same => n,Gosub(ivr,s,1) + same => n,Wait(0.5) + same => n,Hangup() + +; Congestion test +exten => 005,1,Congestion() + same => n,Wait(20) + same => n,Hangup() + + + + +[catchall] +exten => _X.,1,Gosub(invalid-ext,s,1) ; go to invalid extension macro on all extensions +exten => i,1,Gosub(invalid-ext,s,1) ; same, but with invalid extensions diff --git a/roles/backup/tasks/add.yml b/roles/backup/tasks/add.yml new file mode 100644 index 0000000..a81c3cb --- /dev/null +++ b/roles/backup/tasks/add.yml @@ -0,0 +1,5 @@ +- name: add backup dirs to collected backup dirs + set_fact: + collected_backup_dirs: "{{ (collected_backup_dirs | d([])) + + ([backup_items] if backup_items is string else backup_items) }}" + when: backup_items is defined and ((backup_items | type_debug == 'list') or backup_items is string) diff --git a/roles/backup/tasks/main.yml b/roles/backup/tasks/main.yml new file mode 100644 index 0000000..e8dff7e --- /dev/null +++ b/roles/backup/tasks/main.yml @@ -0,0 +1,8 @@ +- name: add to backup plan + include_tasks: add.yml + when: function is defined and function == 'add' + + +- name: setup backups + include_tasks: setup.yml + when: function is defined and function == 'setup' diff --git a/roles/backup/tasks/setup.yml b/roles/backup/tasks/setup.yml new file mode 100644 index 0000000..af59e0b --- /dev/null +++ 
b/roles/backup/tasks/setup.yml @@ -0,0 +1,31 @@ +- name: notify that backups are not supported + debug: + msg: backup host is missing, will not set up backups + when: services.backup is not mapping + + +- name: install restic with custom configuration + block: + - include_role: + name: restic + vars: + backup: "{{ backup_cfg }}" + + when: services.backup is mapping and backup_cfg is mapping + + +- name: install restic with default configuration + block: + - include_role: + name: restic + vars: + backup: + dirs: "{{ collected_backup_dirs }}" + password: "{{ backup_password }}" + tags: automated + filter: + - "*.log" + - "node_modules" + - ".npm" + + when: services.backup is mapping and backup_cfg is not defined and backup_password is defined diff --git a/roles/blocky/defaults/main.yml b/roles/blocky/defaults/main.yml new file mode 100644 index 0000000..62d51a6 --- /dev/null +++ b/roles/blocky/defaults/main.yml @@ -0,0 +1,52 @@ +blocky_user: blocky +blocky_group: blocky +blocky_dir: /opt/blocky +blocky_conf_dir: /etc/blocky +blocky_conf_file: "{{ blocky_conf_dir }}/blocky.yml" + +blocky_tls_ecc384_cert: "{{ blocky_conf_dir }}/ecc384.crt" +blocky_tls_ecc384_key: "{{ blocky_conf_dir }}/ecc384.key" + +blocky_port: 9000 +blocky_enable_dot: yes + +blocky_default_groups: + - selector: default + groups: + - all + +blocky_default_config: + port: 53 + bootstrapDns: 1.1.1.1 + logLevel: warn + logTimestamp: no + upstreamTimeout: 4s + + httpPort: "127.0.0.1:{{ blocky_port }}" + + prometheus: + enable: "{{ host_metrics }}" + + caching: + maxTime: 8h + maxItemsCount: 15000 + prefetchMaxItemsCount: 1000 + + upstream: + default: + - tcp-tls:anycast.censurfridns.dk:853 + - tcp-tls:dns.quad9.net:853 + - tcp-tls:one.one.one.one:853 + - tcp-tls:dns.digitale-gesellschaft.ch:853 + + blocking: + blackLists: + all: + - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts + - https://block.energized.pro/extensions/regional/formats/hosts + - https://block.energized.pro/bluGo/formats/hosts + whiteLists: + all: + - https://raw.githubusercontent.com/anudeepND/whitelist/master/domains/whitelist.txt + refreshPeriod: 8h + blockTTL: 5m diff --git a/roles/blocky/handlers/main.yml b/roles/blocky/handlers/main.yml new file mode 100644 index 0000000..86cf3be --- /dev/null +++ b/roles/blocky/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart blocky + service: + name: blocky + state: restarted diff --git a/roles/blocky/tasks/main.yml b/roles/blocky/tasks/main.yml new file mode 100644 index 0000000..800ecef --- /dev/null +++ b/roles/blocky/tasks/main.yml @@ -0,0 +1,185 @@ +- name: import internal tld resolver vars if internal nameserver is present + include_vars: + file: internal.yml + when: services.internal_ns is defined + + +- name: import ipv6 disable snippet + include_vars: + file: disable_ipv6.yml + hash_behaviour: merge + when: blocky_disable_ipv6 | d(false) == true + + +- name: import tls support + include_vars: + file: tls.yml + hash_behaviour: merge + when: host_tls and blocky_enable_dot + + +- name: set blocky_cfg + set_fact: + blocky_cfg: "{{ blocky_default_config | d({}) | combine(blocky_config | d({}), recursive=true) }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - libcap + - libc6-compat + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ blocky_user }}" + group: "{{ blocky_group }}" + dir: "{{ blocky_dir }}" + notify: restart blocky + + +- name: create directories + file: + path: "{{ item }}" + 
state: directory + mode: 0755 + owner: "{{ blocky_user }}" + group: "{{ blocky_group }}" + notify: restart blocky + loop: + - "{{ blocky_conf_dir }}" + - "{{ blocky_dir }}" + + +- name: get and extract latest version of blocky + include_tasks: tasks/get_lastversion.yml + vars: + package: + name: 0xERR0R/blocky + location: github + assets: yes + asset_filter: 'Linux_x86_64.tar.gz$' + file: "{{ blocky_dir }}/last_version" + extract: "{{ blocky_dir }}" + user: "{{ blocky_user }}" + group: "{{ blocky_group }}" + notify: restart blocky + + +- name: template config file + template: + src: blocky.j2 + dest: "{{ blocky_conf_file }}" + force: yes + mode: 0400 + owner: "{{ blocky_user }}" + group: "{{ blocky_group }}" + lstrip_blocks: yes + notify: restart blocky + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/blocky + force: yes + mode: "+x" + notify: restart blocky + + +- name: ensure blocky binary has executable bit set + file: + path: "{{ blocky_dir }}/blocky" + mode: "+x" + + +- name: add cap_net_bind_service to blocky executable + community.general.capabilities: + path: "{{ blocky_dir }}/blocky" + capability: cap_net_bind_service+ep + changed_when: no + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + external_tld: "{{ host_tld }}" + + +- block: + - name: get certificate file type + stat: + path: /etc/nginx/tls/ecc384.crt + register: stat + + + - name: copy nginx ecc384 certificate to blocky dir + copy: + src: "/etc/nginx/tls/{{ item.src }}" + dest: "{{ item.dest }}" + force: yes + mode: 0400 + owner: "{{ blocky_user }}" + group: "{{ blocky_group }}" + remote_src: yes + loop: + - src: ecc384.crt + dest: "{{ blocky_tls_ecc384_cert }}" + - src: ecc384.key + dest: "{{ blocky_tls_ecc384_key }}" + when: not (stat.stat.islnk is defined and stat.stat.islnk) + + + - name: create symlinks + file: + path: "{{ item.dest }}" + src: "/etc/nginx/tls/{{ item.src }}" + state: link + force: yes + loop: + - src: ecc384.crt + dest: "{{ blocky_tls_ecc384_cert }}" + - src: ecc384.key + dest: "{{ blocky_tls_ecc384_key }}" + when: stat.stat.islnk is defined and stat.stat.islnk + + when: host_tls and blocky_enable_dot + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ blocky_conf_dir }}" + + +- name: add prometheus metric target + include_role: + name: prometheus + vars: + function: add_target + target: + name: blocky + scheme: "{{ host_protocol }}" + when: host_metrics + + +- name: flush handlers + meta: flush_handlers + + +- name: enable and start blocky + service: + name: blocky + enabled: yes + state: started diff --git a/roles/blocky/templates/blocky.j2 b/roles/blocky/templates/blocky.j2 new file mode 100644 index 0000000..e34cafb --- /dev/null +++ b/roles/blocky/templates/blocky.j2 @@ -0,0 +1,7 @@ +{%- set mappings = blocky_default_mappings | items2dict(key_name='tld', value_name='resolver') -%} +{%- set conditional = { 'conditional': { 'mapping': mappings }} -%} + +{%- set groups = blocky_default_groups | items2dict(key_name='selector', value_name='groups') -%} +{%- set clientGroupsBlock = { 'blocking': { 'clientGroupsBlock': groups }} -%} + +{{- blocky_cfg | combine(clientGroupsBlock, recursive=true) | combine(conditional, recursive=true) | to_nice_yaml(indent=2, width=512) }} diff --git a/roles/blocky/templates/init.j2 b/roles/blocky/templates/init.j2 new file mode 100644 index 0000000..c7f8412 --- /dev/null 
+++ b/roles/blocky/templates/init.j2 @@ -0,0 +1,19 @@ +#!/sbin/openrc-run + +name="blocky" +command="{{ blocky_dir }}/blocky" +command_args="--config {{ blocky_conf_file | quote }}" +directory="{{ blocky_dir }}" +command_user="{{ blocky_user }}:{{ blocky_group }}" +pidfile="/var/run/blocky.pid" +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + +depend() { + need net + use dns +} + +start_pre() { + setcap 'cap_net_bind_service=+ep' {{ (blocky_dir ~ '/blocky') | quote }} +} diff --git a/roles/blocky/templates/nginx_server.j2 b/roles/blocky/templates/nginx_server.j2 new file mode 100644 index 0000000..1215468 --- /dev/null +++ b/roles/blocky/templates/nginx_server.j2 @@ -0,0 +1,16 @@ +location / { + return 404; +} + +location /dns-query { + proxy_pass http://127.0.0.1:{{ blocky_port }}; + proxy_set_header Connection ""; +} + +{% if host_metrics -%} +location /metrics { + proxy_pass http://127.0.0.1:{{ blocky_port }}; + allow {{ int_net }}; + deny all; +} +{%- endif %} diff --git a/roles/blocky/vars/disable_ipv6.yml b/roles/blocky/vars/disable_ipv6.yml new file mode 100644 index 0000000..0fd439d --- /dev/null +++ b/roles/blocky/vars/disable_ipv6.yml @@ -0,0 +1,4 @@ +blocky_default_config: + filtering: + queryTypes: + - AAAA diff --git a/roles/blocky/vars/internal.yml b/roles/blocky/vars/internal.yml new file mode 100644 index 0000000..373342f --- /dev/null +++ b/roles/blocky/vars/internal.yml @@ -0,0 +1,7 @@ +blocky_default_mappings: + - tld: "{{ int_tld }}" + resolver: "{%- if services.internal_ns is mapping -%}\ + {{- hostvars[services.internal_ns.hostname]['ansible_host'] -}}\ + {%- else -%}\ + {{- hostvars | dict2items | selectattr('key', 'in', services.internal_ns | map(attribute='hostname')) | map(attribute='value') | list | map(attribute='ansible_host') | list | join(',') -}}\ + {%- endif -%}" diff --git a/roles/blocky/vars/tls.yml b/roles/blocky/vars/tls.yml new file mode 100644 index 0000000..e61aacd --- /dev/null +++ b/roles/blocky/vars/tls.yml @@ -0,0 +1,4 @@ +blocky_default_config: + tlsPort: 853 + certFile: "{{ blocky_tls_ecc384_cert }}" + keyFile: "{{ blocky_tls_ecc384_key }}" diff --git a/roles/ca/defaults/main.yml b/roles/ca/defaults/main.yml new file mode 100644 index 0000000..f5cf563 --- /dev/null +++ b/roles/ca/defaults/main.yml @@ -0,0 +1,27 @@ +ca_key_types: + - { name: rsa2048, type: RSA, size: 2048 } + - { name: ecc384, type: ECC, curve: secp384r1, digest: sha384 } + +ca_key_names: "{{ ca_key_types | map(attribute='name') | list }}" + +ca_default_items: + - { type: ecc384 } + - { type: rsa2048 } + +ca_dir: /etc/ca + +ca_rp: root- +ca_ip: inter- +ca_crt_ext: crt +ca_key_ext: key +ca_csr_ext: csr +ca_pfx_ext: pfx + +# when to start to reissue certs +ca_reissue_period: 8w + +ca_options: {} + +crl_last_update_time: +8w +crl_next_update_time: +24w +crl_dir: /opt/crl \ No newline at end of file diff --git a/roles/ca/tasks/add_cert.yml b/roles/ca/tasks/add_cert.yml new file mode 100644 index 0000000..2961349 --- /dev/null +++ b/roles/ca/tasks/add_cert.yml @@ -0,0 +1,227 @@ +- include_tasks: prepare_item.yml + + +- name: define combined options + set_fact: + ca_combined: "{{ ca_options | d({}) | combine(item) }}" + + +- name: define cert parameters + set_fact: + key_path: "{%- if item.key is defined -%}{{ item.key }}\ + {%- else -%}{{ ca_combined.path ~ '/' ~ kt.name ~ '.' ~ ca_key_ext }}\ + {%- endif -%}" + + cert_path: "{%- if item.cert is defined -%}{{ item.cert }}\ + {%- else -%}{{ ca_combined.path ~ '/' ~ kt.name ~ '.' 
~ ca_crt_ext }}\ + {%- endif -%}" + + use_acme: "{{ ca_combined.acme | d(has_acme | d(false)) }}" + + +- name: define tld and presets + set_fact: + ca_tld: "{{ ca_combined.tld | d(host_tld) }}" + ca_presets: + web: + cn: FQDN + eku: ['clientAuth', 'serverAuth'] + ku: ['digitalSignature', 'keyEncipherment', 'keyAgreement'] + san: FQDN + psh: + cn: FQDN + eku: ['serverAuth'] + ku: ['digitalSignature', 'keyEncipherment', 'keyAgreement'] + san: FQDN + + +- name: select a preset + set_fact: + ca_preset: > + {% if item.preset is defined -%}{{ ca_presets[item.preset] }} + {%- elif ca_options.preset is defined -%}{{ ca_presets[ca_options.preset] }} + {%- else -%}{{ None }} + {%- endif %} + + +- name: generate private key + community.crypto.openssl_privatekey: + path: "{{ key_path }}" + size: "{{ kt.size | d(omit) }}" + curve: "{{ kt.curve | d(omit) }}" + type: "{{ kt.type }}" + backup: yes + force: no + format: pkcs8 + format_mismatch: convert + regenerate: never + mode: "{{ k_mode | d(omit) }}" + owner: "{{ k_owner | d(omit) }}" + group: "{{ k_group | d(omit) }}" + notify: "{{ ca_options.notify | d(omit) }}" + + +- name: generate in-memory csr request for private key + community.crypto.openssl_csr_pipe: + basic_constraints: + - 'CA:FALSE' + basic_constraints_critical: yes + digest: "{{ kt.digest | d(omit) }}" + key_usage_critical: yes + privatekey_path: "{{ key_path }}" + + common_name: "{%- if item.cn is defined -%}{{ item.cn }}\ + {%- elif ca_options.cn is defined -%}{{ ca_options.cn }}\ + {%- elif ca_preset.cn == 'FQDN' -%}{{ host_name ~ '.' ~ ca_tld }}\ + {%- elif ca_preset.cn is defined -%}{{ ca_preset.cn }}\ + {%- endif -%}" + + extended_key_usage: "{%- if item.eku is defined -%}{{ item.eku }}\ + {%- elif ca_options.eku is defined -%}{{ ca_options.eku }}\ + {%- elif ca_preset.eku is defined -%}{{ ca_preset.eku }}\ + {%- endif -%}" + + key_usage: "{%- if item.ku is defined -%}{{ item.ku }}\ + {%- elif ca_options.ku is defined -%}{{ ca_options.ku }}\ + {%- elif ca_preset.ku is defined -%}{{ ca_preset.ku }}\ + {%- else -%}{{ ['digitalSignature', 'keyEncipherment', 'keyAgreement'] }}\ + {%- endif -%}" + + subject_alt_name: "{%- if item.san is defined -%}{{ item.san }}\ + {%- elif ca_options.san is defined -%}{{ ca_options.san }}\ + {%- elif item.cn is defined -%}{{ ['DNS:' ~ item.cn] }}\ + {%- elif ca_options.cn is defined -%}{{ ['DNS:' ~ ca_options.cn] }}\ + {%- elif ca_preset.san == 'FQDN' -%}{{ ['DNS:' ~ host_name ~ '.' 
~ ca_tld] }}\ + {%- elif ca_preset.san is defined -%}{{ ca_preset.san }}\ + {%- endif -%}" + + ocsp_must_staple: "{{ (has_acme | d(false)) and (ca_options.ocsp_must_staple | d(false)) }}" + register: csr + changed_when: no + + +- name: check if cert already exists + stat: + path: "{{ cert_path }}" + register: cert_exists + + +- name: slurp cert if exists + slurp: + src: "{{ cert_path }}" + when: cert_exists.stat.exists + register: cert + + +- name: check if the cert validity period is about to expire + community.crypto.x509_certificate_info: + content: "{{ cert.content | b64decode }}" + valid_at: + reissue_period: "+{%- if has_acme | d(false) == true -%}45d\ + {%- else -%}{{ ca_reissue_period | d('8w') }}\ + {%- endif -%}" + when: cert_exists.stat.exists + register: cert_info + + +- block: + - name: generate certificate on ca + community.crypto.x509_certificate_pipe: + content: "{{ (cert.content | b64decode) if cert_exists.stat.exists else omit }}" + csr_content: "{{ csr.csr }}" + provider: ownca + ownca_not_after: "{{ item.duration | d('+365d') }}" + ownca_not_before: -1d + ownca_digest: "{{ kt.digest | d(omit) }}" + ownca_path: "{{ ca_dir }}/{{ ca_ip }}{{ kt.name }}.{{ ca_crt_ext }}" + ownca_privatekey_path: "{{ ca_dir }}/{{ ca_ip }}{{ kt.name }}.{{ ca_key_ext }}" + ownca_privatekey_passphrase: "{{ ca_pk_inter_password }}" + force: "{{ cert_exists.stat.exists and not cert_info.valid_at.reissue_period }}" + register: cert + delegate_to: "{{ services.ca.hostname }}" + notify: "{{ ca_options.notify | d(omit) }}" + + + - name: save new cert if it was changed + copy: + dest: "{{ cert_path }}" + content: "{{ cert.certificate }}" + mode: "{{ k_mode | d(omit) }}" + owner: "{{ k_owner | d(omit) }}" + group: "{{ k_group | d(omit) }}" + follow: "{{ (ca_options | combine(item)).follow_symlinks | d(omit) }}" + when: cert is changed + notify: "{{ ca_options.notify | d(omit) }}" + + when: has_acme | d(false) == false + + +- name: generate acme certificate + include_tasks: gen_acme.yml + when: has_acme | d(false) == true + + +- block: + - name: slurp certificate + slurp: + src: "{{ cert_path }}" + register: cert + + - name: complete certificate chain + community.crypto.certificate_complete_chain: + input_chain: "{{ ((cert.content | b64decode).split('\n\n'))[0] }}" + root_certificates: /etc/ssl/certs + register: chain + + - name: save chain to file + copy: + dest: "{{ item.chain }}" + content: | + {% set result = chain.complete_chain %} + {% set _ = result.pop(0) %} + {{ result | join('') }} + mode: "{{ k_mode | d(omit) }}" + owner: "{{ k_owner | d(omit) }}" + group: "{{ k_group | d(omit) }}" + follow: "{{ (ca_options | combine(item)).follow_symlinks | d(omit) }}" + notify: "{{ ca_options.notify | d(omit) }}" + + when: item.chain is string + + +- block: + - name: slurp intermediate from ca + slurp: + src: "{{ ca_dir }}/{{ ca_ip }}{{ kt.name }}.{{ ca_crt_ext }}" + register: inter + delegate_to: "{{ services.ca.hostname }}" + + + - name: add intermediate cert if requested + blockinfile: + block: "{{ inter.content | b64decode }}" + insertafter: EOF + marker: "" + path: "{{ cert_path }}" + notify: "{{ ca_options.notify | d(omit) }}" + + when: (use_acme | d(false) == false) and (cert is changed) and ((ca_options | combine(item)).concat_inter | d(true) == true) + + +- block: + - name: slurp root from ca + slurp: + src: "{{ ca_dir }}/{{ ca_rp }}{{ kt.name }}.{{ ca_crt_ext }}" + register: root + delegate_to: "{{ services.ca.hostname }}" + + + - name: add root cert if requested + blockinfile: + block: "{{ 
root.content | b64decode }}" + insertafter: EOF + marker: "" + path: "{{ cert_path }}" + notify: "{{ ca_options.notify | d(omit) }}" + + when: (use_acme | d(false) == false) and (cert is changed) and ((ca_options | combine(item)).concat_root | d(false) == true) diff --git a/roles/ca/tasks/add_root.yml b/roles/ca/tasks/add_root.yml new file mode 100644 index 0000000..7546359 --- /dev/null +++ b/roles/ca/tasks/add_root.yml @@ -0,0 +1,44 @@ +- include_tasks: prepare_item.yml + + +- name: slurp root from ca + slurp: + src: "{{ ca_dir }}/{{ ca_rp }}{{ kt.name }}.{{ ca_crt_ext }}" + register: root + delegate_to: "{{ services.ca.hostname }}" + + +- name: copy root to memory + set_fact: + "root_{{ kt.name }}": "{{ root.content | b64decode }}" + when: (ca_options | combine(item)).memory | d(false) == true + + +- name: copy root to remote node + copy: + dest: "{%- if item.path is defined -%}{{ item.path }}\ + {%- else -%}{{ ca_options.path ~ '/' ~ ca_rp ~ kt.name ~ '.' ~ ca_crt_ext }}\ + {%- endif -%}" + content: "{{ root.content | b64decode }}" + mode: "{{ k_mode | d(omit) }}" + owner: "{{ k_owner | d(omit) }}" + group: "{{ k_group | d(omit) }}" + when: (ca_options | combine(item)).path is defined + + +- name: copy root to system storage + block: + - name: ensure ca-certificates is installed + package: + name: ca-certificates + + - name: upload root cert to user cert storage + copy: + dest: "/usr/local/share/ca-certificates/{{ ca_rp }}{{ kt.name }}.{{ ca_crt_ext }}" + content: "{{ root.content | b64decode }}" + + - name: update ca certificates + command: /usr/sbin/update-ca-certificates + changed_when: no + + when: (ca_options | combine(item)).system | d(false) == true diff --git a/roles/ca/tasks/check_acme.yml b/roles/ca/tasks/check_acme.yml new file mode 100644 index 0000000..167e0ae --- /dev/null +++ b/roles/ca/tasks/check_acme.yml @@ -0,0 +1,18 @@ +- block: + - name: check if acme main account exists + community.crypto.acme_account_info: + account_key_src: "{{ ca_dir ~ '/acme-main.' ~ ca_key_ext }}" + account_key_passphrase: "{{ ca_acme_account_key_password }}" + acme_directory: "{{ ca_acme_endpoint | d('https://acme-v02.api.letsencrypt.org/directory') }}" + acme_version: "{{ ca_acme_version | d(2) }}" + register: acme_info + delegate_to: "{{ services.ca.hostname }}" + + - name: determine acme support + set_fact: + has_acme: "{{ acme_info is defined and acme_info.exists and acme_info.account.status == 'valid' and (acme_disable | d(false) == false) }}" + + rescue: + - name: revert has_acme + set_fact: + has_acme: false diff --git a/roles/ca/tasks/gen_acme.yml b/roles/ca/tasks/gen_acme.yml new file mode 100644 index 0000000..acb7524 --- /dev/null +++ b/roles/ca/tasks/gen_acme.yml @@ -0,0 +1,86 @@ +- name: define some acme parameters + set_fact: + acme_staging: "{{ (ca_options | d({}) | combine(item)).acme_staging | d(false) }}" + acme_upgrade_int_ca: "{{ cert_info is defined and ((cert_info.ocsp_uri is not defined) or (cert_info.ocsp_uri == None)) }}" + + +- name: determine if acme cert generation will be forced + set_fact: + acme_forced: "{{ acme_upgrade_int_ca or (always_update_acme is defined) }}" + + +- name: slurp account key from ca + slurp: + src: "{{ ca_dir ~ '/acme-' ~ ('staging' if acme_staging == true else 'main') ~ '.' 
~ ca_key_ext }}" + register: acme_account_key + delegate_to: "{{ services.ca.hostname }}" + + +- name: define args for acme certificate generation + set_fact: + acme_common_args: + account_key_content: "{{ acme_account_key.content | b64decode }}" + account_key_passphrase: "{{ ca_acme_account_key_password }}" + acme_directory: "{%- if (acme_staging == false) or (acme_staging == None) -%}{{ ca_acme_endpoint | d('https://acme-v02.api.letsencrypt.org/directory') }}\ + {%- else -%}{{ ca_acme_staging_endpoint | d('https://acme-staging-v02.api.letsencrypt.org/directory') }}\ + {%- endif -%}" + acme_version: "{{ ca_acme_version | d(2) }}" + acme_extra_args: + challenge: dns-01 + csr_content: "{{ csr.csr }}" + fullchain_dest: "{{ cert_path if ((ca_options | d({}) | combine(item)).concat_inter | d(true) == true) else omit }}" + dest: "{{ cert_path if ((ca_options | d({}) | combine(item)).concat_inter | d(true) == false) else omit }}" + modify_account: no + remaining_days: 45 + force: "{{ acme_forced }}" + terms_agreed: yes + + +- name: generate acme challenge request + community.crypto.acme_certificate: + args: "{{ acme_common_args | combine(acme_extra_args) }}" + register: challenge + changed_when: no + + +- block: + - name: unset challenge_records + set_fact: + challenge_records: "{{ [] }}" + + + - name: fill challenge records + set_fact: + challenge_records: "{{ challenge_records + [{ + 'name': item2.key | regex_search('(.*).' ~ (tld | regex_escape()), '\\1') | first, + 'type': 'TXT', + 'value': item2.value[0] + }] }}" + loop: "{{ challenge['challenge_data_dns'] | dict2items }}" + loop_control: + loop_var: item2 + + + - include_tasks: gen_acme_include.yml + + + - block: + - name: revoke cert if it already exists + community.crypto.acme_certificate_revoke: + certificate: "{{ cert_path }}" + revoke_reason: 4 + args: "{{ acme_common_args }}" + when: (cert_exists is defined) and cert_exists.stat.exists and not acme_upgrade_int_ca + + rescue: + - debug: + msg: failed to revoke certificate, ignoring + + + - name: finalize acme challenge request + community.crypto.acme_certificate: + data: "{{ challenge }}" + args: "{{ acme_common_args | combine(acme_extra_args) }}" + notify: "{{ ca_options.notify | d(omit) }}" + + when: (challenge.cert_days is not defined) or (challenge.cert_days < 45) or acme_forced diff --git a/roles/ca/tasks/gen_acme_include.yml b/roles/ca/tasks/gen_acme_include.yml new file mode 100644 index 0000000..94c77cc --- /dev/null +++ b/roles/ca/tasks/gen_acme_include.yml @@ -0,0 +1,7 @@ +- name: add records to external ns + include_role: + name: external_ns + vars: + nse_items: "{{ challenge_records }}" + nse_function: add_records + nse_instant: true diff --git a/roles/ca/tasks/gen_dhparam.yml b/roles/ca/tasks/gen_dhparam.yml new file mode 100644 index 0000000..07bb48d --- /dev/null +++ b/roles/ca/tasks/gen_dhparam.yml @@ -0,0 +1,74 @@ +- name: define dh param dict + set_fact: + dh: "{{ {'remote_gen': true, 'size': 2048, 'backup': false} | combine(dh_params | d({})) }}" + + +- name: check if dhparam file exists + stat: + path: "{{ dh.path | mandatory }}" + register: res + + +- block: + - name: ensure cryptography toolkit is installed + include_tasks: tasks/install_packages.yml + vars: + package: + - alpine: py3-cryptography + debian: python3-cryptography + when: dh.remote_gen == false + + + - block: + - name: wait until ca becomes available + wait_for_connection: + timeout: 10 + + - name: create temporary file for dh params + tempfile: + state: file + register: tf + + delegate_to: "{{ 
services.ca.hostname }}" + when: dh.remote_gen == true + + + - name: generate dh params + community.crypto.openssl_dhparam: + path: "{%- if dh.remote_gen == false -%}{{ dh.path | mandatory }}\ + {%- else -%}{{ tf.path }}\ + {%- endif -%}" + size: "{{ dh.size }}" + backup: "{{ dh.backup }}" + mode: "{{ (dh.mode | d('0400')) if (dh.remote_gen == false) else '0400' }}" + owner: "{{ (dh.owner | d(omit)) if (dh.remote_gen == false) else omit }}" + group: "{{ (dh.group | d(omit)) if (dh.remote_gen == false) else omit }}" + return_content: "{{ dh.remote_gen == true }}" + delegate_to: "{{ inventory_hostname if (dh.remote_gen == false) else services.ca.hostname }}" + notify: "{{ dh.notify | d(omit) }}" + register: dh_result + + + - block: + - name: remove temporary file + file: + path: "{{ tf.path }}" + state: absent + delegate_to: "{{ services.ca.hostname }}" + + - name: copy dh result to remote node + copy: + content: "{{ dh_result.dhparams }}" + dest: "{{ dh.path | mandatory }}" + mode: "{{ dh.mode | d('0400') }}" + owner: "{{ dh.owner | d(omit) }}" + group: "{{ dh.group | d(omit) }}" + + when: dh.remote_gen == true + + when: (not res.stat.exists) or (dh.remote_gen == false) + + +- name: unset dh param dict + set_fact: + dh: "{{ {} }}" diff --git a/roles/ca/tasks/install.yml b/roles/ca/tasks/install.yml new file mode 100644 index 0000000..a6036c7 --- /dev/null +++ b/roles/ca/tasks/install.yml @@ -0,0 +1,154 @@ +- name: ensure cryptography toolkit is installed + include_tasks: tasks/install_packages.yml + vars: + package: + - alpine: py3-cryptography + debian: python3-cryptography + + +- name: early check to ensure ca variables are defined + fail: + msg: "\"{{ item }}\" is not defined" + when: item is not defined + loop: + - ca_dir + - ca_key_types + - ca_rp + - ca_ip + - ca_crt_ext + - ca_csr_ext + - ca_key_ext + + +- name: create ca directories + file: + path: "{{ ca_dir }}" + state: directory + mode: 0700 + + +- name: generate root private keys + community.crypto.openssl_privatekey: + path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_key_ext }}" + size: "{{ item.size | d(omit) }}" + curve: "{{ item.curve | d(omit) }}" + type: "{{ item.type }}" + backup: yes + cipher: auto + force: no + format: pkcs8 + format_mismatch: convert + passphrase: "{{ ca_pk_password }}" + regenerate: never + mode: 0600 + loop: "{{ ca_key_types }}" + + +- name: generate csr requests for all root keys + community.crypto.openssl_csr: + path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_csr_ext }}" + basic_constraints: + - 'CA:TRUE' + basic_constraints_critical: yes + common_name: "{{ org }} Root CA ({{ item.type | upper }})" + digest: "{{ item.digest | d(omit) }}" + key_usage: + - keyCertSign + - cRLSign + key_usage_critical: yes + privatekey_path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_key_ext }}" + privatekey_passphrase: "{{ ca_pk_password }}" + use_common_name_for_san: no + mode: 0600 + loop: "{{ ca_key_types }}" + + +- name: generate root certificates + community.crypto.x509_certificate: + path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_crt_ext }}" + csr_path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_csr_ext }}" + privatekey_path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_key_ext }}" + privatekey_passphrase: "{{ ca_pk_password }}" + provider: selfsigned + selfsigned_not_after: "{{ ca_root_valid_until | mandatory }}" + selfsigned_digest: "{{ item.digest | d(omit) }}" + mode: 0600 + loop: "{{ ca_key_types }}" + + + + +- name: generate inter private keys + 
community.crypto.openssl_privatekey: + path: "{{ ca_dir }}/{{ ca_ip }}{{ item.name }}.{{ ca_key_ext }}" + size: "{{ item.size | d(omit) }}" + curve: "{{ item.curve | d(omit) }}" + type: "{{ item.type }}" + backup: yes + cipher: auto + force: no + format: pkcs8 + format_mismatch: convert + passphrase: "{{ ca_pk_inter_password }}" + regenerate: never + mode: 0600 + loop: "{{ ca_key_types }}" + + +- name: generate csr requests for all inter keys + community.crypto.openssl_csr: + path: "{{ ca_dir }}/{{ ca_ip }}{{ item.name }}.{{ ca_csr_ext }}" + basic_constraints: + - 'CA:TRUE' + - 'pathlen:0' + basic_constraints_critical: yes + common_name: "{{ org }} Intermediate CA ({{ item.type | upper }})" + digest: "{{ item.digest | d(omit) }}" + key_usage: + - digitalSignature + - keyCertSign + - cRLSign + key_usage_critical: yes + privatekey_path: "{{ ca_dir }}/{{ ca_ip }}{{ item.name }}.{{ ca_key_ext }}" + privatekey_passphrase: "{{ ca_pk_inter_password }}" + use_common_name_for_san: no + + crl_distribution_points: + - full_name: "URI:http://crl.{{ int_tld }}/{{ item.name }}.crl" + crl_issuer: "URI:http://crl.{{ int_tld }}" + name_constraints_permitted: + - "DNS:{{ tld }}" + - "email:{{ tld }}" + name_constraints_excluded: + - "IP:0.0.0.0/0" + mode: 0600 + loop: "{{ ca_key_types }}" + + +- name: generate inter certificates + community.crypto.x509_certificate: + path: "{{ ca_dir }}/{{ ca_ip }}{{ item.name }}.{{ ca_crt_ext }}" + csr_path: "{{ ca_dir }}/{{ ca_ip }}{{ item.name }}.{{ ca_csr_ext }}" + privatekey_path: "{{ ca_dir }}/{{ ca_ip }}{{ item.name }}.{{ ca_key_ext }}" + privatekey_passphrase: "{{ ca_pk_inter_password }}" + provider: ownca + ownca_not_after: "{{ ca_inter_valid_until | mandatory }}" + ownca_digest: "{{ item.digest | d(omit) }}" + ownca_path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_crt_ext }}" + ownca_privatekey_path: "{{ ca_dir }}/{{ ca_rp }}{{ item.name }}.{{ ca_key_ext }}" + ownca_privatekey_passphrase: "{{ ca_pk_password }}" + mode: 0600 + loop: "{{ ca_key_types }}" + + +- name: install acme + include_tasks: install_acme.yml + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ ca_dir }}" diff --git a/roles/ca/tasks/install_acme.yml b/roles/ca/tasks/install_acme.yml new file mode 100644 index 0000000..ddb5a3a --- /dev/null +++ b/roles/ca/tasks/install_acme.yml @@ -0,0 +1,39 @@ +- name: select key type for acme + set_fact: + kt: "{{ ca_key_types | selectattr('name', 'equalto', ca_acme_account_key_type | d('ecc384')) | list | first }}" + + +- name: generate acme account keys + community.crypto.openssl_privatekey: + path: "{{ ca_dir ~ '/acme-' ~ item ~ '.' ~ ca_key_ext }}" + size: "{{ kt.size | d(omit) }}" + curve: "{{ kt.curve | d(omit) }}" + type: "{{ kt.type }}" + backup: yes + cipher: auto + force: no + format: pkcs8 + format_mismatch: convert + passphrase: "{{ ca_acme_account_key_password }}" + regenerate: never + mode: 0600 + loop: + - main + - staging + + +- name: create acme accounts + community.crypto.acme_account: + account_key_src: "{{ ca_dir ~ '/acme-' ~ item ~ '.' 
~ ca_key_ext }}" + account_key_passphrase: "{{ ca_acme_account_key_password }}" + acme_directory: "{%- if item == 'main' -%}{{ ca_acme_endpoint | d('https://acme-v02.api.letsencrypt.org/directory') }}\ + {%- else -%}{{ ca_acme_staging_endpoint | d('https://acme-staging-v02.api.letsencrypt.org/directory') }}\ + {%- endif -%}" + acme_version: "{{ ca_acme_version | d(2) }}" + contact: + - "mailto:{{ maintainer_email | d('admin@' ~ tld) }}" + state: present + terms_agreed: yes + loop: + - main + - staging diff --git a/roles/ca/tasks/main.yml b/roles/ca/tasks/main.yml new file mode 100644 index 0000000..a10d354 --- /dev/null +++ b/roles/ca/tasks/main.yml @@ -0,0 +1,51 @@ +- name: ca installation + include_tasks: install.yml + when: function == 'install' + + +- name: install roots + include_tasks: add_root.yml + loop: "{{ ca_default_items if (ca_roots is not defined) or (ca_roots == None) or ((ca_roots | length) == 0) else ca_roots }}" + when: function == 'roots' + + +- block: + - name: wait until ca becomes available + wait_for_connection: + timeout: 10 + delegate_to: "{{ services.ca.hostname }}" + + + - name: check if acme can be used + include_tasks: check_acme.yml + + + - name: process roots if no acme will be used + include_tasks: add_root.yml + loop: "{{ ca_default_items if (ca_roots is not defined) or (ca_roots == None) or ((ca_roots | length) == 0) else ca_roots }}" + when: not has_acme + + + - name: ensure cryptography toolkit is installed + include_tasks: tasks/install_packages.yml + vars: + package: + - alpine: py3-cryptography + debian: python3-cryptography + + + - name: process certificates + include_tasks: add_cert.yml + loop: "{{ ca_default_items if (ca_certs is not defined) or (ca_certs == None) or ((ca_certs | length) == 0) else ca_certs }}" + + when: function == 'certs' + + +- name: generate dhparams + include_tasks: gen_dhparam.yml + when: (function == 'dhparam' or function == 'dhparams') + + +- name: check acme availability + include_tasks: check_acme.yml + when: function == 'check_acme' diff --git a/roles/ca/tasks/prepare_item.yml b/roles/ca/tasks/prepare_item.yml new file mode 100644 index 0000000..df8e05f --- /dev/null +++ b/roles/ca/tasks/prepare_item.yml @@ -0,0 +1,17 @@ +- name: select key type + set_fact: + kt: "{{ ca_key_types | selectattr('name', 'equalto', item.type) | list | first }}" + + +- name: fail if key type is empty + fail: + msg: "key type must be one of: {{ ca_key_names | join(', ') }}" + when: (kt | length) == 0 + + +- name: set preferred mode, owner and group + set_fact: + k_mode: "{{ (ca_options | d({}) | combine(item)).mode | d(omit) }}" + k_owner: "{{ (ca_options | d({}) | combine(item)).owner | d(omit) }}" + k_group: "{{ (ca_options | d({}) | combine(item)).group | d(omit) }}" + k_none: yes diff --git a/roles/cdr/defaults/main.yml b/roles/cdr/defaults/main.yml new file mode 100644 index 0000000..1b18d5d --- /dev/null +++ b/roles/cdr/defaults/main.yml @@ -0,0 +1,24 @@ +cdr_user: cdr +cdr_group: cdr +cdr_dir: /opt/cdr +cdr_port: 3000 + +cdr_default_config: + port: "{{ cdr_port }}" + + db_type: pg + db_host: "{{ database_host }}" + db_user: "{{ database_user }}" + db_password: "{{ database_pass }}" + db_database: "{{ database_name }}" + db_table: cdr + + record_dir: /opt/recordings + record_pretty_names: yes + + ami_enable: yes + ami_host: 127.0.0.1 + + originate_enable: yes + originate_context: outbound + originate_timeout: 30 diff --git a/roles/cdr/handlers/main.yml b/roles/cdr/handlers/main.yml new file mode 100644 index 0000000..e1a9a0f --- 
/dev/null +++ b/roles/cdr/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart cdr + service: + name: cdr + state: restarted diff --git a/roles/cdr/tasks/main.yml b/roles/cdr/tasks/main.yml new file mode 100644 index 0000000..e632eb7 --- /dev/null +++ b/roles/cdr/tasks/main.yml @@ -0,0 +1,141 @@ +- name: set cdr_cfg + set_fact: + cdr_cfg: "{{ cdr_default_config | d({}) | combine(cdr_config | d({}), recursive=true) }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - nodejs + - npm + + +- name: add extra cname record + include_role: + name: ns + vars: + function: add_records + ns_add_default_record: no + ns_records: + - name: cdr + type: CNAME + value: "{{ host_fqdn }}" + when: "inventory_hostname != 'cdr'" + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ cdr_user }}" + group: "{{ cdr_group }}" + dir: "{{ cdr_dir }}" + notify: restart cdr + + +- name: ensure cdr dir exists + file: + path: "{{ cdr_dir }}" + state: directory + owner: "{{ cdr_user }}" + group: "{{ cdr_group }}" + + +- name: ensure recordings dir exists + file: + path: "{{ cdr_cfg.record_dir }}" + state: directory + + +- name: get source-mark status + stat: + path: "{{ cdr_dir }}/source-mark" + register: source_mark + + +- name: pause if source-mark is missing + pause: + prompt: source-mark is missing, source code has to be manually uploaded + when: source_mark.stat.exists == false + + +- name: create source-mark + file: + path: "{{ cdr_dir }}/source-mark" + state: touch + modification_time: preserve + access_time: preserve + + +- name: template env file + template: + src: env.j2 + dest: "{{ cdr_dir }}/.env" + force: yes + owner: "{{ cdr_user }}" + group: "{{ cdr_group }}" + lstrip_blocks: yes + notify: restart cdr + + +- name: ensure app script has executable bit set + file: + path: "{{ cdr_dir }}/app.js" + mode: "+x" + + +- name: install npm dependencies + npm: + path: "{{ cdr_dir }}" + no_optional: yes + ignore_scripts: yes + production: yes + become: yes + become_user: "{{ cdr_user }}" + become_method: su + become_flags: '-s /bin/ash' + notify: restart cdr + changed_when: no + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/cdr + force: yes + mode: "+x" + notify: restart cdr + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + override_server_name: cdr + certs: "{{ host_tls }}" + domains: + - "cdr.{{ host_tld }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ cdr_dir }}" + + +- name: enable and start cdr + service: + name: cdr + enabled: yes + state: started diff --git a/roles/cdr/templates/env.j2 b/roles/cdr/templates/env.j2 new file mode 100644 index 0000000..7ddd49c --- /dev/null +++ b/roles/cdr/templates/env.j2 @@ -0,0 +1,3 @@ +{% for option in (cdr_cfg | d({}) | dict2items) -%} + {{ option.key | upper }}={{ option.value | quote }} +{% endfor -%} diff --git a/roles/cdr/templates/init.j2 b/roles/cdr/templates/init.j2 new file mode 100644 index 0000000..dc0e618 --- /dev/null +++ b/roles/cdr/templates/init.j2 @@ -0,0 +1,14 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ cdr_dir }}" +command="node {{ cdr_dir }}/app.js" +command_user="{{ cdr_user }}:{{ cdr_group }}" +pidfile="/var/run/$SVCNAME.pid" +supervisor="supervise-daemon" +respawn_max=0 + +depend() { + need net + use dns +} diff 
--git a/roles/cdr/templates/nginx_server.j2 b/roles/cdr/templates/nginx_server.j2 new file mode 100644 index 0000000..2580807 --- /dev/null +++ b/roles/cdr/templates/nginx_server.j2 @@ -0,0 +1,11 @@ +set_real_ip_from 10.0.0.0/8; +real_ip_header X-Real-IP; +real_ip_recursive on; + +location / { + proxy_pass http://127.0.0.1:{{ cdr_port }}; + proxy_http_version 1.1; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; +} diff --git a/roles/certs/tasks/acme_dns.yml b/roles/certs/tasks/acme_dns.yml new file mode 100644 index 0000000..9a00d4b --- /dev/null +++ b/roles/certs/tasks/acme_dns.yml @@ -0,0 +1,24 @@ +- name: set combined cert info + set_fact: + combined: "{{ (common | d({})) | combine(cert | d({}), recursive=true) }}" + + +- name: generate certificate through acme-dns + include_role: + name: acme + vars: + acme_id: "{{ cert.id | d(host_name ~ ('-ecc' if (combined.ecc | d(false) == true) else '')) }}" + acme_cert: "{{ cert.cert }}" + acme_key: "{{ cert.key }}" + acme_chain: "{{ cert.chain | d(None) }}" + acme_cert_single: "{{ cert.cert_single | d(None) }}" + acme_ecc: "{{ combined.ecc | d(false) }}" + acme_stapling: no + acme_notify: "{{ combined.notify | d(None) }}" + acme_owner: "{{ combined.owner | d(None) }}" + acme_group: "{{ combined.group | d(None) }}" + acme_post_hook: "{{ combined.post_hook | d(None) }}" + acme_hostname: "{{ combined.hostname | d(None) }}" + acme_tld: "{{ combined.tld | d(None) }}" + acme_fqdn: "{{ combined.fqdn | d(None) }}" + acme_hosts: "{{ combined.hosts | d(None) }}" diff --git a/roles/certs/tasks/external_ns.yml b/roles/certs/tasks/external_ns.yml new file mode 100644 index 0000000..bc43cee --- /dev/null +++ b/roles/certs/tasks/external_ns.yml @@ -0,0 +1,46 @@ +- name: set combined cert info + set_fact: + combined: "{{ cert | combine(common | d({}), recursive=true) }}" + + +- name: clear san list + set_fact: + san_list: "{{ [] }}" + + +- block: + - name: build san list + set_fact: + san_list: "{{ (san_list | d([])) + ['DNS:' ~ (item.fqdn | d((item.hostname | d(host_name)) ~ '.' ~ (item.tld | d(host_tld))))] }}" + loop: "{{ cert.hosts }}" + when: (cert.hosts is defined) and (cert.hosts | type_debug == 'list') + + +- name: generate certificate through external ns + include_role: + name: ca + vars: + function: certs + ca_options: + mode: '0400' + owner: "{{ combined.owner | d(None) }}" + group: "{{ combined.group | d(None) }}" + concat_inter: yes + preset: web + acme: yes + ocsp_must_staple: "{{ combined.stapling | d(false) }}" + notify: "{{ combined.notify | d(None) }}" + ca_certs: + - type: "{{ 'ecc384' if (combined.ecc | d(false) == true) else 'rsa2048' }}" + cert: "{{ cert.cert }}" + key: "{{ cert.key }}" + cn: "{% if cert.hosts is defined and cert.hosts | type_debug == 'list' -%}\ + {{ cert.hosts[0].fqdn | d((cert.hosts[0].hostname | d(host_name)) ~ '.' ~ (cert.hosts[0].tld | d(host_tld))) }}\ + {%- else -%}\ + {{ combined.fqdn | d((combined.hostname | d(host_name)) ~ '.' ~ (combined.tld | d(host_tld))) }}\ + {%- endif -%}" + san: "{% if san_list | length > 0 -%}\ + {{ san_list }}\ + {%- else -%}\ + {{ 'DNS:' ~ (combined.fqdn | d((combined.hostname | d(host_name)) ~ '.' 
~ (combined.tld | d(host_tld)))) }}\ + {%- endif -%}" \ No newline at end of file diff --git a/roles/certs/tasks/internal_ca.yml b/roles/certs/tasks/internal_ca.yml new file mode 100644 index 0000000..a63b6df --- /dev/null +++ b/roles/certs/tasks/internal_ca.yml @@ -0,0 +1,2 @@ +- fail: + msg: deployment of certs through internal CA is not implemented diff --git a/roles/certs/tasks/main.yml b/roles/certs/tasks/main.yml new file mode 100644 index 0000000..5b99098 --- /dev/null +++ b/roles/certs/tasks/main.yml @@ -0,0 +1,41 @@ +- name: validate cert parameter + fail: + msg: certs variable must be a dict or a list + when: (certs is not defined) or ((certs is not mapping) and (certs | type_debug != 'list')) + + +- name: validate common parameter + fail: + msg: common variable must be a dict + when: (common is defined) and (common is not mapping) + + +- name: validate certificates + include_tasks: validate.yml + loop: "{{ certs if (certs | type_debug == 'list') else [certs] }}" + loop_control: + loop_var: cert + + +- name: process certificates with acme dns + include_tasks: acme_dns.yml + loop: "{{ certs if (certs | type_debug == 'list') else [certs] }}" + loop_control: + loop_var: cert + when: services.acme_dns is defined + + +- name: process certificates with standalone dns + include_tasks: external_ns.yml + loop: "{{ certs if (certs | type_debug == 'list') else [certs] }}" + loop_control: + loop_var: cert + when: (services.external_ns is defined) and (services.acme_dns is not defined) + + +- name: process certificates with internal ca + include_tasks: internal_ca.yml + loop: "{{ certs if (certs | type_debug == 'list') else [certs] }}" + loop_control: + loop_var: cert + when: (services.ca is defined) and (services.external_ns is not defined) and (services.acme_dns is not defined) diff --git a/roles/certs/tasks/validate.yml b/roles/certs/tasks/validate.yml new file mode 100644 index 0000000..3acfd1a --- /dev/null +++ b/roles/certs/tasks/validate.yml @@ -0,0 +1,46 @@ +- name: validate mandatory parameters + fail: + msg: some mandatory parameters in cert variable are missing or invalid + when: (cert is not defined) or (cert is not mapping) or + (cert.key is not string) or (cert.cert is not string) + + +- name: validate optional parameters + fail: + msg: some optional parameters in cert variable are missing or invalid + when: ((cert.ca is defined) and (cert.ca is not string)) or + ((cert.id is defined) and (cert.id is not string)) or + ((cert.ecc is defined) and (cert.ecc is not boolean)) or + ((cert.fqdn is defined) and (cert.fqdn is not string)) or + ((cert.tld is defined) and (cert.tld is not string)) or + ((cert.hostname is defined) and (cert.hostname is not string)) or + ((cert.hosts is defined) and (cert.hosts | type_debug != 'list')) or + ((cert.tld is defined) and (cert.tld is not string)) or + ((cert.stapling is defined) and (cert.stapling is not boolean)) or + ((cert.post_hook is defined) and (cert.post_hook is not string)) or + ((cert.notify is defined) and (cert.notify is not string)) or + ((cert.owner is defined) and (cert.owner is not string)) or + ((cert.group is defined) and (cert.group is not string)) + + +- name: validate parameter combinations + fail: + msg: parameters are defined in an invalid combination + when: ((cert.fqdn is defined) and (cert.hosts is defined)) or + ((cert.tld is defined) and (cert.hosts is defined)) or + ((cert.hostname is defined) and (cert.hosts is defined)) or + ((cert.fqdn is defined) and (cert.tld is defined)) or + ((cert.fqdn is defined) and 
(cert.hostname is defined)) + + +- name: validate hosts + fail: + msg: host parameters are invalid or are defined in an invalid combination + when: ((host.fqdn is defined) and (host.fqdn is not string)) or + ((host.tld is defined) and (host.tld is not string)) or + ((host.hostname is defined) and (host.hostname is not string)) or + ((host.fqdn is defined) and (host.tld is defined)) or + ((host.fqdn is defined) and (host.hostname is defined)) + loop: "{{ cert.hosts }}" + loop_control: + loop_var: host diff --git a/roles/clamav/defaults/main.yml b/roles/clamav/defaults/main.yml new file mode 100644 index 0000000..258da9d --- /dev/null +++ b/roles/clamav/defaults/main.yml @@ -0,0 +1,72 @@ +clamav_user: clamav +clamav_group: clamav + +clamav_conf_dir: /etc/clamav +clamav_db_dir: /opt/clamav + +clamav_conf_file: "{{ clamav_conf_dir }}/clamd.conf" +clamav_freshclam_conf_file: "{{ clamav_conf_dir }}/freshclam.conf" +clamav_milter_conf_file: "{{ clamav_conf_dir }}/clamav-milter.conf" + +clamav_socket: /run/clamav/clamd.sock + +clamav_max_file_size: "{{ mail_server.max_mail_size_bytes | d('25M') }}" + + +clamav_default_config: + clamav: + log_syslog: yes + log_facility: LOG_LOCAL0 + extended_detection_info: yes + pid_file: /run/clamav/clamd.pid + database_directory: "{{ clamav_db_dir }}" + local_socket: "{{ clamav_socket }}" + local_socket_mode: 660 + stream_max_length: "{{ clamav_max_file_size }}" + self_check: 3600 + concurrent_database_reload: no + user: "{{ clamav_user }}" + detect_p_u_a: yes + heuristic_scan_precedence: no + alert_encrypted: yes + alert_encrypted_archive: yes + alert_encrypted_doc: yes + max_scan_time: 30000 + max_file_size: "{{ clamav_max_file_size }}" + max_recursion: 12 + alert_exceeds_max: yes + bytecode: yes + bytecode_security: Paranoid + + + freshclam: + log_syslog: yes + log_facility: LOG_LOCAL0 + pid_file: /run/clamav/freshclam.pid + database_directory: "{{ clamav_db_dir }}" + database_owner: "{{ clamav_user }}" + update_log_file: /dev/stdout + checks: 4 + test_databases: no + bytecode: yes + safe_browsing: yes + notify_clamd: "{{ clamav_conf_file }}" + scripted_updates: no + private_mirror: https://packages.microsoft.com/clamav + + + milter: + log_syslog: yes + log_facility: LOG_LOCAL0 + log_infected: Basic + log_clean: Basic + milter_socket: "inet:{{ mail_server.clamav_port | d(7357) }}" + user: "{{ clamav_user }}" + clamd_socket: "unix:{{ clamav_socket }}" + max_file_size: "{{ clamav_max_file_size }}" + on_infected: Reject + add_header: Add + report_hostname: "{{ (mail_server.mta_actual_hostname ~ '.' 
~ mail_server.tld) if + (mail_server.mta_actual_hostname is defined) and (mail_server.tld is defined) else 'clamav' }}" + support_multiple_recipients: yes + foreground: yes diff --git a/roles/clamav/handlers/main.yml b/roles/clamav/handlers/main.yml new file mode 100644 index 0000000..1829802 --- /dev/null +++ b/roles/clamav/handlers/main.yml @@ -0,0 +1,16 @@ +- name: restart clamd + service: + name: clamd + state: restarted + + +- name: restart freshclam + service: + name: freshclam + state: restarted + + +- name: restart clamav milter + service: + name: clamav-milter + state: restarted diff --git a/roles/clamav/tasks/main.yml b/roles/clamav/tasks/main.yml new file mode 100644 index 0000000..11c3023 --- /dev/null +++ b/roles/clamav/tasks/main.yml @@ -0,0 +1,97 @@ +- name: set clamav_cfg + set_fact: + clamav_cfg: "{{ clamav_default_config | d({}) | combine(clamav_config | d({}), recursive=true) }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - clamav-daemon + - alpine: clamav-daemon-openrc + - clamav-milter + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ clamav_user }}" + group: "{{ clamav_group }}" + + +- name: create directories + file: + path: "{{ item }}" + state: directory + mode: 0700 + owner: "{{ clamav_user }}" + group: "{{ clamav_group }}" + loop: + - "{{ clamav_conf_dir }}" + - "{{ clamav_db_dir }}" + + +- name: template clamav configs + template: + src: config.j2 + dest: "{{ item.dest }}" + force: yes + mode: 0400 + owner: "{{ clamav_user }}" + group: "{{ clamav_group }}" + lstrip_blocks: yes + notify: "{{ item.notify }}" + loop: + - { dest: "{{ clamav_conf_file }}", section: "clamav", notify: "restart clamd" } + - { dest: "{{ clamav_freshclam_conf_file }}", section: "freshclam", notify: "restart freshclam" } + - { dest: "{{ clamav_milter_conf_file }}", section: "milter", notify: "restart clamav milter" } + + +- name: edit init script for clamd + lineinfile: + path: /etc/init.d/clamd + regexp: '^CONF=' + line: 'CONF={{ clamav_conf_file | quote }}' + notify: restart clamd + + +- name: edit init script for freshclam + lineinfile: + path: /etc/init.d/freshclam + regexp: '^CONF=' + line: 'CONF={{ clamav_freshclam_conf_file | quote }}' + notify: restart freshclam + + +- name: template init script for clamav milter + template: + src: milter_init.j2 + dest: /etc/init.d/clamav-milter + force: yes + mode: "+x" + notify: restart clamav milter + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ clamav_conf_dir }}" + + +- name: enable and start services + service: + name: "{{ item }}" + enabled: yes + state: started + loop: + - clamd + - freshclam + - clamav-milter diff --git a/roles/clamav/templates/config.j2 b/roles/clamav/templates/config.j2 new file mode 100644 index 0000000..448eaa5 --- /dev/null +++ b/roles/clamav/templates/config.j2 @@ -0,0 +1,16 @@ +{% macro clamav_option(option) -%} + {% set key = option.key.split('_') | map('capitalize') | join('') -%} + + {% if option.value is boolean -%} + {{ key }} {{ 'yes' if option.value else 'no' }} + {% elif option.value != None -%} + {{ key }} {{ option.value }} + {% endif -%} +{% endmacro -%} + + +{% if clamav_cfg[item.section] is mapping -%} + {% for option in (clamav_cfg[item.section] | d({}) | dict2items) -%} + {{ clamav_option(option) }} + {%- endfor %} +{% endif -%} diff --git 
a/roles/clamav/templates/milter_init.j2 b/roles/clamav/templates/milter_init.j2 new file mode 100644 index 0000000..30578c1 --- /dev/null +++ b/roles/clamav/templates/milter_init.j2 @@ -0,0 +1,14 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ clamav_db_dir }}" +command="/usr/sbin/clamav-milter" +command_user="{{ clamav_user ~ ':' ~ clamav_group }}" +pidfile="/var/run/$SVCNAME.pid" +supervisor="supervise-daemon" + +depend() { + need net + use dns + after clamd +} diff --git a/roles/common/defaults/main.yml b/roles/common/defaults/main.yml new file mode 100644 index 0000000..aa0d8da --- /dev/null +++ b/roles/common/defaults/main.yml @@ -0,0 +1 @@ +dropbear_dir: /etc/dropbear \ No newline at end of file diff --git a/roles/common/files/dropbear_init b/roles/common/files/dropbear_init new file mode 100644 index 0000000..724b0e6 --- /dev/null +++ b/roles/common/files/dropbear_init @@ -0,0 +1,19 @@ +#!/sbin/openrc-run + +depend() { + use logger dns + need net + after firewall +} + +start() { + ebegin "Starting dropbear" + /usr/sbin/dropbear ${DROPBEAR_OPTS} + eend $? +} + +stop() { + ebegin "Stopping dropbear" + start-stop-daemon --stop --pidfile /var/run/dropbear.pid + eend $? +} diff --git a/roles/common/handlers/main.yml b/roles/common/handlers/main.yml new file mode 100644 index 0000000..9c18241 --- /dev/null +++ b/roles/common/handlers/main.yml @@ -0,0 +1,16 @@ +- name: restart syslog + service: + name: syslog + state: restarted + + +- name: restart crond + service: + name: cron + state: restarted + + +- name: restart dropbear + service: + name: dropbear + state: restarted diff --git a/roles/common/tasks/alpine.yml b/roles/common/tasks/alpine.yml new file mode 100644 index 0000000..36b5386 --- /dev/null +++ b/roles/common/tasks/alpine.yml @@ -0,0 +1,94 @@ +- name: setup timezone + shell: + cmd: PATH=$PATH:/sbin; /sbin/setup-timezone -z {{ timezone | quote }} + chdir: /sbin + creates: "{{ ('/etc/zoneinfo', timezone) | path_join }}" + notify: restart syslog + when: timezone is string + + +- name: flush handlers + meta: flush_handlers + + +- name: upgrade alpine version + replace: + path: /etc/apk/repositories + regexp: '/alpine/v\d+\.\d+/' + replace: '/alpine/v{{ alpine_version }}/' + + +- name: change apk repository + replace: + path: /etc/apk/repositories + regexp: '^https:\/\/dl-cdn\.alpinelinux\.org\/alpine\/' + replace: 'https://mirror.yandex.ru/mirrors/alpine/' + when: use_alternative_apk_repo | d(false) == true + + +- block: + - name: update repository index + community.general.apk: + update_cache: yes + changed_when: no + register: apk_result + + rescue: + - name: fix repository keys + command: + cmd: /sbin/apk fix --allow-untrusted alpine-keys + when: "'UNTRUSTED signature' in apk_result.stderr" + + - name: update repository index in untrusted mode + command: + cmd: /sbin/apk --allow-untrusted update + when: "'UNTRUSTED signature' in apk_result.stderr" + + - name: upgrade basic dependencies in untrusted mode + command: + cmd: /sbin/apk --allow-untrusted upgrade apk-tools alpine-keys + when: "'UNTRUSTED signature' in apk_result.stderr" + + - name: update repository index + community.general.apk: + update_cache: yes + changed_when: no + + +- name: check if there are any updates + command: + cmd: /sbin/apk list -u + register: updates_found + changed_when: no + + +- name: pause and confirm updates + pause: + prompt: "{{ updates_found.stdout }}" + when: (updates_found.stdout | length > 0) and (interactive | d(true) == true) and (hosts_strategy | d('') != 'free') + changed_when: 
updates_found.stdout | length > 0 + + +- name: upgrade all packages if updates are found + community.general.apk: + upgrade: yes + when: updates_found.stdout | length > 0 + + +- name: collect apk-new files + find: + paths: + - /etc + - /usr + - /var + patterns: "*.apk-new" + recurse: yes + depth: 8 + register: new_files + + +- name: remove apk-new files + file: + path: "{{ item.path }}" + state: absent + loop: "{{ new_files.files | flatten(levels=1) }}" diff --git a/roles/common/tasks/debian.yml b/roles/common/tasks/debian.yml new file mode 100644 index 0000000..298dbbd --- /dev/null +++ b/roles/common/tasks/debian.yml @@ -0,0 +1,52 @@ +- name: set timezone + community.general.timezone: + name: "{{ timezone }}" + notify: restart crond + when: timezone is defined + + +- name: flush handlers + meta: flush_handlers + + +- name: update repository index + apt: + force_apt_get: yes + update_cache: yes + changed_when: false + + +- name: ensure apt-show-versions is installed + apt: + force_apt_get: yes + name: apt-show-versions + state: latest + + +- name: get upgradeable packages + shell: + cmd: apt-show-versions --upgradeable + register: upgradeable + changed_when: false + + +- block: + - name: pause and confirm updates + pause: + prompt: "{{ upgradeable.stdout }}" + + + - name: upgrade all packages + apt: + force_apt_get: yes + install_recommends: no + upgrade: dist + + when: "(upgradeable.stdout_lines is defined) and (upgradeable.stdout_lines | length > 0)" + + +- name: clean repository cache + apt: + force_apt_get: yes + autoclean: yes + autoremove: yes diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml new file mode 100644 index 0000000..5cc2a2c --- /dev/null +++ b/roles/common/tasks/main.yml @@ -0,0 +1,147 @@ +- block: + - name: try to connect + wait_for_connection: + timeout: 10 + + - set_fact: + ssh_ok: yes + + rescue: + - name: save old ansible ssh args + set_fact: + old_ansible_ssh_extra_args: "{{ ansible_ssh_extra_args | d('') }}" + + - name: disable key checking and enable password login + set_fact: + ssh_ok: no + host_key_checking: no + ansible_password: "{{ container_password | d(host_password) }}" + ansible_ssh_extra_args: "{{ ansible_ssh_extra_args | d('') }} -o StrictHostKeyChecking=no" + + - name: try to connect without key checking + wait_for_connection: + timeout: 10 + + +- name: gather facts + setup: + gather_subset: + - min + - distribution + + +- name: generate host ssh key + include_tasks: gen_ssh_key.yml + when: (use_ssh_keys | d(true) == true) and ('containers' not in group_names) + + +- block: + - name: remove default dropbear keys + file: + path: "{{ (dropbear_dir, item) | path_join }}" + state: absent + loop: + - dropbear_dss_host_key + - dropbear_rsa_host_key + - dropbear_ecdsa_host_key + notify: restart dropbear + + + - name: generate ed25519 dropbear key if missing + command: + cmd: "dropbearkey -t ed25519 -f {{ (dropbear_dir, 'dropbear_ed25519_host_key') | path_join | quote }}" + creates: "{{ (dropbear_dir, 'dropbear_ed25519_host_key') | path_join }}" + notify: restart dropbear + + + - name: get remote host public key + command: + cmd: "dropbearkey -y -f {{ (dropbear_dir, 'dropbear_ed25519_host_key') | path_join | quote }}" + register: pubkey + changed_when: no + + + - name: get actual public key + set_fact: + host_ssh_pubkey: "{{ pubkey.stdout_lines | map('regex_search', '^ssh-ed25519.*$') | select('string') | first }}" + + + - name: fail if public key is missing + fail: + msg: "remote host ssh public key is missing" + when: host_ssh_pubkey | 
length == 0 + + + - name: add public key to known_hosts on ansible controller + known_hosts: + key: "{{ ansible_host }} {{ host_ssh_pubkey }}" + name: "{{ ansible_host }}" + delegate_to: localhost + + + - name: edit dropbear conf file + lineinfile: + path: /etc/conf.d/dropbear + regexp: '^DROPBEAR_OPTS=.*$' + line: "DROPBEAR_OPTS=\"-r {{ (dropbear_dir, 'dropbear_ed25519_host_key') | path_join | quote }} -jk -T 5 -K 360 -I 7200\"" + notify: restart dropbear + + + - name: copy dropbear init file + copy: + src: dropbear_init + dest: /etc/init.d/dropbear + force: yes + notify: restart dropbear + + + - name: ensure remote host has ansible key in authorized_keys file + lineinfile: + path: /root/.ssh/authorized_keys + line: "{{ container_key.public_key }}" + create: yes + mode: 0400 + when: container_key is defined and container_key.public_key is defined + + when: ansible_distribution == 'Alpine' + + +- name: flush handlers + meta: flush_handlers + + +- name: if key checking was disabled + block: + - name: set it back on + set_fact: + host_key_checking: yes + ansible_ssh_extra_args: "{{ old_ansible_ssh_extra_args }}" + ansible_password: "{{ None }}" + + - name: try to connect + wait_for_connection: + timeout: 10 + + - set_fact: + ssh_ok: true + + when: not ssh_ok + + +- name: add etc directory to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - /etc + + +- name: alpine setup + include_tasks: alpine.yml + when: ansible_distribution == 'Alpine' + + +- name: debian setup + include_tasks: debian.yml + when: ansible_distribution == 'Debian' diff --git a/roles/container/defaults/main.yml b/roles/container/defaults/main.yml new file mode 100644 index 0000000..16eb2f0 --- /dev/null +++ b/roles/container/defaults/main.yml @@ -0,0 +1,7 @@ +container_description: Managed by Ansible +container_pool: production + +container_distro: alpine +container_template: + alpine: alpine-3.15-default_20211202_amd64.tar.xz + debian: debian-11-standard_11.3-1_amd64.tar.zst diff --git a/roles/container/tasks/main.yml b/roles/container/tasks/main.yml new file mode 100644 index 0000000..51cec6c --- /dev/null +++ b/roles/container/tasks/main.yml @@ -0,0 +1,186 @@ +- name: specify connection parameters + set_fact: + pm_api_host: "{{ hostvars[selected_node]['ansible_host'] | mandatory }}" + pm_api_user: "{{ hostvars[selected_node]['api_user'] | d('root@pam') }}" + pm_api_password: "{{ hostvars[selected_node]['api_password'] | d(hostvars[selected_node]['ansible_password']) }}" + pm_lxc_storage: "{{ hostvars[selected_node]['lxc_storage'] | d('local-zfs') }}" + no_log: yes + + +- name: validate template and distribution parameters + fail: + msg: some container parameters are missing or invalid + when: (container_distro is not defined) or (container_template is not mapping) or + (container_template[container_distro] is not defined) or + (container_id is not defined) or (container_password is not defined) + + +- name: ensure pool exists on cluster node + command: + cmd: "pveum pool add {{ container_pool | quote }}" + register: pool_res + changed_when: pool_res.rc == 0 + failed_when: (pool_res.rc != 0) and not ((pool_res.rc == 255) and ('already exists' in pool_res.stderr)) + when: container_pool is defined + delegate_to: "{{ selected_node }}" + + +- block: + - name: ensure pip3 is installed on local node + package: + name: py3-pip + run_once: yes + + + - name: ensure proxmoxer is installed on local node + pip: + name: proxmoxer + run_once: yes + + + - name: generate host ssh key + include_tasks: 
gen_ssh_key.yml + when: use_ssh_keys | d(true) == true + + + - name: ensure there is a container template + community.general.proxmox_template: + node: "{{ selected_node }}" + api_host: "{{ pm_api_host }}" + api_user: "{{ pm_api_user }}" + api_password: "{{ pm_api_password }}" + content_type: vztmpl + template: "{{ container_template[container_distro] }}" + validate_certs: no + timeout: 20 + + + - name: create container if not exists + community.general.proxmox: + node: "{{ selected_node }}" + api_host: "{{ pm_api_host }}" + api_user: "{{ pm_api_user }}" + api_password: "{{ pm_api_password }}" + + cores: "{{ hardware.cores }}" + cpus: "{{ hardware.cpus }}" + cpuunits: "{{ hardware.cpuunits }}" + disk: "{{ hardware.disk | string }}" + memory: "{{ hardware.memory }}" + swap: "{{ hardware.swap }}" + + description: "{{ container_description | d(omit) }}" + hostname: "{{ inventory_hostname }}" + pool: "{{ container_pool | d(omit) }}" + vmid: "{{ container_id }}" + + password: "{{ container_password }}" + pubkey: "{{ (container_key | d({})).public_key | d(omit) }}" + + ostemplate: "local:vztmpl/{{ container_template[container_distro] }}" + netif: "{\"net0\":\ + \"name=eth0,hwaddr={{ container_mac | d(mac_prefix | community.general.random_mac(seed=inventory_hostname)) }},\ + ip={{ ansible_host }}/{{ networks[container_network].gw | ansible.utils.ipaddr('prefix') }},\ + gw={{ networks[container_network].gw | ansible.utils.ipaddr('address') }},\ + bridge=vmbr0,\ + firewall=0,\ + tag={{ networks[container_network].tag }},\ + type=veth,\ + mtu={{ container_mtu | d(hostvars[selected_node]['container_mtu'] | d(1500)) }}\"}" + nameserver: "{%- if container_nameserver is defined -%}\ + {{ hostvars[container_nameserver]['ansible_host'] }}\ + {%- elif services.filtering_ns is defined -%}\ + {%- if services.filtering_ns | type_debug == 'list' -%} + {{ hostvars[services.filtering_ns[0].hostname]['ansible_host'] }}\ + {%- else -%} + {{ hostvars[services.filtering_ns.hostname]['ansible_host'] }}\ + {%- endif -%} + {%- elif container_default_nameserver is defined -%}\ + {{ container_default_nameserver }}\ + {%- else -%}\ + {{ omit }}\ + {%- endif -%}" + onboot: yes + proxmox_default_behavior: no_defaults + storage: "{{ pm_lxc_storage }}" + unprivileged: yes + timeout: 240 + + mounts: >- + { {%- for item in (container_mounts | d([])) -%} + "{{ item.id }}":"{{ pm_lxc_storage }}:{{ item.size | mandatory }},mp={{ item.mp | mandatory }}{% if item.readonly is defined and item.readonly %},ro=1{% endif %}", + {%- endfor -%} } + + + - block: + - name: add features to lxc config + lineinfile: + path: "/etc/pve/lxc/{{ container_id }}.conf" + line: "features: {{ container_features | join(',') }}" + when: container_features | d([]) | length > 0 + + + - name: check that lxc config is correct + lineinfile: + path: "/etc/pve/lxc/{{ container_id }}.conf" + regexp: "^{{ item.name }}:(\\s*).*$" + line: "{{ item.name | mandatory }}:\\g<1>{{ item.value | mandatory }}" + backrefs: yes + loop: + - { name: cpus, value: "{{ hardware.cpus }}" } + - { name: cores, value: "{{ [hardware.cores, hostvars[selected_node]['max_cores'] | d(hardware.cores)] | min }}" } + - { name: cpuunits, value: "{{ hardware.cpuunits }}" } + - { name: memory, value: "{{ hardware.memory }}" } + - { name: swap, value: "{{ hardware.swap }}" } + - { name: onboot, value: "{{ '1' if (container_active | d(true) == true) else '0' }}" } + + + - name: set startup order and delay + lineinfile: + path: "/etc/pve/lxc/{{ container_id }}.conf" + regexp: '^startup:.*$' + line: 
"startup: {{ 'order=' ~ (container_order | d(role_dependency[host_primary_role] | d('0'))) ~ ((',up=' ~ container_startup_delay) if container_startup_delay is defined else '') }}" + insertbefore: '^[^\#]' + firstmatch: yes + when: (container_order is defined) or (role_dependency[host_primary_role] is defined) or (container_startup_delay is defined) + + + - name: ensure that cpulimit is not set + lineinfile: + path: "/etc/pve/lxc/{{ container_id }}.conf" + regexp: '^cpulimit:.*$' + state: absent + + delegate_to: "{{ selected_node }}" + + + - name: start/stop container + community.general.proxmox: + node: "{{ selected_node }}" + api_host: "{{ pm_api_host }}" + api_user: "{{ pm_api_user }}" + api_password: "{{ pm_api_password }}" + vmid: "{{ container_id }}" + proxmox_default_behavior: no_defaults + state: "{{ 'started' if (container_active | d(true) == true) else 'stopped' }}" + + + - name: end playbook for current host if container is set to inactive + meta: end_host + when: container_active | d(true) == false + + + - name: wait until networking is avaliable + command: + cmd: "ping -c1 -W1 {{ ansible_host | quote }}" + register: ping_result + until: ping_result.rc == 0 + retries: 5 + delay: 2 + changed_when: no + + delegate_to: 127.0.0.1 + + +- name: preconfigure container + include_tasks: preconf.yml diff --git a/roles/container/tasks/preconf.yml b/roles/container/tasks/preconf.yml new file mode 100644 index 0000000..a322bd0 --- /dev/null +++ b/roles/container/tasks/preconf.yml @@ -0,0 +1,66 @@ +- block: + - name: install basic dependencies + include_tasks: tasks/pct_command.yml + vars: + pct_command: "{{ item.pct_command }}" + chg_substr: "{{ item.chg_substr | d(omit) }}" + loop: + - pct_command: apk update + - pct_command: apk add python3 + chg_substr: Installing + - pct_command: apk add dropbear + chg_substr: Installing + - pct_command: rc-update add dropbear + chg_substr: added to runlevel + + - name: install dropbear-scp if this is not an ansible controller + include_tasks: tasks/pct_command.yml + vars: + pct_command: apk add dropbear-scp + chg_substr: Installing + when: (inventory_hostname != 'ansible') and ((primary_role is not defined) or (primary_role != 'ansible')) + and alpine_version is version('3.15', '<=') + + - name: install openssh-sftp-server due to openssh 9 scp deprecation + include_tasks: tasks/pct_command.yml + vars: + pct_command: apk add openssh-sftp-server + chg_substr: Installing + when: alpine_version is version('3.16', '>=') + + - name: start dropbear + include_tasks: tasks/pct_command.yml + vars: + pct_command: service dropbear start + chg_substr: \* Starting dropbear ... 
[ ok ] + + when: (container_distro | lower) == 'alpine' + + +- block: + - name: install basic dependencies + include_tasks: tasks/pct_command.yml + vars: + pct_command: "{{ item.pct_command }}" + chg_substr: "{{ item.chg_substr | default(omit) }}" + loop: + - pct_command: apt-get --assume-yes update + - pct_command: apt-get --assume-yes install python3 + chg_substr: The following NEW packages + - pct_command: apt-get --assume-yes install openssh-server + chg_substr: The following NEW packages + - pct_command: systemctl enable ssh.service + chg_substr: Synchronizing state + + - name: edit sshd config + include_tasks: tasks/pct_command.yml + vars: + pct_command: "sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config" + + + - name: start sshd + include_tasks: tasks/pct_command.yml + vars: + pct_command: systemctl start ssh.service + + when: (container_distro | lower) in ['debian', 'ubuntu'] diff --git a/roles/coredns/defaults/main.yml b/roles/coredns/defaults/main.yml new file mode 100644 index 0000000..8abb56c --- /dev/null +++ b/roles/coredns/defaults/main.yml @@ -0,0 +1,9 @@ +coredns_user: coredns +coredns_group: coredns +coredns_conf_dir: /etc/coredns + +coredns_conf_file: "{{ coredns_conf_dir }}/coredns.conf" +coredns_tls_file: "{{ coredns_conf_dir }}/tls.conf" + +coredns_cert_file: "{{ coredns_conf_dir }}/ecc384.crt" +coredns_key_file: "{{ coredns_conf_dir }}/ecc384.key" diff --git a/roles/coredns/handlers/main.yml b/roles/coredns/handlers/main.yml new file mode 100644 index 0000000..2f24edf --- /dev/null +++ b/roles/coredns/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart coredns + service: + name: coredns + state: restarted diff --git a/roles/coredns/tasks/add_record.yml b/roles/coredns/tasks/add_record.yml new file mode 100644 index 0000000..ac97008 --- /dev/null +++ b/roles/coredns/tasks/add_record.yml @@ -0,0 +1,119 @@ +- name: check if record is an object + fail: + msg: record must be an object + when: record is not mapping + + +- name: check if record zone is a string + fail: + msg: record zone must be a string + when: record.zone is defined and record.zone is not string + + +- name: check if record zone exists + fail: + msg: '"{{ record.zone }}" does not seem to be a valid zone' + when: (record.zone is defined) and + (record.zone != 'root') and + ((int_zones is not defined) or (record.zone not in int_zones)) + + +- name: construct record parameters + set_fact: + ns_zone: "{%- if (record.zone is defined) and (record.zone != 'root') -%}{{ record.zone }}\ + {%- else -%}{{ ns_tld | d(int_tld) }}\ + {%- endif -%}" + ns_name: "{%- if record.name is defined -%}{{ record.name }}\ + {%- else -%}{{ inventory_hostname }}\ + {%- endif -%}" + ns_type: "{%- if record.type is defined -%}{{ record.type | upper }}\ + {%- else -%}A\ + {%- endif -%}" + ns_value: "{%- if record.value is defined -%}{{ record.value }}\ + {%- else -%}{{ ansible_host }}\ + {%- endif -%}" + +- name: set ns_quote + set_fact: + ns_quote: "{{ '\"' if ns_type == 'TXT' else '' }}" + + +- name: construct full name + set_fact: + ns_full_name: '{%- if ns_name != "@" -%}{{ ns_name }}.{%- endif -%}{{ ns_zone }}' + + +- name: construct regex part + set_fact: + ns_regex_part: '{%- if record.allow_multiple is defined -%}{{ (ns_quote ~ ns_value ~ ns_quote) | regex_escape() }}\.?{%- else -%}{{ "" | string }}{%- endif -%}' + + +- name: construct regex + set_fact: + ns_regex: '^{{ ns_full_name | regex_escape() }}\s+\d+\s+IN\s+{{ ns_type | regex_escape() }}\s+{{ ns_regex_part }}' + + +- name: show 
debug info + debug: + msg: "{{ ns_zone }} {{ ns_name }} {{ ns_type }} {{ ns_quote ~ ns_value ~ ns_quote }} --> {{ ns_regex }}" + + +- name: slurp zone file + slurp: + src: "{{ coredns_conf_dir ~ '/' ~ (ns_tld | d(int_tld)) ~ '.zone' }}" + register: zf + changed_when: false + + +- name: check zone file lines to see if an entry already exists + set_fact: + ns_exists: "{{ (zf.content | b64decode).split('\n') | select('search', ns_regex) | list | length > 0 }}" + + +- block: + - name: fail if there are multiple records + fail: + msg: single record mode is selected, but multiple records found + when: (zf.content | b64decode).split('\n') | select('search', ns_regex) | list | length > 1 + + + - name: grab the value + set_fact: + ns_old_value: "{{ (zf.content | b64decode).split('\n') | select('search', ns_regex) | map('regex_search', '\\s+?(\\S+?)\\.?$', '\\1') | first | join('') }}" + + + - name: replace the record + lineinfile: + path: "{{ coredns_conf_dir ~ '/' ~ (ns_tld | d(int_tld)) ~ '.zone' }}" + regexp: '^\s*{{ ns_name | regex_escape() }}\s+IN\s+{{ ns_type | regex_escape() }}\s+' + line: "{{ ns_name }}\tIN\t{{ ns_type }}\t{{ ns_quote ~ ns_value ~ ns_quote }}" + backrefs: yes + when: ns_old_value != (ns_quote ~ ns_value ~ ns_quote) + register: rr1 + + when: ns_exists and record.allow_multiple is not defined + + +- name: add the record if it is missing + lineinfile: + path: "{{ coredns_conf_dir ~ '/' ~ (ns_tld | d(int_tld)) ~ '.zone' }}" + line: "{{ ns_name }}\tIN\t{{ ns_type }}\t{{ ns_quote ~ ns_value ~ ns_quote }}" + when: not ns_exists + register: rr2 + + +- name: determine if records were changed + set_fact: + ns_records_changed: "{{ ((rr1 is defined) and rr1.changed) or ((rr2 is defined) and rr2.changed) }}" + + +- name: change serial + include_tasks: increase_serial.yml + when: ns_records_changed | d(false) == true + + +- name: restart coredns + service: + name: coredns + state: restarted + when: (ns_instant | d(false) == true) and (ns_records_changed or ns_serial_changed) diff --git a/roles/coredns/tasks/add_records.yml b/roles/coredns/tasks/add_records.yml new file mode 100644 index 0000000..8ce5647 --- /dev/null +++ b/roles/coredns/tasks/add_records.yml @@ -0,0 +1,21 @@ +- name: add default record + include_tasks: add_record.yml + vars: + record: {} + when: (ns_records | d([]) | length) == 0 + + +- name: process other items + include_tasks: add_record.yml + loop: "{{ ns_records | d([]) }}" + loop_control: + loop_var: record + + +- name: restart coredns + service: + name: coredns + state: restarted + when: (ns_instant | d(false) == false) and + ((ns_records_changed | d(false) == true) or + (ns_serial_changed | d(false) == true)) diff --git a/roles/coredns/tasks/increase_serial.yml b/roles/coredns/tasks/increase_serial.yml new file mode 100644 index 0000000..eed3714 --- /dev/null +++ b/roles/coredns/tasks/increase_serial.yml @@ -0,0 +1,47 @@ +- name: slurp zone file + slurp: + src: "{{ coredns_conf_dir ~ '/' ~ (ns_tld | d(int_tld)) ~ '.zone' }}" + register: zf + changed_when: false + + +- name: get SOA serial value + set_fact: + ns_old_serial: '{{ zf.content | b64decode | regex_search(''@\s+IN\s+SOA\s+\S+\s+\S+\s*\(\s*(\d+)'', ''\1'') | first }}' + + +- name: get current date + include_tasks: tasks/get_datetime.yml + vars: + format: YYMMDD + + +- name: replace outdated serial with current date + set_fact: + ns_new_serial: "{{ (current_date_time | string) ~ '01'}}" + when: ns_old_serial[:8] != (current_date_time | string) + + +- name: increase current serial + set_fact:
ns_new_serial: "{{ (ns_old_serial | int) + 1 }}" + when: (ns_old_serial[:8] == (current_date_time | string)) and ((ns_old_serial[8:10] | int) < 99) + + +- name: do not change current serial if it had more than 99 iterations + set_fact: + ns_new_serial: "{{ ns_old_serial }}" + when: (ns_old_serial[:8] == (current_date_time | string)) and ((ns_old_serial[8:10] | int) >= 99) + + +- name: insert new serial + replace: + path: "{{ coredns_conf_dir ~ '/' ~ (ns_tld | d(int_tld)) ~ '.zone' }}" + regexp: '(@\s+IN\s+SOA\s+\S+\s+\S+\s*\(\s*){{ ns_old_serial }}' + replace: '\g<1>{{ ns_new_serial }}' + register: result + + +- name: set fact if serial was changed + set_fact: + ns_serial_changed: "{{ result.changed }}" diff --git a/roles/coredns/tasks/install.yml b/roles/coredns/tasks/install.yml new file mode 100644 index 0000000..95049ef --- /dev/null +++ b/roles/coredns/tasks/install.yml @@ -0,0 +1,93 @@ +- name: install coredns and dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - coredns + - alpine: coredns-openrc + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ coredns_user }}" + group: "{{ coredns_group }}" + + +- name: create config directory + file: + path: "{{ coredns_conf_dir }}" + state: directory + owner: "{{ coredns_user }}" + group: "{{ coredns_group }}" + notify: restart coredns + + +- name: template corefile + template: + src: corefile.j2 + dest: "{{ coredns_conf_file }}" + force: yes + owner: "{{ coredns_user }}" + group: "{{ coredns_group }}" + mode: 0400 + notify: restart coredns + + +- name: template empty tls file if missing + copy: + content: '' + dest: "{{ coredns_tls_file }}" + force: no + owner: "{{ coredns_user }}" + group: "{{ coredns_group }}" + mode: 0400 + notify: restart coredns + + +- name: template root zone if missing + template: + src: zone.j2 + dest: "{{ coredns_conf_dir ~ '/' ~ (ns_tld | d(int_tld)) ~ '.zone' }}" + force: no + mode: 0400 + owner: "{{ coredns_user }}" + group: "{{ coredns_group }}" + notify: restart coredns + + +- name: edit service config + lineinfile: + path: /etc/conf.d/coredns + regexp: "^COREDNS_CONFIG=" + line: "COREDNS_CONFIG={{ coredns_conf_file | quote }}" + notify: restart coredns + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/coredns + force: yes + mode: 0755 + notify: restart coredns + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ coredns_conf_dir }}" + + +- name: enable and start coredns + service: + name: coredns + enabled: yes + state: started diff --git a/roles/coredns/tasks/install_tls.yml b/roles/coredns/tasks/install_tls.yml new file mode 100644 index 0000000..d58b1bc --- /dev/null +++ b/roles/coredns/tasks/install_tls.yml @@ -0,0 +1,28 @@ +- name: deploy ecc384 cert + include_role: + name: ca + vars: + function: certs + ca_options: + mode: '0400' + owner: "{{ coredns_user }}" + group: "{{ coredns_group }}" + concat_inter: true + preset: web + ocsp_must_staple: false + notify: restart coredns + ca_certs: + - type: ecc384 + key: "{{ coredns_key_file }}" + cert: "{{ coredns_cert_file }}" + + +- name: template tls snippet file + template: + src: tls.j2 + dest: "{{ coredns_tls_file }}" + force: yes + owner: "{{ coredns_user }}" + group: "{{ coredns_group }}" + mode: 0400 + notify: restart coredns diff --git a/roles/coredns/tasks/main.yml b/roles/coredns/tasks/main.yml new file mode 100644 index 
0000000..78a7e89 --- /dev/null +++ b/roles/coredns/tasks/main.yml @@ -0,0 +1,13 @@ +- name: install coredns + include_tasks: install.yml + when: function == 'install' + + +- name: install coredns tls enhancements + include_tasks: install_tls.yml + when: function == 'install_tls' + + +- name: add records + include_tasks: add_records.yml + when: function == 'add_records' diff --git a/roles/coredns/templates/corefile.j2 b/roles/coredns/templates/corefile.j2 new file mode 100644 index 0000000..0289681 --- /dev/null +++ b/roles/coredns/templates/corefile.j2 @@ -0,0 +1,15 @@ +(common) { + root {{ (coredns_conf_dir ~ '/') | quote }} + file {{ ((ns_tld | d(int_tld)) ~ '.zone') | quote }} + + any + bufsize {{ ns_edns0_bufsize | d(1232) }} + errors + loadbalance +} + +{{ ns_tld | d(int_tld) }} { + import common +} + +import {{ coredns_tls_file | quote }} diff --git a/roles/coredns/templates/init.j2 b/roles/coredns/templates/init.j2 new file mode 100644 index 0000000..8e792a1 --- /dev/null +++ b/roles/coredns/templates/init.j2 @@ -0,0 +1,14 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ coredns_conf_dir }}" +command="/usr/bin/coredns" +command_args="-conf ${COREDNS_CONFIG} ${COREDNS_EXTRA_ARGS}" +command_user="{{ coredns_user }}:{{ coredns_group }}" +pidfile="/var/run/$SVCNAME.pid" +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + +depend() { + after net +} diff --git a/roles/coredns/templates/tls.j2 b/roles/coredns/templates/tls.j2 new file mode 100644 index 0000000..5639d8a --- /dev/null +++ b/roles/coredns/templates/tls.j2 @@ -0,0 +1,9 @@ +tls://{{ ns_tld | d(int_tld) }}:853 { + import common + tls {{ coredns_cert_file | quote }} {{ coredns_key_file | quote }} +} + +https://{{ ns_tld | d(int_tld) }} { + import common + tls {{ coredns_cert_file | quote }} {{ coredns_key_file | quote }} +} diff --git a/roles/coredns/templates/zone.j2 b/roles/coredns/templates/zone.j2 new file mode 100644 index 0000000..827785c --- /dev/null +++ b/roles/coredns/templates/zone.j2 @@ -0,0 +1,32 @@ +{%- set primary_ns = inventory_hostname -%} + +{%- if ns_server_group is defined -%} + {%- set primary_ns = hostvars[groups[ns_server_group][0]]['inventory_hostname'] -%} +{%- endif -%} + +{%- set this_name = (ns_name | d(inventory_hostname)) -%} +{%- set this_primary_name = (hostvars[primary_ns]['ns_name'] | d(hostvars[primary_ns]['inventory_hostname'])) -%} +{%- set this_tld = (hostvars[primary_ns]['ns_tld'] | d(ns_tld) | d(int_tld)) -%} + + + +$ORIGIN {{ this_tld }}. +$TTL {{ ns_ttl | d(300) }} + +@ IN SOA {{ this_name ~ '.' ~ this_tld }}. {{ (ns_admin | replace('@', '.')) if ns_admin is defined else ('admin' ~ '.' ~ this_tld) }}. ( + 2021010101 + {{ ns_refresh | d(1200) }} + {{ ns_retry | d(300) }} + {{ ns_expire | d(1209600) }} + {{ ns_neg_ttl | d(300) }} +) + +{% if ns_server_group is defined -%} + {% for host in groups[ns_server_group] -%} +@ IN NS {{ (hostvars[host]['ns_name'] | d(hostvars[host]['inventory_hostname'])) ~ '.' ~ this_tld }}. +{{ hostvars[host]['ns_name'] | d(hostvars[host]['inventory_hostname']) }} IN A {{ hostvars[host]['ansible_host'] }} + {% endfor -%} +{% else -%} +@ IN NS {{ this_primary_name ~ '.' ~ this_tld }}. 
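+{# Illustrative sketch only (hypothetical names): when no ns_server_group is set, this branch publishes the rendering host itself as the sole NS, followed by its glue A record below. Assuming a primary called "ns1" in zone "example.internal" at 192.0.2.53, the NS line above and the A record below would render roughly as: #}
+{# @    IN NS   ns1.example.internal. #}
+{# ns1  IN A    192.0.2.53 #}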
+{{ this_primary_name }} IN A {{ ansible_host }} +{% endif -%} diff --git a/roles/crl/defaults/main.yml b/roles/crl/defaults/main.yml new file mode 100644 index 0000000..a219861 --- /dev/null +++ b/roles/crl/defaults/main.yml @@ -0,0 +1 @@ +crl_dir: /opt/crl \ No newline at end of file diff --git a/roles/crl/tasks/main.yml b/roles/crl/tasks/main.yml new file mode 100644 index 0000000..9cefdc5 --- /dev/null +++ b/roles/crl/tasks/main.yml @@ -0,0 +1,41 @@ +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_crl + http: true + - conf: nginx_crl + certs: true + + +- name: create crl directory + file: + path: "{{ crl_dir }}" + state: directory + mode: 0500 + owner: nginx + group: nginx + + +- name: generate crls + include_role: + name: ca + vars: + function: crl + ca_options: + path: "{{ crl_dir }}" + mode: '0400' + owner: nginx + group: nginx + ca_crls: + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ crl_dir }}" diff --git a/roles/crl/templates/nginx_crl.j2 b/roles/crl/templates/nginx_crl.j2 new file mode 100644 index 0000000..3ba1f80 --- /dev/null +++ b/roles/crl/templates/nginx_crl.j2 @@ -0,0 +1,4 @@ +location / { + root {{ crl_dir }}; + try_files $uri =404; +} diff --git a/roles/dovecot/defaults/main.yml b/roles/dovecot/defaults/main.yml new file mode 100644 index 0000000..2bd0216 --- /dev/null +++ b/roles/dovecot/defaults/main.yml @@ -0,0 +1,290 @@ +dovecot_user: dovecot +dovecot_group: dovecot +dovecot_mail_user: dovemail +dovecot_mail_group: dovemail +dovecot_null_user: dovenull + +dovecot_conf_dir: /etc/dovecot +dovecot_tls_dir: "{{ dovecot_conf_dir }}/tls" +dovecot_sieve_dir: "{{ dovecot_conf_dir }}/sieve" +dovecot_mail_dir: /opt/mail +dovecot_script_dir: "{{ dovecot_conf_dir }}/scripts" + +dovecot_tls_dh2048: "{{ dovecot_tls_dir }}/dh2048.pem" +dovecot_tls_int_ecc384_key: "{{ dovecot_tls_dir }}/ecc384.key" +dovecot_tls_int_ecc384_cert: "{{ dovecot_tls_dir }}/ecc384.crt" +dovecot_tls_int_rsa2048_key: "{{ dovecot_tls_dir }}/rsa2048.key" +dovecot_tls_int_rsa2048_cert: "{{ dovecot_tls_dir }}/rsa2048.crt" + + +dovecot_drafts_name: Drafts +dovecot_junk_name: Junk +dovecot_sent_name: Sent +dovecot_trash_name: Trash +dovecot_expunged_name: .EXPUNGED + +dovecot_max_quota_mb: 5000 + +dovecot_default_config: + protocols: imap lmtp sieve + hostname: "{{ (mail_server.mua_actual_hostname | d(host_name)) ~ '@' ~ mail_server.tld }}" + login_greeting: "IMAPS {{ org }} (Dovecot) ready" + + auth_cache_ttl: 20m + auth_cache_size: 2M + auth_cache_negative_ttl: 5m + auth_mechanisms: + - plain + - login + - digest-md5 + - cram-md5 + - scram-sha-1 + - scram-sha-256 + auth_default_realm: "{{ mail_server.tld }}" + auth_realms: "{{ mail_server.tld }}" + auth_worker_max_count: 5 + + default_internal_user: "{{ dovecot_user }}" + default_internal_group: "{{ dovecot_group }}" + default_login_user: "{{ dovecot_null_user }}" + default_process_limit: 50 + default_vsz_limit: 64M + + disable_plaintext_auth: yes + + imap_capability: "+SPECIAL-USE" + imap_id_send: '"name" * "version" * support-email postmaster@{{ mail_server.tld }}' + + mail_attachment_detection_options: add-flags + mail_attribute_dict: "file:%h/mail_attrib" + mail_gid: "{{ dovecot_mail_group }}" + mail_home: "{{ dovecot_mail_dir }}/%Ld/%Ln" + mail_location: "mdbox:%h/mail:UTF-8" + mail_max_keyword_length: 100 + mail_server_admin: "mailto:{{ maintainer_email }}" + mail_server_comment: "Dovecot IMAPS server - {{ org }}" + 
mail_temp_scan_interval: 24h + mail_uid: "{{ dovecot_mail_user }}" + + postmaster_address: "postmaster@{{ mail_server.tld }}" + quota_full_tempfail: yes + recipient_delimiter: '+' + submission_client_workarounds: whitespace-before-path mailbox-for-path + + ssl: required + ssl_cert: "<{{ dovecot_tls_int_ecc384_cert }}" + ssl_key: "<{{ dovecot_tls_int_ecc384_key }}" + ssl_alt_cert: "<{{ dovecot_tls_int_rsa2048_cert }}" + ssl_alt_key: "<{{ dovecot_tls_int_rsa2048_key }}" + ssl_cipher_suites: "TLS_CHACHA20_POLY1305_SHA256:TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256" + ssl_dh: "<{{ dovecot_tls_dh2048 }}" + ssl_min_protocol: TLSv1.2 + ssl_prefer_server_ciphers: yes + + mail_plugins: "$mail_plugins mailbox_alias lazy_expunge listescape trash quota acl" + + +dovecot_protocols: + imap: + imap_metadata: yes + mail_plugins: "$mail_plugins imap_zlib imap_quota imap_acl imap_sieve" + lmtp: + mail_plugins: "$mail_plugins sieve" + lmtp_client_workarounds: whitespace-before-path mailbox-for-path + lmtp_user_concurrency_limit: 25 + lda: + mail_plugins: "$mail_plugins sieve" + lda_mailbox_autocreate: yes + lda_mailbox_autosubscribe: yes + sieve: + mail_max_userip_connections: 50 + + +dovecot_namespaces: + - name: inbox + opts: + inbox: yes + separator: '/' + + mailboxes: + - name: INBOX + opts: + auto: subscribe + + - name: "{{ dovecot_drafts_name }}" + opts: + auto: subscribe + special_use: '\Drafts' + + - name: "{{ dovecot_junk_name }}" + opts: + auto: subscribe + special_use: '\Junk' + autoexpunge: 90d + + - name: "{{ dovecot_sent_name }}" + opts: + auto: subscribe + special_use: '\Sent' + + - name: "{{ dovecot_trash_name }}" + opts: + auto: subscribe + special_use: '\Trash' + autoexpunge: 90d + + - name: "{{ dovecot_expunged_name }}" + opts: + auto: create + autoexpunge: 180d + + - name: shared + opts: + type: shared + separator: '/' + prefix: 'Общие/%%u/' + location: 'mdbox:%%h/mail:INDEXPVT=%h/shared_idx/%%u' + subscriptions: no + list: children + + +dovecot_dicts: + acl: "pgsql:{{ dovecot_conf_dir }}/dovecot-dict-sql.conf.ext" + + +dovecot_plugin_config: + trash: "{{ dovecot_conf_dir }}/dovecot-trash.conf.ext" + + lazy_expunge: "{{ dovecot_expunged_name }}" + lazy_expunge_only_last_instance: yes + + acl: "vfile:{{ dovecot_conf_dir }}/dovecot.acl" + acl_shared_dict: "proxy::acl" + + quota: "count:Account quota" + quota_exceeded_message: Mailbox quota exceeded + quota_grace: "5%%" + quota_max_mail_size: "{{ mail_server.max_mail_size_bytes ~ 'B' }}" + quota_rule: "*:storage={{ dovecot_max_quota_mb }}M" + quota_rule2: "{{ dovecot_trash_name }}:storage=+200M" + quota_rule3: "{{ dovecot_expunged_name }}:ignore" + quota_status_success: DUNNO + quota_status_nouser: DUNNO + quota_status_overquota: "452 4.2.2 User mailbox is full" + quota_vsizes: yes + + sieve_extensions: "-enotify -editheader" + sieve_global_extensions: "+vnd.dovecot.pipe +vnd.dovecot.filter +vnd.dovecot.execute" + sieve_max_actions: 64 + sieve_plugins: sieve_imapsieve sieve_extprograms + + sieve_pipe_bin_dir: "{{ dovecot_script_dir }}" + sieve_execute_bin_dir: "{{ dovecot_script_dir }}" + sieve_filter_bin_dir: "{{ dovecot_script_dir }}" + + sieve_spamtest_status_type: text + sieve_spamtest_status_header: X-Spam + sieve_spamtest_text_value0: No + sieve_spamtest_text_value10: Yes + + sieve_before: "{{ dovecot_sieve_dir }}/spam-to-folder.sieve" + + +dovecot_user_pass_db: + - type: passdb + opts: + driver: sql + args: "{{ dovecot_conf_dir }}/dovecot-sql.conf.ext" + - type: userdb + opts: + driver: prefetch + - type: userdb + opts: + driver: 
sql + args: "{{ dovecot_conf_dir }}/dovecot-sql.conf.ext" + + +dovecot_services: + imap: + opts: + service_count: 16 + process_limit: 256 + + imap-login: + opts: + service_count: 0 + process_min_avail: 1 + client_limit: 16 + service_count: 32 + + listeners: + - type: inet_listener + name: imap + opts: + port: 143 + + - type: inet_listener + name: imaps + opts: + port: 993 + ssl: yes + + lmtp: + opts: + client_limit: 1 + vsz_limit: 192M + + listeners: + - type: inet_listener + opts: + port: "{{ mail_server.mua_lmtp_port }}" + + auth: + listeners: + - type: inet_listener + opts: + port: "{{ mail_server.mua_auth_port }}" + - type: unix_listener auth-userdb + opts: + mode: 0666 + user: "{{ dovecot_user }}" + group: "{{ dovecot_group }}" + + quota-status: + opts: + executable: "/usr/libexec/dovecot/quota-status -p postfix" + + listeners: + - type: inet_listener + opts: + port: "{{ mail_server.mua_quota_port }}" + + auth-worker: + opts: + user: "{{ dovecot_user }}" + group: "{{ dovecot_group }}" + + dict: + opts: + user: "{{ dovecot_user }}" + group: "{{ dovecot_group }}" + listeners: + - type: unix_listener dict + opts: + mode: 0666 + user: "{{ dovecot_user }}" + group: "{{ dovecot_group }}" + + managesieve-login: + opts: + service_count: 0 + process_min_avail: 1 + + managesieve: + opts: + process_limit: 512 + + +dovecot_sieve_scripts: + - src: sieve-spam + dest: spam-to-folder diff --git a/roles/dovecot/handlers/main.yml b/roles/dovecot/handlers/main.yml new file mode 100644 index 0000000..c24c805 --- /dev/null +++ b/roles/dovecot/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart dovecot + service: + name: dovecot + state: restarted diff --git a/roles/dovecot/tasks/main.yml b/roles/dovecot/tasks/main.yml new file mode 100644 index 0000000..68717b2 --- /dev/null +++ b/roles/dovecot/tasks/main.yml @@ -0,0 +1,241 @@ +- name: set dovecot_cfg + set_fact: + dovecot_cfg: "{{ dovecot_default_config | d({}) | combine(dovecot_config | d({}), recursive=true) }}" + + +- name: install dovecot + include_tasks: tasks/install_packages.yml + vars: + package: + - dovecot + - dovecot-lmtpd + - dovecot-openrc + - dovecot-pgsql + - dovecot-pigeonhole-plugin + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ dovecot_user }}" + group: "{{ dovecot_group }}" + + +- name: create dovemail user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ dovecot_mail_user }}" + group: "{{ dovecot_mail_group }}" + + +- name: create dovenull user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ dovecot_null_user }}" + + +- name: create dovecot conf dir + file: + path: "{{ dovecot_conf_dir }}" + state: directory + mode: 0755 + owner: "{{ dovecot_user }}" + group: "{{ dovecot_group }}" + + +- name: create dovecot tls dir + file: + path: "{{ dovecot_tls_dir }}" + state: directory + mode: 0700 + + +- name: create dovecot mail dir + file: + path: "{{ dovecot_mail_dir }}" + state: directory + mode: "g+s,o-rwx" + owner: "{{ dovecot_mail_user }}" + group: "{{ dovecot_mail_group }}" + + +- name: create dovecot sieve dir + file: + path: "{{ dovecot_sieve_dir }}" + state: directory + mode: 0755 + owner: "{{ dovecot_mail_user }}" + group: "{{ dovecot_mail_group }}" + + +- name: generate dh params + include_role: + name: ca + vars: + function: dhparams + dh_params: + path: "{{ dovecot_tls_dh2048 }}" + mode: '0400' + remote_gen: yes + notify: restart dovecot + + +- name: remove unneeded dovecot files + file: + path: "{{ dovecot_conf_dir ~ '/' 
~ item }}" + state: absent + loop: + - conf.d + - dovecot-dict-auth.conf.ext + - dovecot-oauth2.conf.ext + - dovecot-openssl.cnf + - users + notify: restart dovecot + + +- name: get dovemail user info + getent: + database: passwd + key: "{{ dovecot_mail_user }}" + changed_when: no + + +- name: set dovemail uid + set_fact: + dovecot_dovemail_uid: "{{ getent_passwd[dovecot_mail_user][1] }}" + + +- name: template dovecot configuration + template: + src: "{{ item if item is string else item.src }}.j2" + dest: "{{ dovecot_conf_dir ~ '/' ~ ((item ~ '.conf.ext') if item is string else item.dest) }}" + force: yes + mode: "{{ '0400' if (item is string) else (item.mode | d('0400')) }}" + lstrip_blocks: yes + loop: + - { src: dovecot-dict-sql, dest: dovecot-dict-sql.conf.ext, mode: '0444' } + - dovecot-sql + - dovecot-trash + - { src: dovecot-acl, dest: dovecot.acl } + - { src: dovecot, dest: dovecot.conf } + notify: restart dovecot + + +- name: edit permissions of dovecot plugin files + file: + path: "{{ dovecot_conf_dir ~ '/' ~ item }}" + state: file + owner: "{{ dovecot_mail_user }}" + group: "{{ dovecot_mail_group }}" + loop: + - dovecot.acl + - dovecot-sql.conf.ext + - dovecot-trash.conf.ext + - dovecot-dict-sql.conf.ext + notify: restart dovecot + + +- name: template sieve scripts + template: + src: "{{ item.src }}.j2" + dest: "{{ dovecot_sieve_dir ~ '/' ~ item.dest ~ '.sieve' }}" + force: yes + mode: 0400 + owner: "{{ dovecot_mail_user }}" + group: "{{ dovecot_mail_group }}" + loop: "{{ dovecot_sieve_scripts | d([]) }}" + register: result + + +- name: compile scripts + shell: + cmd: "sievec {{ (dovecot_sieve_dir ~ '/') | quote }}" + when: result.changed + notify: restart dovecot + + +- name: collect svbin files + find: + paths: "{{ dovecot_sieve_dir }}/" + patterns: "*.svbin" + recurse: yes + depth: 3 + register: svbin_files + + +- name: change svbin permissions + file: + path: "{{ item.path }}" + mode: 0400 + owner: "{{ dovecot_mail_user }}" + group: "{{ dovecot_mail_group }}" + loop: "{{ svbin_files.files | d([]) | flatten(levels=1) }}" + notify: restart dovecot + + +- name: add extra cname record + include_role: + name: ns + vars: + function: add_records + ns_add_default_record: no + ns_records: + - name: "{{ mail_server.mua_actual_hostname }}" + type: CNAME + value: "{{ host_fqdn }}" + when: mail_server.mua_actual_hostname is defined + + +- name: deploy certs + include_role: + name: certs + vars: + common: + owner: root + group: root + post_hook: service dovecot restart + notify: restart dovecot + hostname: "{{ mail_server.mua_actual_hostname }}" + certs: + - cert: "{{ dovecot_tls_int_ecc384_cert }}" + key: "{{ dovecot_tls_int_ecc384_key }}" + ecc: yes + - cert: "{{ dovecot_tls_int_rsa2048_cert }}" + key: "{{ dovecot_tls_int_rsa2048_key }}" + ecc: no + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ dovecot_conf_dir }}" + - "{{ dovecot_tls_dir }}" + - "{{ dovecot_sieve_dir }}" + - "{{ dovecot_script_dir }}" + + +- name: add mail dir to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ dovecot_mail_dir }}" + when: dovecot_backup_mail_dir | d(false) == true + + +- name: enable and start dovecot + service: + name: dovecot + enabled: yes + state: started diff --git a/roles/dovecot/templates/dovecot-acl.j2 b/roles/dovecot/templates/dovecot-acl.j2 new file mode 100644 index 0000000..7c20c18 --- /dev/null +++ 
b/roles/dovecot/templates/dovecot-acl.j2 @@ -0,0 +1,8 @@ +* user={{ mail_server.admin_email }} lrwstipekxa +INBOX owner lrwstipek +{{ dovecot_sent_name }} owner lrwstipek +{{ dovecot_drafts_name }} owner lrwstipek +{{ dovecot_junk_name }} owner lrwstipek +{{ dovecot_trash_name }} owner lrwstipek +{{ dovecot_expunged_name }} owner +{{ dovecot_expunged_name }} anyone diff --git a/roles/dovecot/templates/dovecot-dict-sql.j2 b/roles/dovecot/templates/dovecot-dict-sql.j2 new file mode 100644 index 0000000..dd921b8 --- /dev/null +++ b/roles/dovecot/templates/dovecot-dict-sql.j2 @@ -0,0 +1,22 @@ +connect = host={{ hostvars[mail_server.db_server_hostname]['ansible_host'] }} user={{ mail_server.db_user }} password={{ mail_server.db_pass }} dbname={{ mail_server.db_name }} + +map { + pattern = shared/shared-boxes/user/$to/$from + table = mail_user_shares + value_field = dummy + + fields { + from_user = $from + to_user = $to + } +} + +map { + pattern = shared/shared-boxes/anyone/$from + table = mail_anyone_shares + value_field = dummy + + fields { + from_user = $from + } +} diff --git a/roles/dovecot/templates/dovecot-sql.j2 b/roles/dovecot/templates/dovecot-sql.j2 new file mode 100644 index 0000000..31602dc --- /dev/null +++ b/roles/dovecot/templates/dovecot-sql.j2 @@ -0,0 +1,47 @@ +driver = pgsql +connect = host={{ hostvars[mail_server.db_server_hostname]['ansible_host'] }} user={{ mail_server.db_user }} password={{ mail_server.db_pass }} dbname={{ mail_server.db_name }} +default_pass_scheme = PLAIN + +password_query = \ + SELECT username AS user, \ + ( \ + SELECT domain FROM mail_domains WHERE id = domain_id \ + ) AS domain, \ + password_plaintext AS password, \ + '{{ dovecot_mail_dir }}/%Ld/%Ln' AS userdb_home, \ + concat('*:bytes=', coalesce(nullif(quota_mb, 0), {{ dovecot_max_quota_mb }}), 'M') AS userdb_quota_rule, \ + {{ dovecot_dovemail_uid }} AS userdb_uid \ + FROM mail_users \ + WHERE \ + LOWER(username) = '%Ln' AND \ + domain_id = ( \ + SELECT id FROM mail_domains WHERE LOWER(domain) = '%Ld' \ + ) AND \ + enabled = true; + + + +user_query = \ + SELECT username AS user, \ + ( \ + SELECT domain FROM mail_domains WHERE id = domain_id \ + ) AS domain, \ + '{{ dovecot_mail_dir }}/%Ld/%Ln' AS home, \ + concat('*:bytes=', coalesce(nullif(quota_mb, 0), {{ dovecot_max_quota_mb }}), 'M') AS quota_rule, \ + {{ dovecot_dovemail_uid }} AS uid \ + FROM mail_users \ + WHERE \ + LOWER(username) = '%Ln' AND \ + domain_id = ( \ + SELECT id FROM mail_domains WHERE LOWER(domain) = '%Ld' \ + ) AND \ + enabled = true; + + +iterate_query = \ + SELECT username AS user, \ + ( \ + SELECT domain FROM mail_domains WHERE id = domain_id \ + ) AS domain \ + FROM mail_users \ + WHERE enabled = true; diff --git a/roles/dovecot/templates/dovecot-trash.j2 b/roles/dovecot/templates/dovecot-trash.j2 new file mode 100644 index 0000000..7f41c14 --- /dev/null +++ b/roles/dovecot/templates/dovecot-trash.j2 @@ -0,0 +1,3 @@ +1 {{ dovecot_trash_name }} +2 {{ dovecot_junk_name }} +3 {{ dovecot_sent_name }} diff --git a/roles/dovecot/templates/dovecot.j2 b/roles/dovecot/templates/dovecot.j2 new file mode 100644 index 0000000..2b359cf --- /dev/null +++ b/roles/dovecot/templates/dovecot.j2 @@ -0,0 +1,94 @@ +{% macro dovecot_option(option, padding = 0) -%} + {{- '' if (padding == 0) else (' ' * 4 * padding) -}} + {% if option.value is boolean -%} + {{ option.key }} = {{ 'yes' if option.value else 'no' }} + {% elif option.value | type_debug == 'list' -%} + {{ option.key }} = {{ option.value | join(' ') }} + {% elif option.value is 
mapping -%} + {{ option.key }} { + {% for suboption in (option.value | d({}) | dict2items) -%} + {{- dovecot_option(suboption, padding + 1) }} + {% endfor -%} + } + {% else -%} + {{ option.key }} = {{ option.value if option.value != None else '' }} + {% endif -%} +{% endmacro -%} + + +{% for option in (dovecot_cfg | d({}) | dict2items) -%} + {{ dovecot_option(option) }} +{%- endfor %} + +first_valid_uid = {{ dovecot_dovemail_uid }} +last_valid_uid = {{ dovecot_dovemail_uid }} + + +{% for proto in (dovecot_protocols | d({}) | dict2items) -%} + protocol {{ proto.key }} { + {% for option in (proto.value | d({}) | dict2items) -%} + {{ dovecot_option(option, 1) }} + {%- endfor -%} + } +{% endfor %} + + +{% for namespace in (dovecot_namespaces | d([])) -%} + namespace {{ namespace.name }} { + {% for option in (namespace.opts | d({}) | dict2items) -%} + {{ dovecot_option(option, 1) }} + {%- endfor -%} + + {% for mailbox in (namespace.mailboxes | d([])) -%} + {{- ' ' -}}mailbox {{ mailbox.name }} { + {% for mailbox_option in (mailbox.opts | d({}) | dict2items) -%} + {{ dovecot_option(mailbox_option, 2) }} + {%- endfor -%} + {{- ' ' -}}} + {% endfor -%} + } +{% endfor %} + + +{% if dovecot_dicts is mapping -%} +dict { + {% for option in (dovecot_dicts | d({}) | dict2items) -%} + {{ dovecot_option(option, 1) }} + {%- endfor -%} +} +{% endif %} + + +{% if dovecot_plugin_config is mapping -%} +plugin { + {% for option in (dovecot_plugin_config | d({}) | dict2items) -%} + {{ dovecot_option(option, 1) }} + {%- endfor -%} +} +{% endif %} + + +{% for db in (dovecot_user_pass_db | d([])) -%} +{{ db.type }} { + {% for option in (db.opts | d({}) | dict2items) -%} + {{ dovecot_option(option, 1) }} + {%- endfor -%} +} +{% endfor %} + + +{% for service in (dovecot_services | d({}) | dict2items) -%} +service {{ service.key }} { + {% for option in (service.value.opts | d({}) | dict2items) -%} + {{ dovecot_option(option, 1) }} + {%- endfor %} + + {% for listener in (service.value.listeners | d([])) -%} + {{- ' ' -}}{{ listener.type }} {{ listener.name | d('') }} { + {% for listener_option in (listener.opts | d({}) | dict2items) -%} + {{ dovecot_option(listener_option, 2) }} + {%- endfor -%} + {{- ' ' -}}} + {% endfor -%} +} +{% endfor %} diff --git a/roles/dovecot/templates/sieve-spam.j2 b/roles/dovecot/templates/sieve-spam.j2 new file mode 100644 index 0000000..f6566f4 --- /dev/null +++ b/roles/dovecot/templates/sieve-spam.j2 @@ -0,0 +1,6 @@ +require ["fileinto", "imap4flags", "mailbox"]; + +if header :contains "X-Spam" "Yes" { + fileinto :create "{{ dovecot_trash_name }}"; + stop; +} diff --git a/roles/external_ns/tasks/main.yml b/roles/external_ns/tasks/main.yml new file mode 100644 index 0000000..ec4ba18 --- /dev/null +++ b/roles/external_ns/tasks/main.yml @@ -0,0 +1,2 @@ +- debug: + msg: external_ns role is not supported diff --git a/roles/iptables/defaults/main.yml b/roles/iptables/defaults/main.yml new file mode 100644 index 0000000..8caa2a1 --- /dev/null +++ b/roles/iptables/defaults/main.yml @@ -0,0 +1,22 @@ +iptables_dir: /etc/iptables +iptables_file: "{{ iptables_dir }}/rules-save" + +iptables_mappings: + state: { module: 'state', param: 'state', upper: yes, join: ',' } + action: { param: 'j', upper: yes } + protocol: { param: 'p', lower: yes } + icmp_type: { module: 'icmp', param: 'icmp-type' } + in_intf: { param: 'i' } + out_intf: { param: 'o' } + src_addr: { param: 's', join: ',' } + dst_addr: { param: 'd', join: ',' } + src_port: { module: 'multiport', param: 'source-ports' } + dst_port: { module: 
'multiport', param: 'destination-ports' } + any_port: { module: 'multiport', param: 'ports' } + ipsec: { module: 'policy', param: 'pol', if_false: 'none', if_true: 'ipsec' } + ipsec_direction: { module: 'policy', param: 'dir', lower: yes } + tcp_flags: { param: 'tcp-flags', upper: yes } + mss: { module: 'tcpmss', param: 'mss' } + + set_mss: { param: 'set-mss' } + to_source: { param: 'to-source' } diff --git a/roles/iptables/tasks/main.yml b/roles/iptables/tasks/main.yml new file mode 100644 index 0000000..0547b8e --- /dev/null +++ b/roles/iptables/tasks/main.yml @@ -0,0 +1,47 @@ +- block: + - name: set firewall_cfg + set_fact: + firewall_cfg: "{{ firewall_default_config | d({}) | combine(firewall | d({}), recursive=true) }}" + + + - name: install iptables + include_tasks: tasks/install_packages.yml + vars: + package: + - iptables + - alpine: iptables-openrc + - debian: iptables-persistent + + + - name: edit service config + lineinfile: + path: /etc/conf.d/iptables + regexp: "^IPTABLES_SAVE=" + line: "IPTABLES_SAVE=\"{{ iptables_file }}\"" + + + - name: template iptables schema + template: + src: iptables.j2 + dest: "{{ iptables_file }}" + force: yes + lstrip_blocks: yes + register: result + + + - name: load iptables + community.general.iptables_state: + path: "{{ iptables_file }}" + state: restored + async: "{{ ansible_timeout }}" + poll: 0 + when: result.changed + + + - name: start and enable iptables + service: + name: iptables + enabled: yes + state: started + + when: firewall is mapping diff --git a/roles/iptables/templates/iptables.j2 b/roles/iptables/templates/iptables.j2 new file mode 100644 index 0000000..b3ac261 --- /dev/null +++ b/roles/iptables/templates/iptables.j2 @@ -0,0 +1,68 @@ +{%- macro iptables_param(name, value, ns) -%} + {% set has_not_operator = name.startswith('not_') -%} + {% set filtered_name = name[4:] if name.startswith('not_') else name -%} + + {% if iptables_mappings[filtered_name] is not mapping -%} + {%- include 'no iptables mapping for "' ~ filtered_name ~ '"' -%} + {% elif iptables_mappings[filtered_name].param is not string -%} + {%- include 'no param in iptables mapping for "' ~ filtered_name ~ '"' -%} + {% else -%} + {% set mapping = iptables_mappings[filtered_name] -%} + + {% if mapping.module is string and ns.module != mapping.module -%}-m {{ mapping.module }} {% endif -%} + {% if has_not_operator == true -%}! 
{% endif -%} + {% if mapping.param | length == 1 -%}-{{ mapping.param }} {% else -%}--{{ mapping.param }} {% endif -%} + + {%- set new_value = (value | join(mapping.join | d(','))) if value | type_debug == 'list' else value -%} + {%- set new_value = (new_value | upper) if mapping.upper | d(false) == true else (new_value | lower) if mapping.lower | d(false) == true else new_value -%} + {%- set new_value = mapping.if_true if value is boolean and value == true and mapping.if_true is string else new_value -%} + {%- set new_value = mapping.if_false if value is boolean and value == false and mapping.if_false is string else new_value -%} + + {{- new_value -}} + + {%- if mapping.module is string -%} + {%- set ns.module = mapping.module -%} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + + +{% macro iptables_rule(chain, rule) -%} + {%- set ns = namespace(module='') -%} + + -A {{ chain | upper -}} + {%- for param in rule | d({}) | dict2items -%} + {{- ' ' -}} + {{- iptables_param(param.key, param.value, ns) -}} + {%- endfor -%} +{% endmacro -%} + + +{% macro iptables_table(name, params) -%} + {% if params is mapping and (params | dict2items | length > 0) -%} + *{{ name }} + {% for policy in params.default_policy | d({}) | dict2items -%} + :{{ policy.key | upper }} {{ policy.value | upper }} + {% endfor -%} + + {% for section in params | dict2items -%} + {% if section.key != 'default_policy' -%} + {% if section.value | type_debug == 'list' -%} + {% for rule in section.value -%} + {{ iptables_rule(section.key, rule) }} + {% endfor -%} + {% elif section.value is mapping -%} + {{ iptables_rule(section.key, section.value) }} + {% endif -%} + {% endif -%} + {% endfor -%} + + COMMIT + {% endif -%} +{%- endmacro -%} + + +{{- iptables_table('filter', firewall_cfg.filter | d({})) }} +{{ iptables_table('nat', firewall_cfg.nat | d({})) }} +{{ iptables_table('mangle', firewall_cfg.mangle | d({})) }} +{{ iptables_table('raw', firewall_cfg.raw | d({})) -}} diff --git a/roles/logrotate/defaults/main.yml b/roles/logrotate/defaults/main.yml new file mode 100644 index 0000000..edaef71 --- /dev/null +++ b/roles/logrotate/defaults/main.yml @@ -0,0 +1,16 @@ +logrotate_conf_file: /etc/logrotate.conf +logrotate_conf_dir: /etc/logrotate.d + +logrotate_default_directives: + - create + - compress + - dateext + - delaycompress + - notifempty + - missingok + +logrotate_default_config: + rotate: '4' + size: '1M' + +logrotate_schedule: weekly diff --git a/roles/logrotate/handlers/main.yml b/roles/logrotate/handlers/main.yml new file mode 100644 index 0000000..66ad8b4 --- /dev/null +++ b/roles/logrotate/handlers/main.yml @@ -0,0 +1,3 @@ +- name: reload systemd daemons + systemd: + daemon_reload: yes diff --git a/roles/logrotate/tasks/main.yml b/roles/logrotate/tasks/main.yml new file mode 100644 index 0000000..6bdab27 --- /dev/null +++ b/roles/logrotate/tasks/main.yml @@ -0,0 +1,46 @@ +- name: set logrotate_cfg + set_fact: + logrotate_cfg: "{{ logrotate_default_config | d({}) | combine(logrotate_config | d({}), recursive=true) }}" + + +- name: install logrotate package + include_tasks: tasks/install_packages.yml + vars: + package: + - logrotate + + +- name: change logrotate config path + replace: + path: /etc/periodic/daily/logrotate + regexp: '\s/etc/logrotate.conf' + replace: ' {{ logrotate_conf_file }}' + when: ansible_distribution == 'Alpine' + + +- name: edit string in systemd init file + lineinfile: + path: /lib/systemd/system/logrotate.service + regexp: '(ExecStart=/usr/sbin/logrotate )(\S*)(\s*)' + line: '\1{{ 
logrotate_conf_file }}\3' + backrefs: yes + notify: reload systemd daemons + when: ansible_distribution == 'Debian' + + +- name: template logrotate config + template: + src: logrotate.j2 + dest: "{{ logrotate_conf_file }}" + force: yes + mode: 0644 + + +- name: template logrotate service configs + template: + src: "{{ item.template | d(item.name) }}.j2" + dest: "{{ logrotate_conf_dir ~ '/' ~ item.name }}" + force: yes + mode: 0644 + loop: "{{ logrotate_services | d([]) }}" + when: item.name is defined diff --git a/roles/logrotate/templates/logrotate.j2 b/roles/logrotate/templates/logrotate.j2 new file mode 100644 index 0000000..5c340d2 --- /dev/null +++ b/roles/logrotate/templates/logrotate.j2 @@ -0,0 +1,18 @@ +{% for dir in (logrotate_default_directives | d({})) -%} + {{ dir | lower }} +{% endfor %} + +{% for dir in (logrotate_directives | d({})) -%} + {{ dir | lower }} +{% endfor %} + +{% for option in (logrotate_cfg | d({}) | dict2items) -%} + {{ option.key | lower }} {{ option.value }} +{% endfor %} + + +{{ logrotate_schedule | d('weekly') }} + +tabooext + .apk-new + +include {{ logrotate_conf_dir }} diff --git a/roles/mail-db/files/schema.sql b/roles/mail-db/files/schema.sql new file mode 100644 index 0000000..c8a72da --- /dev/null +++ b/roles/mail-db/files/schema.sql @@ -0,0 +1,83 @@ +CREATE TABLE IF NOT EXISTS mail_domains ( + id integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, + domain text NOT NULL, + + UNIQUE (domain) +); + +CREATE TABLE IF NOT EXISTS mail_aliases ( + id integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, + enabled boolean DEFAULT true NOT NULL, + + alias_username text NOT NULL, + alias_domain_id integer NOT NULL, + + email_username text NOT NULL, + email_domain_id integer NOT NULL, + + FOREIGN KEY (alias_domain_id) REFERENCES mail_domains(id) ON DELETE CASCADE, + FOREIGN KEY (email_domain_id) REFERENCES mail_domains(id) ON DELETE CASCADE, + + UNIQUE (alias_username, alias_domain_id, email_username, email_domain_id) +); + +CREATE TABLE IF NOT EXISTS mail_forwards ( + id integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, + enabled boolean DEFAULT true NOT NULL, + source text NOT NULL, + destination text NOT NULL, + + UNIQUE (source, destination) +); + +CREATE TABLE IF NOT EXISTS mail_tls ( + id integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, + enabled boolean DEFAULT true NOT NULL, + + foreign_domain text NOT NULL, + + policy text NOT NULL, + params text DEFAULT NULL, + + UNIQUE (foreign_domain) +); + +CREATE TABLE IF NOT EXISTS mail_users ( + id integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, + enabled boolean DEFAULT true NOT NULL, + + username text NOT NULL, + domain_id integer NOT NULL, + + password_plaintext text DEFAULT NULL, + password_md5 text DEFAULT NULL, + password_sha1 text DEFAULT NULL, + password_sha256 text DEFAULT NULL, + + quota_mb integer NOT NULL DEFAULT 0 CHECK (quota_mb >= 0), + no_reply boolean NOT NULL DEFAULT false, + allowed_networks text DEFAULT NULL, + + when_created timestamp with time zone DEFAULT now(), + + FOREIGN KEY (domain_id) REFERENCES mail_domains(id) ON DELETE CASCADE, + UNIQUE (username, domain_id), + + CHECK ((password_md5 IS NOT NULL) OR (password_sha1 IS NOT NULL) OR + (password_sha256 IS NOT NULL) OR (password_plaintext IS NOT NULL)) +); + +CREATE TABLE IF NOT EXISTS mail_anyone_shares ( + from_user text NOT NULL, + dummy character(1) DEFAULT '1'::bpchar, + + UNIQUE (from_user) +); + +CREATE TABLE IF NOT EXISTS mail_user_shares ( + from_user text NOT NULL, + to_user text NOT NULL, + dummy 
character(1) DEFAULT '1'::bpchar,
+
+    UNIQUE (from_user, to_user)
+);
diff --git a/roles/mail-db/tasks/main.yml b/roles/mail-db/tasks/main.yml
new file mode 100644
index 0000000..58462f5
--- /dev/null
+++ b/roles/mail-db/tasks/main.yml
@@ -0,0 +1,55 @@
+- name: create postgres user and database
+  include_role:
+    name: postgres
+    apply:
+      delegate_to: "{{ mail_server.db_server_hostname }}"
+  vars:
+    function: add_database
+    database:
+      name: "{{ mail_server.db_name }}"
+      user: "{{ mail_server.db_user }}"
+      pass: "{{ mail_server.db_pass }}"
+      addresses: "{{ [
+        (hostvars[mail_server.mta_hostname]['ansible_host'] if mail_server.mta_hostname is defined else ''),
+        (hostvars[mail_server.mua_hostname]['ansible_host'] if mail_server.mua_hostname is defined else ''),
+        (hostvars[mail_server.rspamd_hostname]['ansible_host'] if mail_server.rspamd_hostname is defined else '')
+        ] | select() | list }}"
+
+
+- name: check if users table already exists
+  include_role:
+    name: postgres
+    apply:
+      delegate_to: "{{ mail_server.db_server_hostname }}"
+  vars:
+    function: run_query
+    query:
+      database: "{{ mail_server.db_name }}"
+      text: "SELECT to_regclass('public.mail_users');"
+
+
+- name: build database schema
+  include_role:
+    name: postgres
+    apply:
+      delegate_to: "{{ mail_server.db_server_hostname }}"
+  vars:
+    function: run_query
+    query:
+      database: "{{ mail_server.db_name }}"
+      text: "{{ lookup('file', './files/schema.sql') }}"
+      user: "{{ mail_server.db_user }}"
+      refresh_privs: yes
+  when: (query_result is defined) and (query_result[0].to_regclass is none)
+
+
+- name: create database entries
+  include_role:
+    name: postgres
+    apply:
+      delegate_to: "{{ mail_server.db_server_hostname }}"
+  vars:
+    function: run_query
+    query:
+      database: "{{ mail_server.db_name }}"
+      text: "{{ lookup('template', './templates/sql.j2') }}"
diff --git a/roles/mail-db/tasks/user.yml b/roles/mail-db/tasks/user.yml
new file mode 100644
index 0000000..c79b363
--- /dev/null
+++ b/roles/mail-db/tasks/user.yml
@@ -0,0 +1,10 @@
+- name: create user
+  include_role:
+    name: postgres
+    apply:
+      delegate_to: "{{ mail_server.db_server_hostname }}"
+  vars:
+    function: run_query
+    query:
+      database: "{{ mail_server.db_name }}"
+      text: "{{ lookup('template', './templates/user.j2') }}"
diff --git a/roles/mail-db/templates/sql.j2 b/roles/mail-db/templates/sql.j2
new file mode 100644
index 0000000..6814ba6
--- /dev/null
+++ b/roles/mail-db/templates/sql.j2
@@ -0,0 +1,79 @@
+{% if mail_server.domains | d([]) | length > 0 -%}
+INSERT INTO mail_domains (domain) VALUES
+  {% for domain in mail_server.domains -%}
+  ('{{ domain }}'){% if not loop.last -%},{%- endif %}
+  {% endfor -%}
+ON CONFLICT DO NOTHING;
+{% endif -%}
+
+
+{% if mail_server.users | d([]) | length > 0 -%}
+INSERT INTO mail_users (enabled, username, domain_id, password_md5, password_sha1,
+  password_sha256, password_plaintext, quota_mb, no_reply) VALUES
+  {% for user in mail_server.users -%}
+  (true,
+  '{{ user.name }}',
+  (SELECT id FROM mail_domains WHERE domain = '{{ user.domain }}'),
+  '{{ user.cram_md5 | d(user.password | hash('md5')) }}',
+  '{{ user.password | hash('sha1') }}',
+  '{{ user.password | hash('sha256') }}',
+  '{{ user.password }}',
+  {{ user.quota_mb | d(0) }},
+  {{ 'true' if (user.no_reply | d(false) == true) else 'false' }}
+  ){% if not loop.last -%},{%- endif %}
+  {% endfor -%}
+ON CONFLICT (username, domain_id) DO UPDATE SET
+  password_md5 = EXCLUDED.password_md5,
+  password_sha1 = EXCLUDED.password_sha1,
+  password_sha256 = EXCLUDED.password_sha256,
password_plaintext = EXCLUDED.password_plaintext, + quota_mb = EXCLUDED.quota_mb, + no_reply = EXCLUDED.no_reply; +{% endif -%} + + +{% if mail_server.aliases | d([]) | length > 0 -%} +INSERT INTO mail_aliases (enabled, alias_username, alias_domain_id, email_username, email_domain_id) VALUES + {% for alias in mail_server.aliases -%} + (true, + '{{ alias.source }}', + (SELECT id FROM mail_domains WHERE domain = '{{ alias.source_domain }}'), + '{{ alias.target }}', + (SELECT id FROM mail_domains WHERE domain = '{{ alias.target_domain }}') + ){% if not loop.last -%},{%- endif %} + {% endfor -%} +ON CONFLICT DO NOTHING; +{% endif -%} + + +{% if mail_server.forwards | d([]) | length > 0 -%} +INSERT INTO mail_forwards (enabled, source, destination) VALUES + {% for forward in mail_server.forwards -%} + (true, + '{{ forward.source }}', + '{{ forward.destination }}' + ){% if not loop.last -%},{%- endif %} + {% endfor -%} +ON CONFLICT DO NOTHING; +{% endif -%} + + +{% if mail_server.global_shares | d([]) | length > 0 -%} +INSERT INTO mail_anyone_shares (from_user) VALUES + {% for share in mail_server.global_shares -%} + ('{{ share }}' + ){% if not loop.last -%},{%- endif %} + {% endfor -%} +ON CONFLICT DO NOTHING; +{% endif -%} + + +{% if mail_server.user_shares | d([]) | length > 0 -%} +INSERT INTO mail_user_shares (from_user, to_user) VALUES + {% for share in mail_server.user_shares -%} + ('{{ share.from }}', + '{{ share.to }}' + ){% if not loop.last -%},{%- endif %} + {% endfor -%} +ON CONFLICT DO NOTHING; +{% endif -%} diff --git a/roles/mail-db/templates/user.j2 b/roles/mail-db/templates/user.j2 new file mode 100644 index 0000000..af0ae6d --- /dev/null +++ b/roles/mail-db/templates/user.j2 @@ -0,0 +1,20 @@ +INSERT INTO mail_users (enabled, username, domain_id, password_md5, password_sha1, + password_sha256, password_plaintext, quota_mb, no_reply) VALUES + ( + true, + '{{ user.name }}', + (SELECT id FROM mail_domains WHERE domain = '{{ user.domain }}'), + '{{ user.password | hash('md5') }}', + '{{ user.password | hash('sha1') }}', + '{{ user.password | hash('sha256') }}', + '{{ user.password }}', + {{ user.quota_mb if user.quota_mb is number else '0' }}, + {{ 'true' if (user.no_reply | d(false) == true) and (user.no_reply != None) else 'false' }} + ) +ON CONFLICT (username, domain_id) DO UPDATE SET + password_md5 = EXCLUDED.password_md5, + password_sha1 = EXCLUDED.password_sha1, + password_sha256 = EXCLUDED.password_sha256, + password_plaintext = EXCLUDED.password_plaintext, + quota_mb = EXCLUDED.quota_mb, + no_reply = EXCLUDED.no_reply; diff --git a/roles/mail-user/tasks/main.yml b/roles/mail-user/tasks/main.yml new file mode 100644 index 0000000..a6f61a2 --- /dev/null +++ b/roles/mail-user/tasks/main.yml @@ -0,0 +1,13 @@ +- name: create mail user + include_role: + name: mail-db + tasks_from: user.yml + vars: + user: + name: "{{ mail_account.username }}" + domain: "{{ mail_account.domain | d(mail_server.tld) }}" + password: "{{ mail_account.password }}" + quota_mb: "{{ mail_account.quota_mb | d(None) }}" + no_reply: "{{ mail_account.no_reply | d(None) }}" + when: (mail_server is defined) and (mail_account is mapping) and + (mail_account.username is defined) and (mail_account.password is defined) diff --git a/roles/mail/defaults/main.yml b/roles/mail/defaults/main.yml new file mode 100644 index 0000000..8567dfd --- /dev/null +++ b/roles/mail/defaults/main.yml @@ -0,0 +1,37 @@ +mail_user: roundcube +mail_group: webmail +mail_dir: /opt/roundcube +mail_enigma_subdir: enigma-keys + +mail_fpm_socket: 
/var/run/php7-fpm.sock +mail_mime_types_file: mime.types + +mail_plugins: + - name: persistent_login + where: texxasrulez/persistent_login + major: 1 + config: true + + - name: contextmenu + where: johndoh/roundcube-contextmenu + + - name: html5_notifier + where: stremlau/html5_notifier + config: true + + - name: show_folder_size + where: jfcherng-roundcube/plugin-show-folder-size + + - name: zipdownload + - name: userinfo + - name: attachment_reminder + - name: hide_blockquote + - name: reconnect + + - name: markasjunk + config: true + + - name: enigma + config: true + +# managesieve diff --git a/roles/mail/files/favicon.ico b/roles/mail/files/favicon.ico new file mode 100644 index 0000000..b63aeb2 Binary files /dev/null and b/roles/mail/files/favicon.ico differ diff --git a/roles/mail/files/logo.png b/roles/mail/files/logo.png new file mode 100644 index 0000000..ab4ba4e Binary files /dev/null and b/roles/mail/files/logo.png differ diff --git a/roles/mail/handlers/main.yml b/roles/mail/handlers/main.yml new file mode 100644 index 0000000..50016ad --- /dev/null +++ b/roles/mail/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart php fpm + service: + name: php-fpm7 + state: restarted \ No newline at end of file diff --git a/roles/mail/tasks/db.yml b/roles/mail/tasks/db.yml new file mode 100644 index 0000000..40da130 --- /dev/null +++ b/roles/mail/tasks/db.yml @@ -0,0 +1,60 @@ +- block: + - name: check if system table is missing + community.postgresql.postgresql_query: + db: "{{ database_name | mandatory }}" + query: SELECT to_regclass('public.system'); + register: db_result + changed_when: false + + - name: set db_is_empty fact + set_fact: + db_is_empty: "{{ (db_result.query_result is defined) and (db_result.query_result[0].to_regclass is none) }}" + delegate_to: postgres + + +- name: fetch script from mail to ansible + fetch: + src: "{{ mail_dir }}/SQL/postgres.initial.sql" + dest: /tmp/ + flat: yes + register: fetched + when: db_is_empty + + +- block: + - name: create temporary file on postgres for holding the script + tempfile: + state: file + register: tf + + - name: upload script + copy: + src: "{{ fetched.dest }}" + dest: "{{ tf.path }}" + force: yes + + - name: execute script + community.postgresql.postgresql_query: + db: "{{ database_name | mandatory }}" + path_to_script: "{{ tf.path }}" + as_single_query: no + + - name: remove temp script + file: + path: "{{ tf.path }}" + state: absent + + when: db_is_empty + delegate_to: postgres + + +- name: remove fetched script + file: + path: "{{ fetched.dest }}" + state: absent + when: db_is_empty + + +- name: update db privileges + include_tasks: tasks/psql_grant_privs.yml + when: db_is_empty diff --git a/roles/mail/tasks/main.yml b/roles/mail/tasks/main.yml new file mode 100644 index 0000000..f518963 --- /dev/null +++ b/roles/mail/tasks/main.yml @@ -0,0 +1,214 @@ +- name: install dependencies + package: + name: + - php7 + - curl + - libgd + - php7-fpm + - aspell-libs + - aspell-en + - aspell-ru + - aspell + - aspell-lang + - gnupg + + +- name: install php7 dependencies + package: + name: + - php7-dom + - php7-session + - php7-xml + - php7-intl + - php7-json + - php7-pdo + - php7-pdo_mysql + - php7-pdo_pgsql + - php7-mbstring + - php7-openssl + - php7-ctype + + - php7-curl + - php7-fileinfo + - php7-exif + - php7-iconv + - php7-gd + - php7-zip + + - php7-pspell + - php7-pcntl + - php7-xmlwriter + - php7-tokenizer + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ mail_user }}" + group: "{{ 
mail_group }}" + dir: "{{ mail_dir }}" + create_home: no + + +- name: create mail dir + file: + path: "{{ mail_dir }}" + state: directory + mode: 0775 + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + + +- name: create key dir for enigma plugin + file: + path: "{{ mail_dir }}/{{ mail_enigma_subdir }}" + state: directory + mode: 0770 + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + + +- name: template php7 custom config + template: + src: php.j2 + dest: /etc/php7/conf.d/50_override.ini + force: yes + mode: 0640 + notify: restart php fpm + + +- name: template fpm custom config + template: + src: fpm.j2 + dest: /etc/php7/php-fpm.d/roundcube.conf + force: yes + mode: 0640 + notify: restart php fpm + + +- name: delete www fpm config + file: + path: /etc/php7/php-fpm.d/www.conf + state: absent + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: true + group: "{{ mail_group }}" + fastcgi: yes + + +- name: get latest version of roundcube + include_tasks: get_lastversion.yml + vars: + package: + name: roundcube/roundcubemail + location: github + assets: true + asset_filter: 'complete.tar.gz$' + file: "{{ mail_dir }}/last_version" + extract: "{{ mail_dir }}" + user: "{{ mail_user }}" + group: "{{ mail_group }}" + notify: restart php fpm + strip_first_dir: yes + + +- name: ensure roundcube logs/temp directories are writable + file: + path: "{{ mail_dir }}/{{ item }}" + mode: 0775 + state: directory + loop: + - logs + - temp + + +- name: download mime types + get_url: + url: https://svn.apache.org/repos/asf/httpd/httpd/trunk/docs/conf/mime.types + dest: "{{ mail_dir }}/config/{{ mail_mime_types_file }}" + mode: 0440 + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + notify: restart php fpm + + +- name: create custom directory + file: + path: "{{ mail_dir }}/public_html/custom" + mode: 0775 + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + state: directory + + +- name: upload files to custom directory + copy: + src: "{{ item }}" + dest: "{{ mail_dir }}/public_html/custom/{{ item }}" + mode: 0444 + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + loop: + - favicon.ico + - logo.png + + +- name: template roundcube config + template: + src: config.j2 + dest: "{{ mail_dir }}/config/config.inc.php" + force: yes + mode: 0660 + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + notify: restart php fpm + + +- name: delete sample config + file: + path: "{{ mail_dir }}/config/config.inc.php.sample" + state: absent + + +- name: setup database + include_tasks: db.yml + + +- name: install plugins + include_tasks: plugin.yml + loop: "{{ mail_plugins }}" + + +- name: add cleandb cron job + cron: + name: roundcube database cleanup + job: "{{ mail_dir }}/bin/cleandb.sh > /dev/null" + hour: "5" + minute: "0" + user: "{{ mail_user }}" + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ mail_dir }}" + + +- name: enable and start php-fpm7 + service: + name: php-fpm7 + state: started + enabled: yes + diff --git a/roles/mail/tasks/plugin.yml b/roles/mail/tasks/plugin.yml new file mode 100644 index 0000000..39276a6 --- /dev/null +++ b/roles/mail/tasks/plugin.yml @@ -0,0 +1,53 @@ +- block: + - name: create plugin directory + file: + path: "{{ mail_dir }}/plugins/{{ item.name }}" + state: directory + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + mode: 0775 + + - name: get latest version of plugin + include_tasks: get_lastversion.yml + 
vars: + package: + name: "{{ item.where }}" + location: github + assets: true + asset_filter: 'tar.gz$' + file: "{{ mail_dir }}/last_pv_{{ item.name }}" + extract: "{{ mail_dir }}/plugins/{{ item.name }}" + user: "{{ mail_user }}" + group: "{{ mail_group }}" + notify: restart php fpm + strip_first_dir: yes + major_branch: "{{ item.major | d(omit) }}" + + when: item.where is defined + + +- block: + - name: check if subdir exists for internal plugins + stat: + path: "{{ mail_dir }}/plugins/{{ item.name }}" + register: plugin_dir_stat + + + - name: fail if it does not exist + fail: + msg: "subdir does not exist for internal plugin {{ item.name }}" + when: not plugin_dir_stat.stat.exists or not plugin_dir_stat.stat.isdir + + when: item.where is not defined + + +- name: template plugin config + template: + src: "plugin_{{ item.name }}.j2" + dest: "{{ mail_dir }}/plugins/{{ item.name }}/config.inc.php" + force: yes + mode: 0660 + owner: "{{ mail_user }}" + group: "{{ mail_group }}" + when: item.config | d(false) == true + notify: restart php fpm diff --git a/roles/mail/templates/config.j2 b/roles/mail/templates/config.j2 new file mode 100644 index 0000000..969e759 --- /dev/null +++ b/roles/mail/templates/config.j2 @@ -0,0 +1,150 @@ + [ + 'verify_peer' => false, + ], +]; + +$config['imap_vendor'] = 'dovecot'; +$config['imap_cache'] = 'db'; +$config['messages_cache'] = true; +$config['imap_cache_ttl'] = '2d'; +$config['messages_cache_ttl'] = '2d'; + + +// ---------------------------------- +// SMTP +// ---------------------------------- + +$config['smtp_server'] = 'ssl://{{ mail_server.smtp.fqdn }}'; +$config['smtp_port'] = {{ mail_server.smtp.implicit_port }}; +$config['smtp_conn_options'] = [ + 'ssl' => [ + 'verify_peer' => false, + ], + +]; + + +// ---------------------------------- +// SYSTEM +// ---------------------------------- + +$config['enable_installer'] = false; + +$config['skin_logo'] = [ + "[favicon]" => "custom/favicon.ico", + "*" => "custom/logo.png", +]; + +$config['login_rate_limit'] = 15; +$config['display_product_info'] = 2; +$config['session_lifetime'] = {{ (60 * 24 * 7 * 4) | int }}; +$config['session_name'] = 'rc_{{ org | lower | replace(' ', '') }}_sessid'; +$config['session_auth_name'] = 'rc_{{ org | lower | replace(' ', '') }}_sessauth'; + +$config['session_samesite'] = 'Strict'; +$config['des_key'] = '{{ (base_encryption_key ~ host_encryption_key ~ 'des_key') | hash('sha512') | truncate(24, False, '') }}'; +$config['cipher_method'] = 'ChaCha20-Poly1305'; + +$config['username_domain'] = '{{ tld }}'; +$config['username_domain_forced'] = true; + +$config['max_message_size'] = '{{ max_mail_size_mb | int | abs }}M'; +$config['max_disclosed_recipients'] = 10; + +$config['product_name'] = '{{ org_localized }} | Почта'; +$config['useragent'] = null; + +$config['identities_level'] = 0; +$config['identity_image_size'] = 128; + +$config['mime_types'] = '{{ mail_dir }}/config/{{ mail_mime_types_file }}'; + + +// ---------------------------------- +// USER INTERFACE +// ---------------------------------- + +$config['language'] = 'ru_RU'; + +$config['date_format'] = 'd.m.Y'; +$config['date_long'] = 'd.m.Y H:i'; + +$config['drafts_mbox'] = 'Черновики'; +$config['junk_mbox'] = 'Спам'; +$config['sent_mbox'] = 'Отправленные'; +$config['trash_mbox'] = 'Удаленные'; + +$config['min_refresh_interval'] = 30; +$config['undo_timeout'] = 10; + +$config['enable_spellcheck'] = true; +$config['spellcheck_engine'] = 'pspell'; +$config['spellcheck_languages'] = ['en'=>'English', 'ru'=>'Русский']; + 
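+// The 'pspell' engine above relies on the aspell packages installed by this role
+// (php7-pspell, aspell, aspell-en, aspell-ru); the spellcheck_languages keys are
+// assumed to match the installed aspell dictionary codes.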
+// ---------------------------------- +// ADDRESSBOOK SETTINGS +// ---------------------------------- + +$config['contact_form_mode'] = 'business'; + +$config['collected_recipients'] = true; +$config['collected_senders'] = true; + + +// ---------------------------------- +// USER PREFERENCES +// ---------------------------------- + +$config['addressbook_sort_col'] = 'name'; +$config['show_images'] = 3; +$config['htmleditor'] = 4; +$config['draft_autosave'] = 60; + +$config['refresh_interval'] = 30; +$config['check_all_folders'] = true; + +$config['reply_mode'] = 1; + +$config['default_font'] = 'Tahoma'; +$config['message_show_email'] = true; + + +// ---------------------------------- +// PLUGINS +// ---------------------------------- + +$config['plugins'] = array( +{% for plugin in mail_plugins %} + '{{ plugin.name }}', +{% endfor %} + 'filesystem_attachments' +); diff --git a/roles/mail/templates/fpm.j2 b/roles/mail/templates/fpm.j2 new file mode 100644 index 0000000..1d8e844 --- /dev/null +++ b/roles/mail/templates/fpm.j2 @@ -0,0 +1,22 @@ +[roundcube] +user = {{ mail_user }} +group = {{ mail_group }} +listen = {{ mail_fpm_socket }} + +listen.owner = {{ mail_user }} +listen.group = {{ mail_group }} +listen.mode = 0660 + +pm = dynamic +pm.max_children = 8 +pm.start_servers = 2 +pm.min_spare_servers = 1 +pm.max_spare_servers = 3 + +catch_workers_output = yes +php_admin_flag[log_errors] = on +php_admin_value[error_log] = syslog + +[global] +error_log = syslog +log_level = notice \ No newline at end of file diff --git a/roles/mail/templates/nginx_server.j2 b/roles/mail/templates/nginx_server.j2 new file mode 100644 index 0000000..7b15e8c --- /dev/null +++ b/roles/mail/templates/nginx_server.j2 @@ -0,0 +1,16 @@ +root {{ mail_dir }}/public_html; +index index.php; + +disable_symlinks off; + +location ~ /\. 
{ + deny all; +} + +location ~ \.php$ { + include /etc/nginx/fastcgi.conf; + + fastcgi_pass unix:{{ mail_fpm_socket }}; + fastcgi_index index.php; + fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; +} \ No newline at end of file diff --git a/roles/mail/templates/php.j2 b/roles/mail/templates/php.j2 new file mode 100644 index 0000000..3ad961e --- /dev/null +++ b/roles/mail/templates/php.j2 @@ -0,0 +1,13 @@ +error_reporting = E_ALL & ~E_NOTICE +memory_limit = {{ (((container_memory | int | abs) | default(512)) / 2) | int | abs }}M +file_uploads = On +session.auto_start = 0 +mbstring.func_overload = 0 +pcre.backtrack_limit = 200000 +date.timezone = {{ default_timezone }} + +upload_max_filesize = {{ max_mail_size_mb | int | abs }}M +post_max_size = {{ max_mail_size_mb | int | abs }}M + +allow_url_fopen = On + diff --git a/roles/mail/templates/plugin_enigma.j2 b/roles/mail/templates/plugin_enigma.j2 new file mode 100644 index 0000000..1126e65 --- /dev/null +++ b/roles/mail/templates/plugin_enigma.j2 @@ -0,0 +1,13 @@ + \ No newline at end of file diff --git a/roles/mariadb/defaults/main.yml b/roles/mariadb/defaults/main.yml new file mode 100644 index 0000000..d057093 --- /dev/null +++ b/roles/mariadb/defaults/main.yml @@ -0,0 +1,21 @@ +mariadb_user: mariadb +mariadb_group: mariadb + +mariadb_data_dir: /opt/db +mariadb_conf_dir: /etc/mariadb + +mariadb_socket: /var/run/mysqld/mysqld.sock + +mariadb_default_config: + symbolic-links: no + + +mariadb_enable_tls: yes + +mariadb_tls_dir: "{{ mariadb_conf_dir }}/tls" + +mariadb_tls_config: + ssl_ca: "{{ mariadb_tls_dir }}/root.crt" + ssl_cert: "{{ mariadb_tls_dir }}/ecc384.crt" + ssl_key: "{{ mariadb_tls_dir }}/ecc384.key" + tls_version: TLSv1.2,TLSv1.3 diff --git a/roles/mariadb/handlers/main.yml b/roles/mariadb/handlers/main.yml new file mode 100644 index 0000000..ec8302c --- /dev/null +++ b/roles/mariadb/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart mariadb + service: + name: mariadb + state: restarted + + +- name: reload systemd daemons + systemd: + daemon_reload: yes diff --git a/roles/mariadb/tasks/add_db.yml b/roles/mariadb/tasks/add_db.yml new file mode 100644 index 0000000..02b9575 --- /dev/null +++ b/roles/mariadb/tasks/add_db.yml @@ -0,0 +1,20 @@ +- name: validate input + fail: + msg: database parameters are incorrect + when: (database is not mapping) or (database.name is not defined) + + +- name: get target server hostname + set_fact: + target_server: "{{ mariadb_server | d(services.mariadb.hostname) }}" + + +- name: add db to mariadb + community.mysql.mysql_db: + name: "{{ database.name }}" + encoding: "{{ database.encoding | d(omit) }}" + collation: "{{ database.collation | d(omit) }}" + config_file: "{{ hostvars[target_server]['mariadb_conf_dir'] | d(mariadb_conf_dir) }}/mariadb.conf" + login_unix_socket: "{{ hostvars[target_server]['mariadb_socket'] | d(mariadb_socket) }}" + check_implicit_admin: yes + delegate_to: "{{ target_server }}" diff --git a/roles/mariadb/tasks/add_user.yml b/roles/mariadb/tasks/add_user.yml new file mode 100644 index 0000000..319ec25 --- /dev/null +++ b/roles/mariadb/tasks/add_user.yml @@ -0,0 +1,31 @@ +- name: validate input + fail: + msg: user parameters are incorrect + when: (user is not mapping) or (user.name is not defined) or + (user.password is not defined) + + +- name: get target server hostname + set_fact: + target_server: "{{ mariadb_server | d(services.mariadb.hostname) }}" + + +- block: + - name: construct keyvalue pairs for privileges + set_fact: + mysql_user_attrs: + - key: 
"{{ user.name ~ '.*' }}" + value: "ALL" + when: user.privs is not defined + + + - name: add user to mariadb + community.mysql.mysql_user: + name: "{{ user.name }}" + password: "{{ user.password }}" + priv: "{{ (user.privs | d(mysql_user_attrs | d([]))) | items2dict }}" + config_file: "{{ hostvars[target_server]['mariadb_conf_dir'] | d(mariadb_conf_dir) }}/mariadb.conf" + login_unix_socket: "{{ hostvars[target_server]['mariadb_socket'] | d(mariadb_socket) }}" + check_implicit_admin: yes + + delegate_to: "{{ target_server }}" diff --git a/roles/mariadb/tasks/install.yml b/roles/mariadb/tasks/install.yml new file mode 100644 index 0000000..d448ea7 --- /dev/null +++ b/roles/mariadb/tasks/install.yml @@ -0,0 +1,202 @@ +- name: set mariadb config + set_fact: + mariadb_cfg: "{{ mariadb_default_config | + combine(mariadb_tls_config if mariadb_enable_tls else {}, recursive=true) | + combine(mariadb_config if mariadb_config is mapping else {}, recursive=true) }}" + + +- name: install nginx and dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - mariadb-client + - alpine: mariadb + - alpine: mariadb-openrc + - alpine: mariadb-server-utils + - alpine: py3-pip + debian: python3-pip + - debian: mariadb-server + + +- name: install pymysql + pip: + name: pymysql + + +- name: remove old mariadb configs + file: + path: "/etc/{{ item }}" + state: absent + loop: + - my.cnf + - my.cnf.d/ + - mysql/ + + +- name: remove mysql user if it exists + user: + name: mysql + remove: yes + state: absent + + +- name: remove mysql group if it exists + group: + name: mysql + state: absent + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + dir: "{{ mariadb_data_dir }}" + notify: restart mariadb + + +- name: create mariadb config dir + file: + path: "{{ mariadb_conf_dir }}" + state: directory + mode: 0700 + owner: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + + +- name: create mariadb data dir + file: + path: "{{ mariadb_data_dir }}" + state: directory + mode: 0755 + owner: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + notify: restart mariadb + + +- name: template custom config + template: + src: mariadb.j2 + dest: "{{ mariadb_conf_dir }}/mariadb.conf" + force: yes + mode: 0400 + owner: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + lstrip_blocks: yes + notify: restart mariadb + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/mariadb + force: yes + notify: restart mariadb + when: ansible_distribution == 'Alpine' + + +- name: create tls directory for holding certs + file: + path: "{{ mariadb_tls_dir }}" + state: directory + mode: 0700 + owner: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + when: mariadb_enable_tls + + +- block: + - name: add drop-in systemd directory + file: + path: /etc/systemd/system/mariadb.service.d + state: directory + + - name: template systemd drop-in file + template: + src: systemd.j2 + dest: /etc/systemd/system/mariadb.service.d/mariadb.conf + force: yes + notify: reload systemd daemons + + - name: edit string in systemd init file + lineinfile: + path: /lib/systemd/system/mariadb.service + regexp: '(ExecStartPre=/usr/bin/install -m 755 -o )(\S*)( -g root -d /var/run/mysqld)' + line: '\1{{ mariadb_user }}\3' + backrefs: yes + notify: reload systemd daemons + + - name: remove string in systemd init file + lineinfile: + path: /lib/systemd/system/mariadb.service + line: 'ExecStartPost=/etc/mysql/debian-start' + state: 
absent + + - name: change mysql directory ownership + file: + path: /var/lib/mysql + state: directory + recurse: yes + owner: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + + when: ansible_distribution == 'Debian' + + +- name: create pid directory + file: + path: "/var/run/mysqld" + state: directory + owner: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + + +- name: deploy ecc384 cert + include_role: + name: certs + vars: + certs: + id: mariadb-ecc + cert: "{{ mariadb_cfg.ssl_cert }}" + key: "{{ mariadb_cfg.ssl_key }}" + chain: "{{ mariadb_cfg.ssl_ca }}" + ecc: yes + post_hook: service mariadb restart + owner: "{{ mariadb_user }}" + group: "{{ mariadb_group }}" + notify: restart mariadb + when: mariadb_enable_tls + + +- name: run mariadb-install-db + command: + argv: + - /usr/bin/mariadb-install-db + - "--defaults-file={{ mariadb_conf_dir }}/mariadb.conf" + - "--datadir={{ mariadb_data_dir }}" + - "--user={{ mariadb_user }}" + register: res + changed_when: (res.rc == 0) and ("Two all-privilege accounts were created" in res.stdout) + failed_when: res.rc != 0 + notify: restart mariadb + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ mariadb_conf_dir }}" + + +- name: enable and start mariadb + service: + name: mariadb + enabled: yes + state: started diff --git a/roles/mariadb/tasks/main.yml b/roles/mariadb/tasks/main.yml new file mode 100644 index 0000000..093dd85 --- /dev/null +++ b/roles/mariadb/tasks/main.yml @@ -0,0 +1,14 @@ +- name: mariadb installation + include_tasks: install.yml + when: function == 'install' + + +- name: add databases + include_tasks: add_db.yml + when: function == 'add_db' + + +- name: add users + include_tasks: add_user.yml + when: function == 'add_user' + diff --git a/roles/mariadb/templates/init.j2 b/roles/mariadb/templates/init.j2 new file mode 100644 index 0000000..8e46403 --- /dev/null +++ b/roles/mariadb/templates/init.j2 @@ -0,0 +1,23 @@ +#!/sbin/openrc-run +retry="60" +directory={{ mariadb_data_dir | quote }} +pidfile="/var/run/mysqld/$SVCNAME.pid" + +command="/usr/bin/mariadbd-safe" +command_args="--defaults-file={{ (mariadb_conf_dir ~ '/mariadb.conf') | quote }} --syslog --nowatch --pid-file=$pidfile --datadir={{ mariadb_data_dir | quote }} --user={{ mariadb_user }}" +command_user="{{ mariadb_user }}:{{ mariadb_group }}" +#command_background=true + +depend() { + use net + need localmount +} + +start_pre() { + mkdir -p /var/run/mysqld + chown {{ mariadb_user }}:{{ mariadb_group }} /var/run/mysqld +} + +start_post() { + ewaitfile 15 {{ mariadb_socket | quote }} +} diff --git a/roles/mariadb/templates/mariadb.j2 b/roles/mariadb/templates/mariadb.j2 new file mode 100644 index 0000000..e72cab2 --- /dev/null +++ b/roles/mariadb/templates/mariadb.j2 @@ -0,0 +1,16 @@ +{% macro mariadb_option(option) -%} + {% if option.value is boolean -%} + {{- option.key | lower }}={{ '1' if option.value else '0' }} + {% elif option.value is string -%} + {{- option.key | lower }}={{ option.value }} + {% endif -%} +{% endmacro -%} + + +[mariadbd] +user={{ mariadb_user }} +datadir={{ mariadb_data_dir }} + +{% for option in (mariadb_cfg | d({}) | dict2items) -%} + {{- mariadb_option(option) -}} +{%- endfor -%} diff --git a/roles/mariadb/templates/systemd.j2 b/roles/mariadb/templates/systemd.j2 new file mode 100644 index 0000000..a4e0625 --- /dev/null +++ b/roles/mariadb/templates/systemd.j2 @@ -0,0 +1,6 @@ +[Service] +User={{ mariadb_user }} 
+Group={{ mariadb_group }} + +ExecStartPre=/usr/bin/install -m 755 -o {{ mariadb_user }} -g root -d /var/run/mysqld +ExecStartPost= diff --git a/roles/memcached/tasks/main.yml b/roles/memcached/tasks/main.yml new file mode 100644 index 0000000..bdfea98 --- /dev/null +++ b/roles/memcached/tasks/main.yml @@ -0,0 +1,13 @@ +- name: install memcached + include_tasks: tasks/install_packages.yml + vars: + package: + - memcached + - alpine: memcached-openrc + + +- name: enable and start memcached + service: + name: memcached + enabled: yes + state: started diff --git a/roles/mta-sts/defaults/main.yml b/roles/mta-sts/defaults/main.yml new file mode 100644 index 0000000..0967dc3 --- /dev/null +++ b/roles/mta-sts/defaults/main.yml @@ -0,0 +1,22 @@ +mta_sts_user: mta-sts +mta_sts_group: mta-sts + +mta_sts_conf_dir: /etc/mta-sts +mta_sts_conf_file: "{{ mta_sts_conf_dir }}/config.yml" + +mta_sts_log_verbosity: warn + +mta_sts_default_config: + host: 127.0.0.1 + port: 8461 + cache_grace: 180 + shutdown_timeout: 7 + cache: + type: internal + proactive_policy_fetching: + enabled: yes + concurrency_limit: 4 + default_zone: + strict_testing: no + require_sni: yes + timeout: 8 diff --git a/roles/mta-sts/handlers/main.yml b/roles/mta-sts/handlers/main.yml new file mode 100644 index 0000000..2345d7d --- /dev/null +++ b/roles/mta-sts/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart mta-sts + service: + name: mta-sts + state: restarted diff --git a/roles/mta-sts/tasks/main.yml b/roles/mta-sts/tasks/main.yml new file mode 100644 index 0000000..16c3770 --- /dev/null +++ b/roles/mta-sts/tasks/main.yml @@ -0,0 +1,85 @@ +- name: set mta_sts_cfg + set_fact: + mta_sts_cfg: "{{ mta_sts_default_config | d({}) | combine(mta_sts_config | d({}), recursive=true) }}" + + +- name: install pip3 and dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - gcc + - python3-dev + - musl-dev + - py3-pip + - py3-aiohttp + - py3-frozenlist + - py3-multidict + - py3-yaml + notify: restart mta-sts + + +- name: install mta-sts-resolver from pip + pip: + name: postfix-mta-sts-resolver + extra_args: "--no-cache-dir" + notify: restart mta-sts + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ mta_sts_user }}" + group: "{{ mta_sts_group }}" + dir: "{{ mta_sts_conf_dir }}" + create_home: no + + +- name: create config dir + file: + path: "{{ mta_sts_conf_dir }}" + state: directory + mode: 0700 + owner: "{{ mta_sts_user }}" + group: "{{ mta_sts_group }}" + + +- name: template mta-sts config + template: + src: config.j2 + dest: "{{ mta_sts_conf_file }}" + force: yes + mode: 0600 + owner: "{{ mta_sts_user }}" + group: "{{ mta_sts_group }}" + lstrip_blocks: no + notify: restart mta-sts + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/mta-sts + force: yes + mode: "+x" + notify: restart mta-sts + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ mta_sts_conf_dir }}" + + +- name: enable and start mta-sts + service: + name: mta-sts + enabled: yes + state: started diff --git a/roles/mta-sts/templates/config.j2 b/roles/mta-sts/templates/config.j2 new file mode 100644 index 0000000..da53b06 --- /dev/null +++ b/roles/mta-sts/templates/config.j2 @@ -0,0 +1 @@ +{{ mta_sts_cfg | to_nice_yaml(indent=2, width=512) }} diff --git a/roles/mta-sts/templates/init.j2 b/roles/mta-sts/templates/init.j2 new file mode 100644 index 
0000000..56be188 --- /dev/null +++ b/roles/mta-sts/templates/init.j2 @@ -0,0 +1,14 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +command="/usr/bin/mta-sts-daemon" +command_args="-v {{ mta_sts_log_verbosity | quote }} -c {{ mta_sts_conf_file | quote }}" +command_user="{{ mta_sts_user }}:{{ mta_sts_group }}" +pidfile="/var/run/$SVCNAME.pid" +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + +depend() { + need net + use dns +} diff --git a/roles/nginx/defaults/main.yml b/roles/nginx/defaults/main.yml new file mode 100644 index 0000000..1eb470c --- /dev/null +++ b/roles/nginx/defaults/main.yml @@ -0,0 +1,150 @@ +nginx_list_join: + gzip_types: " " + ssl_protocols: " " + ssl_ciphers: ":" + ssl_ecdh_curve: ":" + +nginx_multi_list: + - ssl_conf_command + +nginx_defaults: + user: nginx + group: nginx + conf_dir: /etc/nginx + dhparam_file: dhparam.pem + + cert_rsa_name: rsa2048 + cert_ecc_name: ecc384 + + enable_stapling: yes + must_staple: no + + security: + headers: + X-Frame-Options: "SAMEORIGIN" + X-Content-Type-Options: "nosniff" + Referrer-Policy: "strict-origin-when-cross-origin" + Expect-CT: "maxage=86400, enforce" + Cross-Origin-Embedder-Policy: "require-corp" + Cross-Origin-Opener-Policy: "same-origin" + Cross-Origin-Resource-Policy: "same-site" + + csp: + default-src: "'none'" + base-uri: "'self'" + connect-src: "'self'" + font-src: "'self'" + img-src: "'self'" + manifest-src: "'self'" + media-src: "'self'" + prefetch-src: "'self'" + script-src: "'self'" + style-src: "'self'" + worker-src: "'self'" + form-action: "'self'" + frame-ancestors: "'self'" + + pp: + accelerometer: "" + camera: "" + display-capture: "" + document-domain: "" + geolocation: "" + gyroscope: "" + magnetometer: "" + microphone: "" + midi: "" + payment: "" + screen-wake-lock: "" + sync-xhr: "" + usb: "" + xr-spatial-tracking: "" + + conf: + root: + pcre_jit: "on" + worker_processes: "auto" + events: + http: + aio: "threads" + aio_write: "on" + directio: "128k" + sendfile: "on" + sendfile_max_chunk: "1m" + tcp_nodelay: "on" + tcp_nopush: "on" + + client_body_buffer_size: "64k" + client_body_timeout: "30s" + client_header_buffer_size: "2k" + client_header_timeout: "15s" + client_max_body_size: "0" + send_timeout: "180s" + resolver_timeout: "10s" + + disable_symlinks: "on" + keepalive_disable: "none" + msie_padding: "off" + server_tokens: "off" + log_not_found: "on" + access_log: "off" + + open_file_cache: "max=512 inactive=120s" + open_file_cache_errors: "on" + + gzip: "on" + gzip_comp_level: "4" + gzip_min_length: "4096" + gzip_vary: "on" + gzip_types: + - text/css + - text/javascript + - text/plain + - application/javascript + - application/x-javascript + - font/truetype + - font/opentype + - image/svg+xml + - application/xml + + deny: "all" + autoindex: "off" + + default_type: "application/octet-stream" + + proxy_buffer_size: "16k" + proxy_buffers: "16 16k" + proxy_connect_timeout: "30s" + proxy_http_version: "1.1" + proxy_read_timeout: "180s" + proxy_send_timeout: "180s" + proxy_max_temp_file_size: "0" + + http2_push_preload: "on" + + ssl_protocols: + - TLSv1.2 + - TLSv1.3 + ssl_ciphers: + - ECDHE-ECDSA-CHACHA20-POLY1305 + - ECDHE-ECDSA-AES256-GCM-SHA384 + - ECDHE-ECDSA-AES128-GCM-SHA256 + - ECDHE-RSA-CHACHA20-POLY1305 + - ECDHE-RSA-AES256-GCM-SHA384 + - ECDHE-RSA-AES128-GCM-SHA256 + - DHE-RSA-AES256-GCM-SHA384 + - DHE-RSA-AES128-GCM-SHA256 + ssl_ecdh_curve: + - X448 + - X25519 + - secp521r1 + - secp384r1 + + ssl_prefer_server_ciphers: "on" + ssl_early_data: "on" + 
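+    # ssl_conf_command is listed in nginx_multi_list above, so every item below is rendered by
+    # templates/nginx.j2 as its own ssl_conf_command directive; the values are handed straight
+    # to OpenSSL.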
ssl_conf_command: + - Options PrioritizeChaCha,-MiddleboxCompat + - Ciphersuites TLS_CHACHA20_POLY1305_SHA256:TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256 + ssl_session_cache: "shared:SSL:512k" + ssl_session_tickets: "on" + ssl_session_timeout: "1h" diff --git a/roles/nginx/handlers/main.yml b/roles/nginx/handlers/main.yml new file mode 100644 index 0000000..b5a93cf --- /dev/null +++ b/roles/nginx/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted \ No newline at end of file diff --git a/roles/nginx/tasks/main.yml b/roles/nginx/tasks/main.yml new file mode 100644 index 0000000..fbb8d96 --- /dev/null +++ b/roles/nginx/tasks/main.yml @@ -0,0 +1,181 @@ +- name: set nginx_cfg + set_fact: + nginx_cfg: "{{ nginx_defaults | d({}) | combine(nginx | d({}), recursive=true) }}" + + +- name: install nginx and dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - nginx + - alpine: nginx-openrc + notify: restart nginx + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + when: (ansible_distribution is defined) and (ansible_distribution == 'Debian') + + +- name: edit init script + lineinfile: + path: /etc/init.d/nginx + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + backrefs: yes + insertafter: omit + loop: + - regexp: '^(\s*)checkpath --directory --owner \w+:\w+(.*)$' + line: '\g<1>checkpath --directory --owner {{ nginx_cfg.user }}:{{ nginx_cfg.group }}\g<2>' + - regexp: '^(\s*)cfgfile=\$\{cfgfile:\-.+\}(.*)$' + line: '\g<1>cfgfile=${cfgfile:-{{ nginx_cfg.conf_dir }}/nginx.conf}\g<2>' + - regexp: '^(\s*)command_args=\"(.*)\"(\s*)$' + line: '\g<1>command_args="-c $cfgfile -e /dev/null"\g<3>' + notify: restart nginx + when: (ansible_distribution is not defined) or (ansible_distribution == 'Alpine') + + +- name: create nginx directories + file: + path: "{{ item }}" + state: directory + mode: 0700 + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + loop: + - "{{ nginx_cfg.conf_dir }}" + - "{{ nginx_cfg.conf_dir }}/custom" + - "{{ nginx_cfg.conf_dir }}/tls" + notify: restart nginx + + +- name: remove unused nginx files + file: + path: "{{ nginx_cfg.conf_dir }}/{{ item }}" + state: absent + loop: + - fastcgi_params + - scgi_params + - uwsgi_params + - modules + - http.d + notify: restart nginx + + +- name: remove fastcgi.conf if cgi is not used + file: + path: "{{ nginx_cfg.conf_dir }}/fastcgi.conf" + state: absent + when: (nginx_cfg.fastcgi | d(false) == false) and + ((nginx_cfg.servers | d([]) | selectattr('fastcgi', 'defined') | selectattr('fastcgi', 'equalto', true) | list | length) == 0) + notify: restart nginx + + +- name: template base nginx config + template: + src: nginx.j2 + dest: "{{ nginx_cfg.conf_dir }}/nginx.conf" + force: yes + mode: 0600 + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + notify: restart nginx + + +- name: template fastcgi config if requested + template: + src: fastcgi.j2 + dest: "{{ nginx_cfg.conf_dir }}/fastcgi.conf" + force: yes + mode: 0600 + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + when: (nginx_cfg.fastcgi | d(false) == true) or + ((nginx_cfg.servers | d([]) | selectattr('fastcgi', 'defined') | selectattr('fastcgi', 'equalto', true) | list | length) > 0) + notify: restart nginx + + +- name: template server configs + template: + src: "{{ item.conf | d(item.name) }}.j2" + dest: "{{ nginx_cfg.conf_dir }}/custom/{{ item.conf | 
d(item.name) }}.conf" + force: yes + mode: 0600 + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + loop: "{{ nginx_cfg.servers | d([]) }}" + when: ((item.conf is defined) or (item.name is defined)) and not (item.conf == None) + notify: restart nginx + + +- name: template default http config + template: + src: "{{ nginx_cfg.default_http_config }}.j2" + dest: "{{ nginx_cfg.conf_dir }}/custom/{{ nginx_cfg.default_http_config }}.conf" + force: yes + mode: 0600 + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + when: nginx_cfg.default_http_config is defined + notify: restart nginx + + +- name: template extra configs + template: + src: "{{ item }}.j2" + dest: "{{ nginx_cfg.conf_dir }}/custom/{{ item }}.conf" + force: yes + mode: 0600 + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + loop: "{{ nginx_cfg.extra_configs | d([]) }}" + notify: restart nginx + + +- block: + - name: deploy certs + include_role: + name: certs + vars: + common: + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + post_hook: service nginx restart + notify: restart nginx + stapling: "{{ nginx_cfg.must_staple | d(nginx_cfg.enable_stapling) | d(false) }}" + hosts: "{{ nginx_cfg.domains | d(None) }}" + certs: + - id: "{{ host_name ~ '-nginx-ecc' }}" + cert: "{{ nginx_cfg.conf_dir }}/tls/{{ nginx_cfg.cert_ecc_name }}.crt" + key: "{{ nginx_cfg.conf_dir }}/tls/{{ nginx_cfg.cert_ecc_name }}.key" + ecc: yes + - id: "{{ host_name ~ '-nginx-rsa' }}" + cert: "{{ nginx_cfg.conf_dir }}/tls/{{ nginx_cfg.cert_rsa_name }}.crt" + key: "{{ nginx_cfg.conf_dir }}/tls/{{ nginx_cfg.cert_rsa_name }}.key" + + when: nginx_cfg.certs | d(false) == true + + +- name: change ownership of nginx temp directory + file: + path: /var/lib/nginx + state: directory + recurse: yes + owner: "{{ nginx_cfg.user }}" + group: "{{ nginx_cfg.group }}" + changed_when: no + + +- name: flush handlers + meta: flush_handlers + + +- name: enable and start nginx + service: + name: nginx + enabled: yes + state: started diff --git a/roles/nginx/templates/fastcgi.j2 b/roles/nginx/templates/fastcgi.j2 new file mode 100644 index 0000000..a987b82 --- /dev/null +++ b/roles/nginx/templates/fastcgi.j2 @@ -0,0 +1,23 @@ +fastcgi_param QUERY_STRING $query_string; +fastcgi_param REQUEST_METHOD $request_method; +fastcgi_param CONTENT_TYPE $content_type; +fastcgi_param CONTENT_LENGTH $content_length; + +fastcgi_param SCRIPT_NAME $fastcgi_script_name; +fastcgi_param REQUEST_URI $request_uri; +fastcgi_param DOCUMENT_URI $document_uri; +fastcgi_param DOCUMENT_ROOT $document_root; +fastcgi_param SERVER_PROTOCOL $server_protocol; +fastcgi_param REQUEST_SCHEME $scheme; +fastcgi_param HTTPS $https if_not_empty; + +fastcgi_param GATEWAY_INTERFACE CGI/1.1; +fastcgi_param SERVER_SOFTWARE nginx/$nginx_version; + +fastcgi_param REMOTE_ADDR $remote_addr; +fastcgi_param REMOTE_PORT $remote_port; +fastcgi_param SERVER_ADDR $server_addr; +fastcgi_param SERVER_PORT $server_port; +fastcgi_param SERVER_NAME $server_name; + +fastcgi_param REDIRECT_STATUS 200; diff --git a/roles/nginx/templates/nginx.j2 b/roles/nginx/templates/nginx.j2 new file mode 100644 index 0000000..27afa1f --- /dev/null +++ b/roles/nginx/templates/nginx.j2 @@ -0,0 +1,211 @@ +{% macro nginx_option(option) -%} + {% if option.value is boolean -%} + {{ option.key | lower }} {{ 'on' if option.value else 'off' }}; + {% elif option.value is string -%} + {{ option.key | lower }} {{ option.value }}; + {% elif option.value | type_debug == "list" -%} + {% if option.key in 
nginx_multi_list -%} + {% for suboption in option.value -%} + {{ option.key | lower }} {{ suboption }}; + {% endfor -%} + {% else -%} + {{ option.key | lower }} {{ option.value | join(nginx_list_join[option.key] | d(' ')) }}; + {% endif -%} + {% endif -%} +{% endmacro -%} + +{% macro nginx_option_block(block) -%} + {% if block | type_debug == 'dict' -%} + {% for option in (block | d({}) | dict2items) -%} + {{ nginx_option(option) -}} + {% endfor -%} + {% endif -%} +{% endmacro -%} + +{%- macro nginx_include(conf, dir) -%} + include {{ (nginx_cfg.conf_dir ~ '/' ~ (dir | d('custom')) ~ '/' ~ conf ~ '.conf') | quote -}}; +{%- endmacro -%} + +{% macro nginx_security_headers(source) -%} + {% set nginx_sel_source = (source | d(nginx_cfg)) -%} + + {% for header in (nginx_sel_source.security.headers | d({}) | dict2items) -%} + {% if header.value is string -%} + add_header {{ header.key ~ ' "' ~ header.value ~ '"' }} always; + {% endif -%} + {% endfor -%} + + {% if (nginx_sel_source.security.csp is mapping) and (nginx_sel_source.security.csp | d({}) | dict2items | length) > 0 -%} + {% set all_csp = [] -%} + {% for csp in (nginx_sel_source.security.csp | d({}) | dict2items) -%} + {% set all_csp = all_csp.append(csp.key ~ ' ' ~ csp.value) -%} + {% endfor -%} + + {% if nginx_sel_source.security.csp_in_report_mode | d(false) == true -%} + add_header {{ 'Content-Security-Policy-Report-Only "' ~ (all_csp | join('; ')) ~ '"' }} always; + {% else -%} + add_header {{ 'Content-Security-Policy "' ~ (all_csp | join('; ')) ~ '"' }} always; + {% endif -%} + {% endif -%} + + {% if (nginx_sel_source.security.pp is mapping) and (nginx_sel_source.security.pp | d({}) | dict2items | length) > 0 -%} + {% set all_fp = [] -%} + {% set all_pp = [] -%} + {% for pp in (nginx_sel_source.security.pp | d({}) | dict2items) -%} + {% set all_fp = all_fp.append(pp.key ~ ' ' ~ ("'none'" if (pp.value | length == 0) else pp.value)) -%} + {% set all_pp = all_pp.append(pp.key ~ '=(' ~ pp.value ~ ')') -%} + {% endfor -%} + + add_header {{ 'Feature-Policy "' ~ (all_fp | join('; ')) ~ '"' }} always; + add_header {{ 'Permissions-Policy "' ~ (all_pp | join(', ')) ~ '"' }} always; + {% endif -%} +{% endmacro -%} + +{% macro nginx_allow(combined) -%} + {% if (combined.external_tld | d(false) == true) or (combined.tld is defined and combined.tld == tld) or (combined.int_net | d(true) == false) -%} + allow all; + {% for bogon in (bogons | d([])) -%} + deny {{ bogon }}; + {% endfor -%} + {% else -%} + allow {{ int_net }}; + {% endif -%} +{% endmacro -%} + + +{% macro nginx_server_name(combined, srv) -%} + {% if combined.is_root | d(false) == false -%} + {% set nginx_sn_prefix = (srv.name | d(combined.override_server_name | d(inventory_hostname))) ~ '.' 
-%} + {% else -%} + {% set nginx_sn_prefix = '' -%} + {% endif -%} + + {% if combined.tld is string -%} + {% set nginx_sn_tld = combined.tld -%} + {% else -%} + {% set nginx_sn_tld = host_tld -%} + {% endif -%} + + {% if srv.no_tld | d(false) == true -%} + server_name {{ srv.name }}; + {% else -%} + server_name {{ nginx_sn_prefix ~ nginx_sn_tld }}; + {% endif -%} +{% endmacro -%} + + + +{% macro nginx_stapling() -%} + {% if nginx_cfg.enable_stapling | d(false) == true -%} + ssl_stapling on; + ssl_stapling_verify on; + resolver 1.1.1.1; + {% else -%} + ssl_stapling off; + {% endif -%} +{% endmacro -%} + + + + + + +user {{ nginx_cfg.user }} {{ nginx_cfg.group }}; +error_log syslog:server=unix:/dev/log,facility=local2 {{ nginx_cfg.log_level | d('notice') }}; + +{{ nginx_option_block(nginx_cfg.conf.root) }} + +{{ nginx_include('*', 'modules') }} +{{ nginx_include('main*') }} + +events { + {{ nginx_option_block(nginx_cfg.conf.events) }} +} + +http { + {{ nginx_option_block(nginx_cfg.conf.http) }} + + include {{ (nginx_cfg.conf_dir ~ '/mime.types') | quote }}; + {# ssl_dhparam {{ (nginx_cfg.conf_dir ~ '/tls/' ~ nginx_cfg.dhparam_file) | quote }}; #} + + {% if nginx_cfg.security_headers | d(false) == true -%} + {{ nginx_security_headers(nginx_cfg) }} + {% endif %} + + {{ nginx_include('http*') }} + + + {% set nginx_http = (nginx_cfg.servers | d([]) | selectattr('http', 'defined') | selectattr('http', 'equalto', true) | list) -%} + {% set nginx_https = (nginx_cfg.servers | d([]) | rejectattr('http', 'defined') | list) -%} + + {# custom HTTP servers -#} + {% for srv in nginx_http -%} + {% set combined = (nginx_cfg | combine(srv, recursive=true)) -%} + + server { + listen 80; + {% if srv.ipv6 | d(true) == true %}listen [::]:80;{% endif %} + + {{ nginx_allow(combined) }} + {{ nginx_server_name(combined, srv) }} + + {% if (srv.security_headers | d(false) == true) and (nginx_cfg.security_headers | d(false) == false) %} + {{ nginx_security_headers(srv) }} + {% endif %} + + {{ nginx_include(srv.conf) }} + } + {% endfor %} + + + {# default HTTP server -#} + + {% if nginx_cfg.add_default_http_server | d(true) == true -%} + server { + listen 80 default_server; + listen [::]:80 default_server; + + {{ nginx_allow(combined) }} + {{ nginx_server_name(combined, {}) }} + + {% if nginx_cfg.default_http_config is not defined -%} + location / { + return 301 https://$host$request_uri; + } + {% else -%} + {{ nginx_include(nginx_cfg.default_http_config) }} + {% endif -%} + } + {% endif -%} + + + {# custom HTTPS servers #} + {% for srv in nginx_https -%} + {% set combined = (nginx_cfg | combine(srv, recursive=true)) -%} + + server { + listen 443 ssl http2; + {% if srv.ipv6 | d(true) == true %}listen [::]:443 ssl http2;{% endif %} + + {% if combined.certs | d(true) == true -%} + ssl_certificate {{ (nginx_cfg.conf_dir ~ '/tls/' ~ nginx_cfg.cert_rsa_name ~ '.crt') | quote }}; + ssl_certificate_key {{ (nginx_cfg.conf_dir ~ '/tls/' ~ nginx_cfg.cert_rsa_name ~ '.key') | quote }}; + + ssl_certificate {{ (nginx_cfg.conf_dir ~ '/tls/' ~ nginx_cfg.cert_ecc_name ~ '.crt') | quote }}; + ssl_certificate_key {{ (nginx_cfg.conf_dir ~ '/tls/' ~ nginx_cfg.cert_ecc_name ~ '.key') | quote }}; + {%- endif %} + + {{ nginx_allow(combined) }} + {{ nginx_server_name(combined, srv) }} + {{ nginx_stapling() }} + + {% if (srv.security_headers | d(false) == true) and (nginx_cfg.security_headers | d(false) == false) %} + {{ nginx_security_headers(srv) }} + {% endif %} + + {{ nginx_include(srv.conf) }} + } + + {% endfor %} + +} diff --git 
a/roles/ns/defaults/main.yml b/roles/ns/defaults/main.yml new file mode 100644 index 0000000..1cb8d40 --- /dev/null +++ b/roles/ns/defaults/main.yml @@ -0,0 +1,4 @@ +pdns_dir: /etc/pdns +pdns_custom_dir: "{{ pdns_dir }}/custom" +pdns_user: pdns +pdns_group: pdns diff --git a/roles/ns/tasks/_main.yml b/roles/ns/tasks/_main.yml new file mode 100644 index 0000000..23f03a0 --- /dev/null +++ b/roles/ns/tasks/_main.yml @@ -0,0 +1,171 @@ +- name: install powerdns + community.general.apk: + name: pdns,pdns-openrc,pdns-backend-pgsql + + +- name: install powerdns docs for db init scripts + community.general.apk: + name: pdns-doc + + +- name: set powerdns to start on boot + service: + name: pdns + enabled: yes + + +- name: create config directory + file: + path: /etc/pdns + state: directory + owner: pdns + group: pdns + mode: 0750 + + +- name: create include directory + file: + path: /etc/pdns/custom + state: directory + owner: pdns + group: pdns + mode: 0750 + + +- name: template pdns config + template: + src: custom.j2 + dest: /etc/pdns/custom/custom.conf + force: yes + owner: pdns + group: pdns + mode: 0640 + + +- name: add include-dir to default pdns config + lineinfile: + path: /etc/pdns/pdns.conf + line: "include-dir=/etc/pdns/custom" + create: yes + owner: pdns + group: pdns + mode: 0640 + +- name: remove bad lines from default pdns config + lineinfile: + path: /etc/pdns/pdns.conf + regex: "{{ item }}" + state: absent + loop: + - "^use-logfile=" + - "^wildcards=" + + +- block: + - name: check if records table is missing (meaning the db is probably empty) + community.postgresql.postgresql_query: + db: "{{ db_name | mandatory }}" + query: SELECT to_regclass('public.records'); + register: db_result + changed_when: False + + - name: set db_is_empty fact + set_fact: + db_is_empty: "{{ (db_result.query_result is defined) and (db_result.query_result[0].to_regclass is none) }}" + delegate_to: postgres + + + +- name: fetch script from ns to ansible + fetch: + src: /usr/share/doc/pdns/schema.pgsql.sql + dest: /tmp/ + flat: yes + register: fetched + when: db_is_empty + + +- block: + - name: create temporary file on postgres for holding the script + tempfile: + state: file + register: tf + + - name: upload script + copy: + src: "{{ fetched.dest }}" + dest: "{{ tf.path }}" + force: yes + + - name: execute script + community.postgresql.postgresql_query: + db: "{{ db_name | mandatory }}" + path_to_script: "{{ tf.path }}" + as_single_query: no + + - name: remove temp script + file: + path: "{{ tf.path }}" + state: absent + + when: db_is_empty + delegate_to: postgres + + +- name: remove fetched script + file: + path: "{{ fetched.dest }}" + state: absent + when: db_is_empty + + +- name: add default zones for all branches (+ root) + command: + cmd: "pdnsutil create-zone {% if item != 'root' %}{{ item }}.{% endif %}{{ corp_tld }} {{ ct_hostname }}.{{ corp_tld }}" + register: res + changed_when: (res.rc == 0) and ("Creating empty zone" in res.stderr) + failed_when: (res.rc != 0) and ("exists already" not in res.stderr) + loop: + "{{ corp_branches | default([]) + ['root'] }}" + + +- name: prepare list of NS records for subzones + set_fact: + ns_subs: "{{ ns_subs | default([]) + [{ 'zone': item, 'name': '@', 'type': 'NS', 'value': ct_hostname ~ '.' 
~ corp_tld }] }}" + loop: "{{ corp_branches | default([]) }}" + + +- name: ensure NS records in subzones exist + include_tasks: ns_items.yml + loop: "{{ ns_subs | default([]) }}" + + +- name: ensure NS record in root zone exists + include_tasks: ns_items.yml + vars: + item: { 'zone': 'root', 'name': '@', 'type': 'NS', 'value': "{{ ct_hostname }}.{{ corp_tld }}" } + + +- name: prepare list of NS delegated records for root zone + set_fact: + ns_delegated: "{{ ns_delegated | default([]) + [{ 'zone': 'root', 'name': item, 'type': 'NS', 'value': ct_hostname ~ '.' ~ corp_tld }] }}" + loop: "{{ corp_branches | default([]) }}" + + +- name: ensure NS delegated records exist in root zone + include_tasks: ns_items.yml + loop: "{{ ns_delegated | default([]) }}" + + +- name: rectify all zones + command: + cmd: "pdnsutil rectify-all-zones" + register: res2 + changed_when: False + failed_when: res2.rc != 0 + + +- name: start powerdns + service: + name: pdns + state: started diff --git a/roles/ns/tasks/add_record.yml b/roles/ns/tasks/add_record.yml new file mode 100644 index 0000000..8074d82 --- /dev/null +++ b/roles/ns/tasks/add_record.yml @@ -0,0 +1,122 @@ +- block: + - name: check if item zone is valid + fail: + msg: '"{{ item.zone }}" does not seem to be a valid zone' + when: (item.zone is defined) and ((item.zone != 'root') and (item.zone not in int_zones) or (item.zone is not string)) + + + - name: construct record parameters + set_fact: + ns_zone: "{%- if (item.zone is defined) and (item.zone != 'root') -%}{{ item.zone }}.\ + {%- elif item.zone is not defined and branch is defined -%}{{ branch }}.\ + {%- endif -%}\ + {{ int_tld }}" + ns_name: "{%- if item.name is defined -%}{{ item.name }}\ + {%- else -%}{{ inventory_hostname }}\ + {%- endif -%}" + ns_type: "{%- if item.type is defined -%}{{ item.type | upper }}\ + {%- else -%}A\ + {%- endif -%}" + ns_value: "{%- if item.value is defined -%}{{ item.value }}\ + {%- else -%}{{ ansible_host }}\ + {%- endif -%}" + + + - name: set ns_quote + set_fact: + ns_quote: "{{ '\"' if ns_type == 'TXT' else '' }}" + + + - name: construct full name + set_fact: + ns_full_name: '{%- if ns_name != "@" -%}{{ ns_name }}.{%- endif -%}{{ ns_zone }}' + + + - name: construct regex part + set_fact: + ns_regex_part: '{%- if item.allow_multiple is defined -%}{{ (ns_quote ~ ns_value ~ ns_quote) | regex_escape() }}\.?{%- else -%}{{ "" | string }}{%- endif -%}' + + + - name: construct regex + set_fact: + ns_regex: '^{{ ns_full_name | regex_escape() }}\s+\d+\s+IN\s+{{ ns_type | regex_escape() }}\s+{{ ns_regex_part }}' + + + - name: show debug info + debug: + msg: "{{ ns_zone }} {{ ns_name }} {{ ns_type }} {{ ns_quote ~ ns_value ~ ns_quote }} --> {{ ns_regex }}" + + + - name: query ns zone for a list of entries + command: + cmd: "pdnsutil list-zone {{ ns_zone | quote }}" + register: res + changed_when: false + failed_when: res.rc != 0 + + + - name: enumerate stdout lines to check if an entry already exists + set_fact: + ns_exists: "{{ res.stdout_lines | select('search', ns_regex) | list | length > 0 }}" + + + + # this takes care of situations with wrong record values + + - block: + - name: fail if there are multiple records + fail: + msg: single record is chosen, but multiple records found + when: res.stdout_lines | select('search', ns_regex) | list | length > 1 + + + - name: grab the value + set_fact: + ns_old_value: "{{ res.stdout_lines | select('search', ns_regex) | map('regex_search', '\\s+(\\S+?)\\.?$', '\\1') | first | join('') }}" + + + - name: debug + debug: + msg: "{{ 
ns_old_value }} <-> {{ ns_quote ~ ns_value ~ ns_quote }}" + + + - name: grab diff + set_fact: + ns_value_diff: "{{ ns_old_value != (ns_quote ~ ns_value ~ ns_quote) }}" + + + - name: check if records table already exists + include_role: + name: postgres + vars: + function: run_query + query: + database: "{{ hostvars['ns']['database']['name'] | mandatory }}" + text: "DELETE FROM records WHERE \ + domain_id = (SELECT id FROM domains WHERE name = %s) \ + AND name = %s AND type = %s AND content = %s" + positional_args: + - "{{ ns_zone }}" + - "{{ ns_full_name }}" + - "{{ ns_type }}" + - "{{ ns_old_value }}" + when: ns_value_diff + + + - name: unset ns_exists flag so the record will be added + set_fact: + ns_exists: false + when: ns_value_diff + + when: ns_exists and item.allow_multiple is not defined + + + - name: add the record if it is missing + command: + cmd: "pdnsutil add-record {{ ns_zone | quote }} {{ ns_name | quote }} {{ ns_type | quote }} {{ ns_value | quote }}" + register: res + changed_when: (res.rc == 0) and ("New rrset:" in res.stdout) + failed_when: res.rc != 0 + when: not ns_exists + + delegate_to: ns \ No newline at end of file diff --git a/roles/ns/tasks/add_records.yml b/roles/ns/tasks/add_records.yml new file mode 100644 index 0000000..eeef1a3 --- /dev/null +++ b/roles/ns/tasks/add_records.yml @@ -0,0 +1,11 @@ +- name: add default record + include_tasks: add_record.yml + vars: + item: {} + when: ns_add_default_record | d(true) == true + + +- name: process other ns items + include_tasks: add_record.yml + loop: "{{ ns_records | d([]) }}" + diff --git a/roles/ns/tasks/install.yml b/roles/ns/tasks/install.yml new file mode 100644 index 0000000..e82a480 --- /dev/null +++ b/roles/ns/tasks/install.yml @@ -0,0 +1,137 @@ +- name: install powerdns and dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - pdns + - alpine: pdns-openrc + - pdns-backend-pgsql + - pdns-doc + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ pdns_user }}" + group: "{{ pdns_group }}" + + +- name: create pdns config directories + file: + path: "{{ item }}" + state: directory + owner: "{{ pdns_user }}" + group: "{{ pdns_group }}" + mode: 0750 + loop: + - "{{ pdns_dir }}" + - "{{ pdns_custom_dir }}" + + +- name: template pdns config + template: + src: custom.j2 + dest: "{{ pdns_custom_dir }}/custom.conf" + force: yes + owner: "{{ pdns_user }}" + group: "{{ pdns_group }}" + mode: 0640 + + +- name: add include-dir to default pdns config + lineinfile: + path: "{{ pdns_dir }}/pdns.conf" + line: "include-dir={{ pdns_custom_dir }}" + create: yes + owner: "{{ pdns_user }}" + group: "{{ pdns_group }}" + mode: 0640 + + +- name: remove unwanted lines from default pdns config + lineinfile: + path: "{{ pdns_dir }}/pdns.conf" + regex: '^\s*{{ item }}\s*=' + state: absent + loop: + - use-logfile + - wildcards + + +- name: populate database + include_tasks: populate_db.yml + + +- name: add internal zone + command: + cmd: "pdnsutil create-zone {{ int_tld | quote }} {{ (inventory_hostname ~ '.' ~ int_tld) | quote }}" + register: res + changed_when: (res.rc == 0) and ("Creating empty zone" in res.stderr) + failed_when: (res.rc != 0) and ("exists already" not in res.stderr) + + +- name: add NS records for internal zone + include_tasks: add_record.yml + vars: + item: { 'name': '@', 'type': 'NS', value: "{{ inventory_hostname ~ '.' ~ int_tld }}" } + + +- name: add branch zones + command: + cmd: "pdnsutil create-zone {{ item ~ '.' 
~ int_tld | quote }} {{ (inventory_hostname ~ '.' ~ int_tld) | quote }}" + register: res + changed_when: (res.rc == 0) and ("Creating empty zone" in res.stderr) + failed_when: (res.rc != 0) and ("exists already" not in res.stderr) + loop: + "{{ int_branches | default([]) }}" + + +- name: prepare list of NS records for branches + set_fact: + ns_subs: "{{ ns_subs | default([]) + [{ 'zone': item, 'name': '@', 'type': 'NS', 'value': inventory_hostname ~ '.' ~ int_tld }] }}" + loop: "{{ int_branches | default([]) }}" + + +- name: add NS records for branch zones + include_tasks: add_record.yml + vars: + ns_records: "{{ ns_subs | default([]) }}" + + +- name: prepare list of NS delegated records for root zone + set_fact: + ns_delegated: "{{ ns_delegated | default([]) + [{ 'zone': 'root', 'name': item, 'type': 'NS', 'value': inventory_hostname ~ '.' ~ int_tld }] }}" + loop: "{{ int_branches | default([]) }}" + + +- name: add NS delegated records for root zone + include_tasks: add_record.yml + vars: + ns_records: "{{ ns_delegated | default([]) }}" + + +- name: rectify all zones + command: + cmd: pdnsutil rectify-all-zones + register: res + changed_when: false + failed_when: res.rc != 0 + + +- name: flush handlers + meta: flush_handlers + + +- name: add ns directories to backup plan + include_tasks: tasks/add_backup.yml + vars: + backup_items: + - "{{ pdns_dir }}" + - "{{ pdns_custom_dir }}" + + +- name: enable and start powerdns + service: + name: pdns + state: started + enabled: yes diff --git a/roles/ns/tasks/main.yml b/roles/ns/tasks/main.yml new file mode 100644 index 0000000..2652ab3 --- /dev/null +++ b/roles/ns/tasks/main.yml @@ -0,0 +1,16 @@ +- name: ns installation + include_tasks: install.yml + when: function == 'install' + + +- block: + - name: add records + include_tasks: add_records.yml + when: services.internal_ns is defined + + + - debug: + msg: internal nameserver is not defined + when: services.internal_ns is not defined + + when: function == 'add_records' \ No newline at end of file diff --git a/roles/ns/tasks/populate_db.yml b/roles/ns/tasks/populate_db.yml new file mode 100644 index 0000000..e3d78b4 --- /dev/null +++ b/roles/ns/tasks/populate_db.yml @@ -0,0 +1,24 @@ +- name: check if records table already exists + include_role: + name: postgres + vars: + function: run_query + database_query: "SELECT to_regclass('public.records');" + + +- block: + - name: slurp script from ns + slurp: + src: /usr/share/doc/pdns/schema.pgsql.sql + register: fetched + + + - name: run script + include_role: + name: postgres + vars: + function: run_script + database_script: "{{ fetched.content | b64decode }}" + refresh_privs: true + + when: (query_result is defined) and (query_result[0].to_regclass is none) diff --git a/roles/ns/templates/custom.j2 b/roles/ns/templates/custom.j2 new file mode 100644 index 0000000..56f7f99 --- /dev/null +++ b/roles/ns/templates/custom.j2 @@ -0,0 +1,60 @@ +allow-dnsupdate-from= +allow-notify-from= +allow-unsigned-notify=no +allow-unsigned-supermaster=no + +# keep entries in packet cache for 65s instead of default 20 +cache-ttl=65 + +chroot= +config-dir={{ pdns_dir }} +consistent-backends=yes + +daemon=yes +default-ksk-algorithm=ed25519 +default-soa-content=ns.{{ int_tld }} admin.{{ tld }} 0 3600 360 604800 120 +default-ttl=1200 +default-zsk-algorithm=ed25519 +disable-axfr=yes +distributor-threads=1 +dnsupdate=no # disable it for now + +guardian=yes + +local-port=53 +logging-facility=1 +loglevel=4 + +master=no + +max-cache-entries=50000 +max-ent-entries=10000 
+max-packet-cache-entries=10000 +max-queue-length=2500 +max-tcp-connections=60 + +negquery-cache-ttl=60 +only-notify= + +query-cache-ttl=20 +queue-limit=1500 + +receiver-threads={{ container_cores }} +reuseport=yes +signing-threads={{ container_cores }} +security-poll-suffix= + +version-string=anonymous +webserver=no +write-pid=yes + + + +launch=gpgsql + +gpgsql-host={{ hostvars['postgres']['ansible_host'] | mandatory }} +gpgsql-port=5432 +gpgsql-dbname={{ database_name }} +gpgsql-user={{ database_user }} +gpgsql-password={{ database_password }} +gpgsql-dnssec=yes diff --git a/roles/ntp/defaults/main.yml b/roles/ntp/defaults/main.yml new file mode 100644 index 0000000..c3b5220 --- /dev/null +++ b/roles/ntp/defaults/main.yml @@ -0,0 +1,37 @@ +ntp_user: chrony +ntp_group: chrony + +ntp_data_dir: /opt/chrony +ntp_conf_dir: /etc/chrony +ntp_tls_dir: "{{ ntp_conf_dir }}/tls" + +ntp_pool: ru.pool.ntp.org + +ntp_default_config: + user: "{{ ntp_user }}" + + pool: "{{ ntp_pool }} iburst minpoll 6 maxpoll 8 polltarget 4 maxsources 7 burst" + minsources: 2 + combinelimit: 6 + authselectmode: ignore + + ntsservercert: + - "{{ ntp_tls_dir }}/ecc384.crt" + - "{{ ntp_tls_dir }}/rsa2048.crt" + ntsserverkey: + - "{{ ntp_tls_dir }}/ecc384.key" + - "{{ ntp_tls_dir }}/rsa2048.key" + maxntsconnections: 15 + + ratelimit: "interval 1 burst 8" + ntsratelimit: "interval 3 burst 6" + + allow: + - "{{ int_net }}" + - 127.0.0.0/8 + dscp: 46 + local: stratum 4 + + ntsdumpdir: "{{ ntp_data_dir }}" + dumpdir: "{{ ntp_data_dir }}" + driftfile: "{{ ntp_data_dir }}/drift" diff --git a/roles/ntp/handlers/main.yml b/roles/ntp/handlers/main.yml new file mode 100644 index 0000000..6fa0828 --- /dev/null +++ b/roles/ntp/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart chrony + service: + name: chronyd + state: restarted \ No newline at end of file diff --git a/roles/ntp/tasks/main.yml b/roles/ntp/tasks/main.yml new file mode 100644 index 0000000..a17b3b5 --- /dev/null +++ b/roles/ntp/tasks/main.yml @@ -0,0 +1,92 @@ +- name: set ntp_cfg + set_fact: + ntp_cfg: "{{ ntp_default_config | d({}) | combine(ntp_config | d({}), recursive=true) }}" + + +- name: install chrony + include_tasks: tasks/install_packages.yml + vars: + package: + - chrony + - alpine: chrony-openrc + notify: restart chrony + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ ntp_user }}" + group: "{{ ntp_group }}" + + +- name: disable system clock control in conf.d + lineinfile: + path: /etc/conf.d/chronyd + regexp: '^ARGS=' + line: 'ARGS="-x"' + notify: restart chrony + + +- name: create directories + file: + path: "{{ item }}" + state: directory + owner: "{{ ntp_user }}" + group: "{{ ntp_group }}" + loop: + - "{{ ntp_conf_dir }}" + - "{{ ntp_tls_dir }}" + - "{{ ntp_data_dir }}" + + +- name: template config file + template: + src: chrony.j2 + dest: "{{ ntp_conf_dir }}/chrony.conf" + force: yes + mode: 0444 + owner: "{{ ntp_user }}" + group: "{{ ntp_group }}" + lstrip_blocks: yes + notify: restart chrony + + +- name: deploy certs + include_role: + name: certs + vars: + common: + owner: "{{ ntp_user }}" + group: "{{ ntp_group }}" + post_hook: service chronyd restart + notify: restart chrony + certs: + - cert: "{{ ntp_tls_dir }}/ecc384.crt" + key: "{{ ntp_tls_dir }}/ecc384.key" + ecc: yes + - cert: "{{ ntp_tls_dir }}/rsa2048.crt" + key: "{{ ntp_tls_dir }}/rsa2048.key" + ecc: no + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + 
function: add + backup_items: + - "{{ ntp_conf_dir }}" + - "{{ ntp_tls_dir }}" + - "{{ ntp_data_dir }}" + + +- name: enable and start chronyd + service: + name: chronyd + enabled: yes + state: started diff --git a/roles/ntp/templates/chrony.j2 b/roles/ntp/templates/chrony.j2 new file mode 100644 index 0000000..a2b2e15 --- /dev/null +++ b/roles/ntp/templates/chrony.j2 @@ -0,0 +1,9 @@ +{% for option in (ntp_cfg | d({}) | dict2items) -%} + {% if (option.value | type_debug == 'list') -%} + {% for suboption in option.value -%} + {{- option.key }} {{ suboption }} + {% endfor -%} + {% else -%} + {{- option.key }} {{ option.value }} + {% endif -%} +{% endfor -%} diff --git a/roles/officeonline/handlers/main.yml b/roles/officeonline/handlers/main.yml new file mode 100644 index 0000000..d27e3cd --- /dev/null +++ b/roles/officeonline/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart coolwsd + service: + name: coolwsd + state: restarted diff --git a/roles/officeonline/tasks/main.yml b/roles/officeonline/tasks/main.yml new file mode 100644 index 0000000..61cec73 --- /dev/null +++ b/roles/officeonline/tasks/main.yml @@ -0,0 +1,45 @@ +- name: download signing key + get_url: + dest: /usr/share/keyrings/collaboraonline-release-keyring.gpg + url: https://collaboraoffice.com/downloads/gpg/collaboraonline-release-keyring.gpg + force: yes + + +- name: add repo source + template: + src: collaboraonline_sources.j2 + dest: /etc/apt/sources.list.d/collaboraonline.sources + force: yes + + +- name: install collabora + apt: + name: + - coolwsd + - code-brand + - collaboraoffice*-ru + - collaboraoffice-dict-ru + update_cache: yes + + +- name: template collabora config + template: + src: coolwsd.j2 + dest: /etc/coolwsd/coolwsd.xml + force: yes + mode: 0640 + owner: cool + group: cool + notify: restart coolwsd + + +- name: enable and start service + service: + name: coolwsd + state: started + enabled: yes + + +- name: set officeonline presence fact + set_fact: + has_officeonline: yes diff --git a/roles/officeonline/templates/collaboraonline_sources.j2 b/roles/officeonline/templates/collaboraonline_sources.j2 new file mode 100644 index 0000000..c409d35 --- /dev/null +++ b/roles/officeonline/templates/collaboraonline_sources.j2 @@ -0,0 +1,4 @@ +Types: deb +URIs: https://www.collaboraoffice.com/repos/CollaboraOnline/CODE-debian11 +Suites: ./ +Signed-By: /usr/share/keyrings/collaboraonline-release-keyring.gpg diff --git a/roles/officeonline/templates/coolwsd.j2 b/roles/officeonline/templates/coolwsd.j2 new file mode 100644 index 0000000..9d66883 --- /dev/null +++ b/roles/officeonline/templates/coolwsd.j2 @@ -0,0 +1,89 @@ + + + en_US ru + + + false + true + + + + false + + warning + notice + fatal + + false + + + + + + default + true + + + + + + localhost + {{ host_fqdn }} + 10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} + 172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3} + 172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3} + 172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3} + 192\.168\.[0-9]{1,3}\.[0-9]{1,3} + 192\.168\.1\.1 + 0 + + 900 + + + + + + + + + + + true + + + + + + + + + true + + + true + false + + + + + + + + + + + + + + + + + + + diff --git a/roles/php/defaults/main.yml b/roles/php/defaults/main.yml new file mode 100644 index 0000000..87f04de --- /dev/null +++ b/roles/php/defaults/main.yml @@ -0,0 +1,30 @@ +php_install_fpm: yes +php_version: 8 + +php_default_config: + error_reporting: 'E_ALL & ~E_NOTICE' + memory_limit: "{{ (((hardware.memory | d(512) | int | abs) | d(512)) / 2) | int | abs }}M" + date.timezone: "{{ timezone }}" + +php_fpm_default_config: + 
listener: + user: "{{ php_fpm_user }}" + group: "{{ php_fpm_group }}" + listen: "{{ php_fpm_socket }}" + listen.owner: "{{ php_fpm_user }}" + listen.group: "{{ php_fpm_group }}" + listen.mode: "0660" + + pm: dynamic + pm.max_children: 8 + pm.start_servers: 2 + pm.min_spare_servers: 1 + pm.max_spare_servers: 3 + + catch_workers_output: yes + 'php_admin_flag[log_errors]': on + 'php_admin_value[error_log]': syslog + + global: + error_log: syslog + log_level: notice diff --git a/roles/php/handlers/main.yml b/roles/php/handlers/main.yml new file mode 100644 index 0000000..db4b3f1 --- /dev/null +++ b/roles/php/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart php fpm + service: + name: "php-fpm{{ php_version }}" + state: restarted diff --git a/roles/php/tasks/main.yml b/roles/php/tasks/main.yml new file mode 100644 index 0000000..a17c97a --- /dev/null +++ b/roles/php/tasks/main.yml @@ -0,0 +1,61 @@ +- name: set php configuration + set_fact: + php_cfg: "{{ php_default_config | d({}) | combine(php_config | d({}), recursive=true) }}" + php_fpm_cfg: "{{ php_fpm_default_config | d({}) | combine(php_fpm_config | d({}), recursive=true) }}" + + +- name: install php + include_tasks: tasks/install_packages.yml + vars: + package: "{{ 'php' ~ php_version }}" + + +- name: template php config + template: + src: php.j2 + dest: "/etc/php{{ php_version }}/conf.d/50_override.ini" + force: yes + mode: 0640 + lstrip_blocks: yes + + +- block: + - name: install php-fpm + include_tasks: tasks/install_packages.yml + vars: + package: "php{{ php_version }}-fpm" + notify: restart php fpm + + - name: template php-fpm config + template: + src: fpm.j2 + dest: "/etc/php{{ php_version }}/php-fpm.d/{{ php_fpm_listener }}.conf" + force: yes + mode: 0640 + lstrip_blocks: yes + notify: restart php fpm + + - name: delete www fpm config + file: + path: "/etc/php{{ php_version }}/php-fpm.d/www.conf" + state: absent + + - name: flush handlers + meta: flush_handlers + + - name: enable and start php-fpm + service: + name: "php-fpm{{ php_version }}" + state: started + enabled: yes + + when: php_install_fpm | d(false) == true + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "/etc/php{{ php_version }}" diff --git a/roles/php/templates/fpm.j2 b/roles/php/templates/fpm.j2 new file mode 100644 index 0000000..f63e6f5 --- /dev/null +++ b/roles/php/templates/fpm.j2 @@ -0,0 +1,18 @@ +{% macro fpm_option(option) -%} + {% if option.value is boolean -%} + {{ option.key }} = {{ 'yes' if option.value else 'no' -}} + {% else -%} + {{ option.key }} = {{ option.value -}} + {% endif -%} +{% endmacro -%} + + +[{{ php_fpm_listener }}] +{% for option in (php_fpm_cfg.listener | d({}) | dict2items) -%} + {{ fpm_option(option) }} +{% endfor %} + +[global] +{% for option in (php_fpm_cfg.global | d({}) | dict2items) -%} + {{ fpm_option(option) }} +{% endfor %} diff --git a/roles/php/templates/php.j2 b/roles/php/templates/php.j2 new file mode 100644 index 0000000..7c1fcae --- /dev/null +++ b/roles/php/templates/php.j2 @@ -0,0 +1,7 @@ +{% for option in (php_cfg | d({}) | dict2items) -%} + {% if option.value is boolean -%} + {{ option.key }} = {{ 'On' if option.value else 'Off' }} + {% else -%} + {{ option.key }} = {{ option.value }} + {% endif -%} +{% endfor %} diff --git a/roles/postfix/defaults/main.yml b/roles/postfix/defaults/main.yml new file mode 100644 index 0000000..abcd1cd --- /dev/null +++ b/roles/postfix/defaults/main.yml @@ -0,0 +1,477 @@ +postfix_user: postfix +postfix_group: 
postfix + +postfix_conf_dir: /etc/postfix +postfix_sql_dir: "{{ postfix_conf_dir }}/sql" +postfix_tls_dir: "{{ postfix_conf_dir }}/tls" + +postfix_tls_int_ecc384_key: "{{ postfix_tls_dir }}/int_ecc384.key" +postfix_tls_int_ecc384_cert: "{{ postfix_tls_dir }}/int_ecc384.crt" +postfix_tls_int_rsa2048_key: "{{ postfix_tls_dir }}/int_rsa2048.key" +postfix_tls_int_rsa2048_cert: "{{ postfix_tls_dir }}/int_rsa2048.crt" + +postfix_tls_ext_ecc384_key: "{{ postfix_tls_dir }}/ext_ecc384.key" +postfix_tls_ext_ecc384_cert: "{{ postfix_tls_dir }}/ext_ecc384.crt" +postfix_tls_ext_rsa2048_key: "{{ postfix_tls_dir }}/ext_rsa2048.key" +postfix_tls_ext_rsa2048_cert: "{{ postfix_tls_dir }}/ext_rsa2048.crt" + +postfix_tls_dh2048: "{{ postfix_tls_dir }}/dh2048.pem" + +postfix_use_mta_sts_resolver: yes + +postfix_default_config: + compatibility_level: 3.6 + + mydomain: "{{ mail_server.tld }}" + myhostname: "{{ (mail_server.mta_actual_hostname | d(host_name)) ~ '.' ~ mail_server.tld }}" + myorigin: "$mydomain" + masquerade_domains: "$mydomain" + mynetworks_style: host + mydestination: localhost + relay_domains: + inet_protocols: ipv4 + + virtual_transport: "lmtp:inet:{{ hostvars[mail_server.mua_hostname]['ansible_host'] ~ ((':' ~ mail_server.mua_lmtp_port) if mail_server.mua_lmtp_port is defined else '') }}" + virtual_alias_maps: "pgsql:{{ (postfix_sql_dir ~ '/aliases.cf') | quote }},pgsql:{{ (postfix_sql_dir ~ '/forwards.cf') | quote }}" + virtual_mailbox_domains: "pgsql:{{ (postfix_sql_dir ~ '/domains.cf') | quote }}" + virtual_mailbox_maps: "pgsql:{{ (postfix_sql_dir ~ '/users.cf') | quote }}" + + local_recipient_maps: "$virtual_mailbox_maps" + smtpd_sender_login_maps: "unionmap:{\ + pgsql:{{ (postfix_sql_dir ~ '/shared_users.cf') | quote }},\ + pgsql:{{ (postfix_sql_dir ~ '/self_users.cf') | quote }},\ + pgsql:{{ (postfix_sql_dir ~ '/aliases.cf') | quote }}\ + }" + + message_size_limit: "{{ mail_server.max_mail_size_bytes }}" + mailbox_size_limit: 0 + virtual_mailbox_limit: 0 + header_size_limit: 512000 + default_destination_recipient_limit: 25 + + queue_run_delay: 3m + minimal_backoff_time: 3m + maximal_backoff_time: 30m + maximal_queue_lifetime: 3d + bounce_queue_lifetime: 2d + + tls_append_default_CA: yes + tls_disable_workarounds: + tls_ssl_options: NO_COMPRESSION, NO_RENEGOTIATION, ENABLE_MIDDLEBOX_COMPAT, LEGACY_SERVER_CONNECT, PRIORITIZE_CHACHA + tls_preempt_cipherlist: yes + + smtp_dns_support_level: dnssec + smtp_tls_CApath: /etc/ssl/certs + smtp_tls_ciphers: medium + smtp_tls_exclude_ciphers: "aNULL, eNULL, EXP, LOW, MD5, DES, 3DES, RC4, CAMELLIA, kEDH+CAMELLIA, kRSA+CAMELLIA" + smtp_tls_protocols: ">=TLSv1.2" + smtp_tls_mandatory_ciphers: medium + smtp_tls_mandatory_protocols: ">=TLSv1.2" + smtp_tls_security_level: dane + smtp_tls_servername: hostname + smtp_starttls_timeout: 180s + smtp_tls_note_starttls_offer: yes + + smtp_tls_policy_maps: "{{ [ + 'pgsql:' ~ ((postfix_sql_dir ~ '/tls_policies.cf') | quote), + ('socketmap:inet:127.0.0.1:' ~ mail_server.mta_sts_port ~ ':postfix') if (postfix_use_mta_sts_resolver | d(false) == true) else '', + ] | select() | list | join(',') }}" + + smtpd_tls_cert_file: "{{ postfix_tls_ext_rsa2048_cert | quote }}" + smtpd_tls_key_file: "{{ postfix_tls_ext_rsa2048_key | quote }}" + smtpd_tls_eccert_file: "{{ postfix_tls_ext_ecc384_cert | quote }}" + smtpd_tls_eckey_file: "{{ postfix_tls_ext_ecc384_key | quote }}" + + smtpd_tls_security_level: may + smtpd_tls_ciphers: medium + smtpd_tls_mandatory_ciphers: medium + smtpd_tls_exclude_ciphers: "aNULL, eNULL, EXP, LOW, 
MD5, DES, 3DES, RC4, CAMELLIA, kEDH+CAMELLIA, kRSA+CAMELLIA" + smtpd_tls_protocols: ">=TLSv1.2" + smtpd_tls_mandatory_protocols: ">=TLSv1.2" + smtpd_tls_dh1024_param_file: "{{ postfix_tls_dh2048 | quote }}" + smtpd_tls_auth_only: yes + smtpd_tls_received_header: yes + + smtpd_sasl_type: dovecot + smtpd_sasl_path: "inet:{{ hostvars[mail_server.mua_hostname]['ansible_host'] ~ ((':' ~ mail_server.mua_auth_port) if mail_server.mua_auth_port is defined else '') }}" + smtpd_sasl_auth_enable: no + smtpd_sasl_local_domain: "$mydomain" + smtpd_sasl_exceptions_networks: "!{{ int_net }}" + smtpd_sasl_security_options: "noanonymous, noplaintext" + smtpd_sasl_tls_security_options: noanonymous + smtpd_sasl_authenticated_header: no + + + smtpd_client_restrictions: + - permit + mua_client_restrictions: + - permit_sasl_authenticated + - reject + + smtpd_helo_restrictions: + - reject_unauth_pipelining + - reject_invalid_helo_hostname + - permit_mynetworks + - reject_non_fqdn_helo_hostname + - "check_helo_access pcre:{{ (postfix_conf_dir ~ '/filter_smtpd_helo.pcre') | quote }}" + - "check_client_access texthash:{{ (postfix_conf_dir ~ '/smtpd_checks_relaxed.hash') | quote }}" + - reject_unknown_client_hostname + - reject_unknown_helo_hostname + - permit + mua_helo_restrictions: + - reject_unauth_pipelining + - reject_invalid_helo_hostname + - permit_sasl_authenticated + - reject + + smtpd_sender_restrictions: + - reject_unauth_pipelining + - reject_non_fqdn_sender + - permit_mynetworks + - "check_client_access texthash:{{ (postfix_conf_dir ~ '/smtpd_checks_relaxed.hash') | quote }}" + - reject_unknown_sender_domain + - permit + mua_sender_restrictions: + - reject_unauth_pipelining + - reject_non_fqdn_sender + - reject_authenticated_sender_login_mismatch + - permit_sasl_authenticated + - reject + + smtpd_relay_restrictions: + - reject_unauth_pipelining + - permit_mynetworks + - reject_unauth_destination + - permit + mua_relay_restrictions: + - reject_unauth_pipelining + - permit_sasl_authenticated + - reject + + smtpd_recipient_restrictions: + - reject_unauth_pipelining + - reject_non_fqdn_recipient + - "check_recipient_access pgsql:{{ (postfix_sql_dir ~ '/no_reply.cf') | quote }}" + #- "check_policy_service inet:{{ hostvars[mail_server.mua_hostname]['ansible_host'] ~ ((':' ~ mail_server.mua_quota_port) if mail_server.mua_quota_port is defined else '') }}" + - permit_mynetworks + - reject_unknown_recipient_domain + - reject_unlisted_recipient + - permit + mua_recipient_restrictions: + - reject_unauth_pipelining + - reject_non_fqdn_recipient + - "check_recipient_access pgsql:{{ (postfix_sql_dir ~ '/no_reply.cf') | quote }}" + #- "check_policy_service inet:{{ hostvars[mail_server.mua_hostname]['ansible_host'] ~ ((':' ~ mail_server.mua_quota_port) if mail_server.mua_quota_port is defined else '') }}" + - permit_sasl_authenticated + - reject + + smtpd_data_restrictions: + - reject_unauth_pipelining + - permit + smtpd_etrn_restrictions: + - reject + + + smtp_always_send_ehlo: yes + smtp_connect_timeout: 20s + smtp_helo_timeout: 120s + smtp_rcpt_timeout: 120s + smtp_mail_timeout: 180s + smtp_quit_timeout: 180s + smtp_xforward_timeout: 180s + smtp_pix_workarounds: delay_dotcrlf + smtp_use_tls: yes + smtp_transport_rate_delay: 1s + + smtpd_authorized_verp_clients: + smtpd_authorized_xclient_hosts: + smtpd_authorized_xforward_hosts: + smtpd_banner: "$myhostname ESMTP {{ org }} ($mail_name $mail_version) ready" + smtpd_client_connection_count_limit: 120 + smtpd_client_connection_rate_limit: 360 + 
smtpd_client_message_rate_limit: 120 + smtpd_client_recipient_rate_limit: 240 + smtpd_client_new_tls_session_rate_limit: 180 + smtpd_client_auth_rate_limit: 90 + smtpd_client_port_logging: yes + smtpd_delay_reject: yes + smtpd_error_sleep_time: 3s + smtpd_soft_error_limit: 3 + smtpd_hard_error_limit: 6 + smtpd_junk_command_limit: 15 + smtpd_helo_required: yes + smtpd_policy_service_default_action: DUNNO + smtpd_recipient_limit: 50 + smtpd_recipient_overshoot_limit: 50 + smtpd_timeout: 120s + smtpd_use_tls: yes + smtpd_discard_ehlo_keywords: silent-discard, etrn + + postscreen_access_list: "permit_mynetworks, cidr:{{ (postfix_conf_dir ~ '/filter_postscreen_connect.cidr') | quote }}" + postscreen_blacklist_action: drop + postscreen_dnsbl_action: enforce + postscreen_greet_action: enforce + postscreen_bare_newline_enable: no + postscreen_non_smtp_command_enable: no + postscreen_pipelining_enable: no + postscreen_dnsbl_max_ttl: 3h + postscreen_dnsbl_min_ttl: 10m + postscreen_dnsbl_threshold: 2 + postscreen_dnsbl_sites: + - "zen.spamhaus.org=127.0.0.[1..20]" + - "dnsbl.sorbs.net=127.0.0.[1..255]" + - "dnsbl.spfbl.net=127.0.0.[1..255]" + - "bl.nordspam.com=127.0.0.2" + postscreen_dnsbl_timeout: 2s + postscreen_greet_wait: 2s + postscreen_greet_banner: "$myhostname ESMTP {{ org }} ($mail_name $mail_version) loading..." + + smtpd_milters: "{{ [ + (('inet:' ~ hostvars[mail_server.rspamd_hostname]['ansible_host'] ~ ':' ~ mail_server.rspamd_port) if (mail_server.rspamd_hostname is defined and mail_server.rspamd_port is defined) else ''), + (('inet:' ~ hostvars[mail_server.clamav_hostname]['ansible_host'] ~ ':' ~ mail_server.clamav_port) if (mail_server.clamav_hostname is defined and mail_server.clamav_port is defined) else '') + ] | select() | list | join(',') }}" + milter_default_action: accept + milter_protocol: 6 + non_smtpd_milters: $smtpd_milters + + notify_classes: "bounce, data, delay, policy, resource, software" + swap_bangpath: no + show_user_unknown_table_name: no + remote_header_rewrite_domain: no.tld + local_header_rewrite_clients: + - permit_mynetworks + - permit_inet_interfaces + - permit_sasl_authenticated + enable_long_queue_ids: yes + disable_vrfy_command: yes + delay_warning_time: 20m + confirm_delay_cleared: yes + default_recipient_limit: 1000 + allow_min_user: yes + backwards_bounce_logfile_compatibility: no + biff: no + anvil_status_update_time: 1h + recipient_delimiter: "+" + append_dot_mydomain: yes + respectful_logging: no + + +postfix_services: + - service: 2525 + conf: {type: 'inet', priv: false, maxproc: 1} + command: postscreen + - service: smtpd + conf: {type: 'pass'} + command: smtpd + options: + syslog_name: postfix/smtp_ext + - service: dnsblog + conf: {maxproc: 0} + command: dnsblog + - service: tlsproxy + conf: {maxproc: 0} + command: tlsproxy + - service: smtp + conf: {type: 'inet', priv: false} + command: smtpd + options: + syslog_name: postfix/smtp_int + cleanup_service_name: cleanupsub + - service: submission + conf: {type: 'inet', priv: false} + command: smtpd + options: + syslog_name: postfix/submission + smtpd_tls_security_level: encrypt + smtpd_tls_cert_file: "{{ postfix_tls_int_rsa2048_cert | quote }}" + smtpd_tls_key_file: "{{ postfix_tls_int_rsa2048_key | quote }}" + smtpd_tls_eccert_file: "{{ postfix_tls_int_ecc384_cert | quote }}" + smtpd_tls_eckey_file: "{{ postfix_tls_int_ecc384_key | quote }}" + smtpd_sasl_auth_enable: yes + smtpd_client_restrictions: $mua_client_restrictions + smtpd_helo_restrictions: $mua_helo_restrictions + 
smtpd_sender_restrictions: $mua_sender_restrictions + smtpd_relay_restrictions: $mua_relay_restrictions + smtpd_recipient_restrictions: $mua_recipient_restrictions + milter_macro_daemon_name: ORIGINATING + cleanup_service_name: cleanupsub + smtpd_tls_protocols: ">=TLSv1" + smtpd_tls_mandatory_protocols: ">=TLSv1" + + - service: smtps + conf: {type: 'inet', priv: false} + command: smtpd + options: + syslog_name: postfix/smtps + smtpd_tls_wrappermode: yes + smtpd_tls_cert_file: "{{ postfix_tls_int_rsa2048_cert | quote }}" + smtpd_tls_key_file: "{{ postfix_tls_int_rsa2048_key | quote }}" + smtpd_tls_eccert_file: "{{ postfix_tls_int_ecc384_cert | quote }}" + smtpd_tls_eckey_file: "{{ postfix_tls_int_ecc384_key | quote }}" + smtpd_sasl_auth_enable: yes + smtpd_client_restrictions: $mua_client_restrictions + smtpd_helo_restrictions: $mua_helo_restrictions + smtpd_sender_restrictions: $mua_sender_restrictions + smtpd_relay_restrictions: $mua_relay_restrictions + smtpd_recipient_restrictions: $mua_recipient_restrictions + milter_macro_daemon_name: ORIGINATING + cleanup_service_name: cleanupsub + smtpd_tls_protocols: ">=TLSv1" + smtpd_tls_mandatory_protocols: ">=TLSv1" + + - service: pickup + conf: {priv: false, wakeup: 60, maxproc: 1} + command: pickup + - service: cleanup + conf: {priv: false, maxproc: 0} + command: cleanup + - service: cleanupsub + conf: {priv: false, maxproc: 0} + command: cleanup + options: + syslog_name: postfix/cleanupsub + header_checks: "pcre:{{ (postfix_conf_dir ~ '/filter_submission_header.pcre') | quote }}" + - service: qmgr + conf: {priv: false, wakeup: 300, maxproc: 1} + command: qmgr + - service: tlsmgr + conf: {wakeup: '1000?', maxproc: 1} + command: tlsmgr + - service: rewrite + command: trivial-rewrite + - service: bounce + conf: {maxproc: 0} + command: bounce + - service: defer + conf: {maxproc: 0} + command: bounce + - service: trace + conf: {maxproc: 0} + command: bounce + - service: verify + conf: {maxproc: 1} + command: verify + - service: flush + conf: {priv: false, wakeup: '1000?', maxproc: 0} + command: flush + - service: proxymap + command: proxymap + - service: proxywrite + conf: {maxproc: 1} + command: proxymap + - service: smtp + command: smtp + - service: relay + command: smtp + options: + syslog_name: postfix/$service_name + - service: showq + conf: {priv: false} + command: showq + - service: error + command: error + - service: retry + command: error + - service: discard + command: discard + - service: local + conf: {unpriv: false} + command: local + - service: virtual + conf: {unpriv: false} + command: virtual + - service: lmtp + command: lmtp + - service: anvil + conf: {maxproc: 1} + command: anvil + - service: scache + conf: {maxproc: 1} + command: scache + - service: postlog + conf: {type: 'unix-dgram', priv: false, maxproc: 1} + command: postlogd + + +postfix_sql_queries: + aliases: | + SELECT concat(email_username, '@', ( + SELECT domain FROM mail_domains WHERE id = email_domain_id) + ) AS email + FROM mail_aliases + WHERE + LOWER(alias_username) = LOWER('%u') AND + alias_domain_id = ( + SELECT id FROM mail_domains WHERE LOWER(domain) = LOWER('%d') + ) AND + enabled = true; + + domains: | + SELECT domain FROM mail_domains WHERE LOWER(domain) = LOWER('%s'); + + forwards: | + SELECT concat(source, ',', destination) + FROM mail_forwards + WHERE LOWER(source) = LOWER('%s') AND enabled = true; + + no_reply: | + SELECT + CASE + WHEN no_reply = true THEN 'REJECT' + ELSE 'DUNNO' + END + AS access + FROM mail_users + WHERE + LOWER(username) = 
LOWER('%u') AND + domain_id = ( + SELECT id FROM mail_domains WHERE LOWER(domain) = LOWER('%d') + ) AND + enabled = true; + + self_users: | + SELECT concat(username, '@', ( + SELECT domain FROM mail_domains WHERE id = domain_id + )) AS email + FROM mail_users + WHERE + LOWER(username) = LOWER('%u') AND + domain_id = ( + SELECT id FROM mail_domains WHERE LOWER(domain) = LOWER('%d') + ) AND + enabled = true; + + shared_users: | + SELECT to_user AS email + FROM mail_user_shares + WHERE LOWER(from_user) = LOWER('%s'); + + tls_policies: | + SELECT policy, params FROM mail_tls WHERE LOWER(foreign_domain) = LOWER('%s') AND enabled = true; + + users: | + SELECT 1 AS user + FROM mail_users + WHERE + LOWER(username) = LOWER('%u') AND + domain_id = ( + SELECT id FROM mail_domains WHERE LOWER(domain) = LOWER('%d') + ) AND + enabled = true; + + +postfix_relaxed_smtpd_domains: + - dellin.ru + - mx.smp.io + - smmplanner.com + - noty.smmplanner.com + - 5.135.32.65 + - avito.ru + - smtp-fallback.avito.ru + - platformalp.ru + - dba.platformalp.ru + - 85.119.149.136 + - 146.158.53 + - 146.158.48 + - 146.158.55 + - 178.44.116.85 diff --git a/roles/postfix/handlers/main.yml b/roles/postfix/handlers/main.yml new file mode 100644 index 0000000..09618fa --- /dev/null +++ b/roles/postfix/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart postfix + service: + name: postfix + state: restarted diff --git a/roles/postfix/tasks/main.yml b/roles/postfix/tasks/main.yml new file mode 100644 index 0000000..115d3ff --- /dev/null +++ b/roles/postfix/tasks/main.yml @@ -0,0 +1,204 @@ +- name: set postfix_cfg + set_fact: + postfix_cfg: "{{ postfix_default_config | d({}) | combine(postfix_config | d({}), recursive=true) }}" + + +- name: install postfix + include_tasks: tasks/install_packages.yml + vars: + package: + - postfix + - postfix-openrc + - postfix-pgsql + - postfix-pcre + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ postfix_user }}" + group: "{{ postfix_group }}" + + +- name: ensure postfix spool directory is owned by root + file: + path: /var/spool/postfix + state: directory + owner: root + group: root + + +- name: create postfix directory structure + file: + path: "{{ item }}" + state: directory + mode: 0700 + loop: + - "{{ postfix_conf_dir }}" + - "{{ postfix_sql_dir }}" + - "{{ postfix_tls_dir }}" + + +- name: generate dh params + include_role: + name: ca + vars: + function: dhparams + dh_params: + path: "{{ postfix_tls_dh2048 }}" + mode: '0400' + remote_gen: yes + notify: restart postfix + + +- name: remove unneeded postfix files + file: + path: "{{ postfix_conf_dir ~ '/' ~ item }}" + state: absent + loop: + - access + - aliases + - canonical + - generic + - header_checks + - main.cf.proto + - master.cf.proto + - relocated + - transport + - virtual + notify: restart postfix + + +- name: template postfix configuration + template: + src: "{{ item if item is string else item.src }}.j2" + dest: "{{ postfix_conf_dir ~ '/' ~ ((item ~ '.cf') if item is string else item.dest) }}" + force: yes + mode: 0400 + lstrip_blocks: yes + loop: + - { src: postscreen_connect, dest: filter_postscreen_connect.cidr } + - { src: smtpd_helo, dest: filter_smtpd_helo.pcre } + - { src: submission_header, dest: filter_submission_header.pcre } + - main + - master + - { src: smtpd_checks_relaxed, dest: smtpd_checks_relaxed.hash } + notify: restart postfix + + +- name: template postfix sql snippets + template: + src: sql.j2 + dest: "{{ postfix_sql_dir ~ '/' ~ item }}.cf" + force: yes + mode: 
0400 + vars: + query: "{{ postfix_sql_queries[item] }}" + loop: + - aliases + - domains + - forwards + - no_reply + - self_users + - shared_users + - tls_policies + - users + notify: restart postfix + + +- name: install mta resolver + include_role: + name: mta-sts + vars: + mta_sts_log_verbosity: info + mta_sts_config: + port: "{{ mail_server.mta_sts_port }}" + + +- name: add extra cname record + include_role: + name: ns + vars: + function: add_records + ns_add_default_record: no + ns_records: + - name: "{{ mail_server.mta_actual_hostname }}" + type: CNAME + value: "{{ host_fqdn }}" + when: mail_server.mta_actual_hostname is defined + + +- name: add records to external ns + include_role: + name: external_ns + vars: + nse_items: + - {name: '{{ mail_server.mta_actual_hostname }}', type: 'CNAME', value: '@'} + - {name: '@', type: 'MX', value: '0 {{ mail_server.mta_actual_hostname ~ "." ~ mail_server.tld ~ "." }}'} + + - {name: '@', type: 'TXT', value: 'v=spf1 ip4:{{ mail_server.allowed_spf | join(" ip4:") }} ~all'} + - {name: '_adsp._domainkey', type: 'TXT', value: 'dkim=all'} + - {name: '_dmarc', type: 'TXT', value: 'v=DMARC1;p=reject;sp=reject;rua=mailto:dmarc-report@{{ mail_server.tld }}'} + - {name: '_report._domainkey', type: 'TXT', value: 'ra=dkim-report rr=o:s:u:v'} + - {name: '_smtp._tls', type: 'TXT', value: 'v=TLSRPTv1;rua=mailto:smtp-tls-report@{{ mail_server.tld }}'} + + - {name: '_mta-sts', type: 'TXT', value: 'v=STSv1; id={{ mail_server.mta_sts_id | d("sts2022") }}'} + + nse_function: add_records + nse_instant: yes + + +- name: deploy certs + include_role: + name: certs + vars: + common: + owner: root + group: root + post_hook: service postfix restart + notify: restart postfix + ecc: no + hostname: "{{ mail_server.mta_actual_hostname }}" + certs: + - id: postfix-ecc-ext + cert: "{{ postfix_tls_ext_ecc384_cert }}" + key: "{{ postfix_tls_ext_ecc384_key }}" + ecc: yes + tld: "{{ mail_server.tld }}" + + - id: postfix-ecc-int + cert: "{{ postfix_tls_int_ecc384_cert }}" + key: "{{ postfix_tls_int_ecc384_key }}" + ecc: yes + + - id: postfix-rsa-ext + cert: "{{ postfix_tls_ext_rsa2048_cert }}" + key: "{{ postfix_tls_ext_rsa2048_key }}" + tld: "{{ mail_server.tld }}" + + - id: postfix-rsa-int + cert: "{{ postfix_tls_int_rsa2048_cert }}" + key: "{{ postfix_tls_int_rsa2048_key }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ postfix_conf_dir }}" + - "{{ postfix_sql_dir }}" + - "{{ postfix_tls_dir }}" + + +- name: enable and start postfix + service: + name: postfix + enabled: yes + state: started diff --git a/roles/postfix/templates/main.j2 b/roles/postfix/templates/main.j2 new file mode 100644 index 0000000..d22aa90 --- /dev/null +++ b/roles/postfix/templates/main.j2 @@ -0,0 +1,17 @@ +{% macro postfix_option(option) -%} + {% if option.value is boolean -%} + {{ option.key }} = {{ 'yes' if option.value else 'no' }} + {% elif option.value | type_debug == 'list' -%} + {{ option.key }} = + {% for suboption in option.value -%} + {{ "\t" ~ suboption }} + {% endfor -%} + {% else -%} + {{ option.key }} = {{ option.value if option.value != None else '' }} + {% endif -%} +{% endmacro -%} + + +{% for option in (postfix_cfg | d({}) | dict2items) -%} + {{- postfix_option(option) }} +{%- endfor %} diff --git a/roles/postfix/templates/master.j2 b/roles/postfix/templates/master.j2 new file mode 100644 index 0000000..9e2ffef --- /dev/null +++ b/roles/postfix/templates/master.j2 @@ 
-0,0 +1,28 @@ +{% macro postfix_service(service) -%} + {% set conf = service.conf if service.conf is mapping else {} -%} + {{ [ + service.service, + "\t", + ('unix' if (conf.type is not defined) else conf.type), + ('-' if (conf.priv is not defined) else ('y' if conf.priv else 'n')), + ('-' if (conf.unpriv is not defined) else ('y' if conf.unpriv else 'n')), + ('n' if (conf.chroot is not defined) else ('y' if conf.chroot else 'n')), + ('-' if (conf.wakeup is not defined) else (conf.wakeup | string)), + ('-' if (conf.maxproc is not defined) else (conf.maxproc | string)), + service.command + ] | select() | list | join("\t") }} + {% for option in (service.options | d({}) | dict2items) -%} + {% if option.value is boolean -%} + {{ "\t" ~ '-o ' ~ option.key ~ '=' ~ ('yes' if option.value else 'no') }} + {% elif option.value | type_debug == 'list' -%} + {{ "\t" ~ '-o ' ~ option.key ~ '=' ~ (option.value | map('quote') | join(',')) }} + {% else -%} + {{ "\t" ~ '-o ' ~ option.key ~ '=' ~ (option.value if option.value != None else '') }} + {% endif -%} + {% endfor -%} +{% endmacro -%} + + +{% for service in (postfix_services | d([])) -%} + {{- postfix_service(service) }} +{%- endfor %} diff --git a/roles/postfix/templates/postscreen_connect.j2 b/roles/postfix/templates/postscreen_connect.j2 new file mode 100644 index 0000000..0163321 --- /dev/null +++ b/roles/postfix/templates/postscreen_connect.j2 @@ -0,0 +1,7 @@ +10.0.0.0/8 REJECT +172.16.0.0/12 REJECT +192.168.0.0/16 REJECT + +{% for bogon in (bogons | d([])) -%} +{{ bogon }} REJECT +{% endfor -%} diff --git a/roles/postfix/templates/smtpd_checks_relaxed.j2 b/roles/postfix/templates/smtpd_checks_relaxed.j2 new file mode 100644 index 0000000..255deee --- /dev/null +++ b/roles/postfix/templates/smtpd_checks_relaxed.j2 @@ -0,0 +1,3 @@ +{% for domain in (postfix_relaxed_smtpd_domains | d([])) -%} +{{ domain }} OK +{% endfor -%} diff --git a/roles/postfix/templates/smtpd_helo.j2 b/roles/postfix/templates/smtpd_helo.j2 new file mode 100644 index 0000000..caf9713 --- /dev/null +++ b/roles/postfix/templates/smtpd_helo.j2 @@ -0,0 +1,14 @@ +/^\s*localhost\s*$/ REJECT Invalid HELO hostname specified +/^\s*localhost\.localdomain\s*$/ REJECT Invalid HELO hostname specified +/^\s*127\.([0-9]{1,3}\.){2}[0-9]{1,3}\s*$/ REJECT Invalid HELO hostname specified +/^\s*10\.([0-9]{1,3}\.){2}[0-9]{1,3}\s*$/ REJECT Invalid HELO hostname specified +/^\s*192\.168.[0-9]{1,3}\.[0-9]{1,3}\s*$/ REJECT Invalid HELO hostname specified +/^\s*172\.((1[6-9]\.)|(2[0-9]\.)|(3[0-1]\.))([0-9]{1,3}\.)[0-9]{1,3}\s*$/ REJECT Invalid HELO hostname specified + +{% for host in groups['nodes'] -%} +/^\s*{{ hostvars[host]['external_ipv4'] | regex_escape() }}\s*$/ REJECT Invalid HELO hostname specified +{% endfor -%} + +{% for domain in mail_server.domains -%} +/(^|\.){{ domain | regex_escape() }}\s*$/ REJECT Invalid HELO hostname specified +{% endfor -%} diff --git a/roles/postfix/templates/sql.j2 b/roles/postfix/templates/sql.j2 new file mode 100644 index 0000000..54b3224 --- /dev/null +++ b/roles/postfix/templates/sql.j2 @@ -0,0 +1,6 @@ +hosts = {{ hostvars[mail_server.db_server_hostname]['ansible_host'] }} +user = {{ mail_server.db_user }} +password = {{ mail_server.db_pass }} +dbname = {{ mail_server.db_name }} + +query = {{ query | replace('\n', ' ') }} diff --git a/roles/postfix/templates/submission_header.j2 b/roles/postfix/templates/submission_header.j2 new file mode 100644 index 0000000..04c3f60 --- /dev/null +++ b/roles/postfix/templates/submission_header.j2 @@ -0,0 +1,14 @@ 
+/^\s*Received:/ IGNORE +/^\s*X-Enigmail:/ IGNORE +/^\s*X-Mailer:/ IGNORE +/^\s*X-Originating-IP:/ IGNORE +/^\s*X-PHP-Originating-Script:/ IGNORE +/^\s*X-Forward:/ IGNORE +/^\s*User-Agent:/ IGNORE +/^\s*Organization:/ IGNORE + +/^\s*(Mime-Version:\s*[0-9\.]+)\s.+/ REPLACE $1 + +{% for domain in mail_server.domains -%} +/^\s*From:.*@{{ domain | regex_escape() }}/ PREPEND BIMI-Selector: v=BIMI1; s=default; +{% endfor -%} diff --git a/roles/postgres/defaults/main.yml b/roles/postgres/defaults/main.yml new file mode 100644 index 0000000..08f0150 --- /dev/null +++ b/roles/postgres/defaults/main.yml @@ -0,0 +1,67 @@ +postgresql_user: postgres +postgresql_group: postgres + +postgresql_data_dir: /db +postgresql_conf_dir: /etc/postgresql +postgresql_tls_dir: "{{ postgresql_conf_dir }}/tls" +postgresql_dhparam_file: dhparam.pem + +postgresql_db: [] + +postgresql_string_parameters: + - listen_addresses + - log_destination + - syslog_facility + - datestyle + - lc_monetary + - lc_messages + - lc_numeric + - lc_time + - unix_socket_directories + - timezone + - log_timezone + - ssl_ciphers + - ssl_ca_file + - ssl_cert_file + - ssl_key_file + - ssl_dh_params_file + + +postgresql_default_config: + listen_addresses: "{{ '*' if (host_primary_role == 'postgres') or (database_self_hosted == false) else '127.0.0.1' }}" + max_connections: 125 + bonjour: no + password_encryption: scram-sha-256 + + wal_level: replica + wal_init_zero: no + wal_recycle: no + + log_destination: syslog + syslog_facility: LOCAL0 + datestyle: "ISO, DMY" + + lc_messages: C + lc_monetary: C + lc_numeric: C + lc_time: C + unix_socket_directories: /var/run/postgresql + + log_timezone: "{{ timezone }}" + timezone: "{{ timezone }}" + + shared_buffers: "{{ ((hardware.memory | d(512) | int) * (1024/2)) | int }}kB" + work_mem: "{{ ((hardware.memory | d(512) | int) * (1024/35)) | round(1, 'ceil') | int }}kB" + max_wal_size: "{{ ((hardware.disk | d(2) | float) * (1024 / 2)) | int }}MB" + + +postgresql_tls_config: + ssl: yes + ssl_ciphers: "ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256" + ssl_prefer_server_ciphers: yes + ssl_min_protocol_version: TLSv1.2 + ssl_ecdh_curve: secp384r1 + ssl_ca_file: "{{ postgresql_tls_dir }}/root.crt" + ssl_cert_file: "{{ postgresql_tls_dir }}/ecc384.crt" + ssl_key_file: "{{ postgresql_tls_dir }}/ecc384.key" + ssl_dh_params_file: "{{ postgresql_tls_dir ~ '/' ~ postgresql_dhparam_file }}" diff --git a/roles/postgres/handlers/main.yml b/roles/postgres/handlers/main.yml new file mode 100644 index 0000000..6193b80 --- /dev/null +++ b/roles/postgres/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart postgresql + service: + name: postgresql + state: restarted + + +- name: reload postgres config + community.postgresql.postgresql_query: + query: SELECT pg_reload_conf(); diff --git a/roles/postgres/tasks/add_database.yml b/roles/postgres/tasks/add_database.yml new file mode 100644 index 0000000..bc46743 --- /dev/null +++ b/roles/postgres/tasks/add_database.yml @@ -0,0 +1,73 @@ +- name: check if database is an object + fail: + msg: database must be an object + when: database is not mapping + + +- name: check if database parameters are defined + fail: + msg: some database parameters are invalid or not defined + when: (database.name is not string) or (database.user is not string) or + (database.pass is not string) + + +- name: add db to postgres + 
community.postgresql.postgresql_db: + name: "{{ database.name }}" + trust_input: no + + +- name: add user to postgres + community.postgresql.postgresql_user: + comment: "{{ database.user_comment | d('managed by ansible') }}" + encrypted: yes + expires: infinity + name: "{{ database.user }}" + password: "{{ database.pass }}" + role_attr_flags: "{{ database.user_flags | d('NOSUPERUSER,NOCREATEROLE,NOCREATEDB') }}" + trust_input: no + + +- name: grant database privileges to user + community.postgresql.postgresql_privs: + database: "{{ database.name }}" + privs: CREATE,CONNECT,TEMPORARY + type: database + role: "{{ database.user }}" + + +- name: grant privileges to all tables + community.postgresql.postgresql_privs: + database: "{{ database.name }}" + privs: ALL + type: table + objs: ALL_IN_SCHEMA + role: "{{ database.user }}" + + +- name: grant privileges to all sequences + community.postgresql.postgresql_privs: + database: "{{ database.name }}" + privs: ALL + type: sequence + objs: ALL_IN_SCHEMA + role: "{{ database.user }}" + + +- name: add line to postgres hba + community.postgresql.postgresql_pg_hba: + dest: "{{ postgresql_conf_dir }}/pg_hba.conf" + contype: "{{ 'host' if (database.ssl | d(false) == false) else 'hostssl' }}" + databases: "{{ database.name }}" + users: "{{ database.user }}" + address: "{{ item }}/32" + method: "{{ database.auth_method | d('scram-sha-256') }}" + register: result + loop: "{{ [database.addresses] if database.addresses is string else + (database.addresses | d(['127.0.0.1' if (database.self_hosted | d(false) == true) else ansible_host])) }}" + + +- name: reload postgres config + community.postgresql.postgresql_query: + query: SELECT pg_reload_conf(); + when: result.changed diff --git a/roles/postgres/tasks/install.yml b/roles/postgres/tasks/install.yml new file mode 100644 index 0000000..a39ec65 --- /dev/null +++ b/roles/postgres/tasks/install.yml @@ -0,0 +1,138 @@ +- name: set pg_cfg + set_fact: + pg_cfg: "{{ postgresql_default_config | d({}) | combine(postgresql_config | d({}), recursive=true) }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - postgresql + - postgresql-contrib + - py3-psycopg2 + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + + +- name: create config directory + file: + path: "{{ postgresql_conf_dir }}" + state: directory + mode: 0770 + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + + +- name: include custom config in default postgres config + lineinfile: + path: "{{ postgresql_conf_dir }}/postgresql.conf" + line: "include 'custom.conf'" + create: yes + mode: 0400 + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + notify: restart postgresql + + +- name: template custom config + template: + src: postgresql.j2 + dest: "{{ postgresql_conf_dir }}/custom.conf" + force: yes + mode: 0400 + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + lstrip_blocks: yes + notify: restart postgresql + vars: + config: "{{ pg_cfg }}" + + +- name: template pg_hba.conf if it does not exist + template: + src: pg_hba.j2 + dest: "{{ postgresql_conf_dir }}/pg_hba.conf" + force: no + mode: 0400 + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + notify: restart postgresql + + +- name: ensure postgres hba allows local connections + community.postgresql.postgresql_pg_hba: + dest: "{{ postgresql_conf_dir }}/pg_hba.conf" + contype: local + 
databases: all + users: all + method: trust + notify: restart postgresql + + +- name: edit service config + lineinfile: + path: /etc/conf.d/postgresql + regexp: "^{{ item.name }}=" + line: '{{ item.name }}="{{ item.value }}"' + notify: restart postgresql + loop: + - name: conf_dir + value: "{{ postgresql_conf_dir }}" + - name: data_dir + value: "{{ postgresql_data_dir }}" + - name: pg_opts + value: -c log_destination='syslog' + - name: initdb_opts + value: --locale=en_US.UTF-8 + + +- name: disable logfile in service config + lineinfile: + path: /etc/conf.d/postgresql + regexp: '^logfile=(.*)$' + line: '#logfile=\1' + backrefs: yes + notify: restart postgresql + + +- name: collect .new files + find: + paths: "{{ postgresql_conf_dir }}" + patterns: "*.new" + register: new_files + + +- name: remove .new files + file: + path: "{{ item.path }}" + state: absent + loop: "{{ new_files.files | flatten(levels=1) }}" + + +- name: remove old postgres log file + file: + path: /var/log/postgresql/postmaster.log + state: absent + changed_when: no + + +- name: flush handlers + meta: flush_handlers + + +# TODO: backup configuration for postgres + + +- name: enable and start postgres + service: + name: postgresql + state: started + enabled: yes + environment: + LANG: 'en_US.UTF-8' diff --git a/roles/postgres/tasks/install_tls.yml b/roles/postgres/tasks/install_tls.yml new file mode 100644 index 0000000..7d461f0 --- /dev/null +++ b/roles/postgres/tasks/install_tls.yml @@ -0,0 +1,56 @@ +- name: include optional tls config in default postgres config + lineinfile: + path: "{{ postgresql_conf_dir }}/postgresql.conf" + line: "include_if_exists 'tls.conf'" + create: no + notify: restart postgresql + + +- name: create tls directory for holding certs + file: + path: "{{ postgresql_tls_dir }}" + state: directory + mode: 0700 + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + + +- name: deploy ecc384 cert + include_role: + name: certs + vars: + certs: + cert: "{{ postgresql_tls_dir }}/ecc384.crt" + key: "{{ postgresql_tls_dir }}/ecc384.key" + chain: "{{ postgresql_tls_dir }}/root.crt" + ecc: yes + post_hook: service postgresql restart + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + + +- name: generate dh params + include_role: + name: ca + vars: + function: dhparams + dh_params: + path: "{{ postgresql_tls_dir }}/{{ postgresql_dhparam_file }}" + mode: '0400' + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + remote_gen: no + + +- name: template tls config + template: + src: postgresql.j2 + dest: "{{ postgresql_conf_dir }}/tls.conf" + force: yes + mode: 0400 + owner: "{{ postgresql_user }}" + group: "{{ postgresql_group }}" + lstrip_blocks: yes + notify: restart postgresql + vars: + config: "{{ postgresql_tls_config }}" diff --git a/roles/postgres/tasks/integrate.yml b/roles/postgres/tasks/integrate.yml new file mode 100644 index 0000000..56f6a12 --- /dev/null +++ b/roles/postgres/tasks/integrate.yml @@ -0,0 +1,19 @@ +- name: install postgres for self-hosted deployment + include_role: + name: postgres + vars: + function: install + when: database_self_hosted | d(false) == true + + +- name: add database + include_role: + name: postgres + apply: + delegate_to: "{{ inventory_hostname if (database_self_hosted | d(false) == true) else services.db.hostname }}" + vars: + function: add_database + database: + name: "{{ database_name }}" + user: "{{ database_user }}" + pass: "{{ database_pass }}" diff --git a/roles/postgres/tasks/main.yml b/roles/postgres/tasks/main.yml new 
file mode 100644 index 0000000..2693218 --- /dev/null +++ b/roles/postgres/tasks/main.yml @@ -0,0 +1,33 @@ +- name: install postgres + include_tasks: install.yml + when: function == 'install' + + +- name: install postgres tls enhancements + include_tasks: install_tls.yml + when: function == 'install_tls' + + +- name: postgres maintenance + include_tasks: maintenance.yml + when: function == 'maintenance' + + +- name: add custom database + include_tasks: add_database.yml + when: function == 'add_database' + + +- name: run postgres script + include_tasks: run_script.yml + when: function == 'run_script' + + +- name: run postgres query + include_tasks: run_query.yml + when: function == 'run_query' + + +- name: postgres integration + include_tasks: integrate.yml + when: function == 'integrate' diff --git a/roles/postgres/tasks/maintenance.yml b/roles/postgres/tasks/maintenance.yml new file mode 100644 index 0000000..b33da29 --- /dev/null +++ b/roles/postgres/tasks/maintenance.yml @@ -0,0 +1,22 @@ +- name: get info about all databases + community.postgresql.postgresql_info: + filter: + - databases + register: pg_databases + + +- block: + - name: vacuum and analyze database + community.postgresql.postgresql_query: + db: "{{ item.database_name }}" + query: VACUUM FULL ANALYZE; + changed_when: no + + + - name: reindex database + community.postgresql.postgresql_query: + db: "{{ item.database_name }}" + query: 'REINDEX DATABASE "{{ item.database_name }}";' + changed_when: no + + loop: "{{ pg_databases.databases }}" diff --git a/roles/postgres/tasks/run_query.yml b/roles/postgres/tasks/run_query.yml new file mode 100644 index 0000000..f468618 --- /dev/null +++ b/roles/postgres/tasks/run_query.yml @@ -0,0 +1,44 @@ +- name: fail if query is not an object + fail: + msg: query must be an object + when: query is not mapping + + +- name: fail if query parameters are incorrect + fail: + msg: some query parameters are incorrect + when: query.text is not string + + +- name: execute query + community.postgresql.postgresql_query: + db: "{{ query.database | d(omit) }}" + query: "{{ query.text }}" + positional_args: "{{ query.positional_args | d(omit) }}" + register: db_result + changed_when: false + + +- name: set query result + set_fact: + query_result: "{{ db_result.query_result | d({}) }}" + + +- block: + - name: grant privileges to all tables + community.postgresql.postgresql_privs: + database: "{{ query.database }}" + privs: ALL + type: table + objs: ALL_IN_SCHEMA + role: "{{ query.user }}" + + - name: grant privileges to all sequences + community.postgresql.postgresql_privs: + database: "{{ query.database }}" + privs: ALL + type: sequence + objs: ALL_IN_SCHEMA + role: "{{ query.user }}" + + when: query.refresh_privs | d(false) == true \ No newline at end of file diff --git a/roles/postgres/tasks/run_script.yml b/roles/postgres/tasks/run_script.yml new file mode 100644 index 0000000..7fd74b1 --- /dev/null +++ b/roles/postgres/tasks/run_script.yml @@ -0,0 +1,47 @@ +- name: fail if script is not an object + fail: + msg: script must be an object + when: script is not mapping + + +- name: create temporary file on postgres for holding the script + tempfile: + state: file + register: tf + +- name: upload script + copy: + content: "{{ script.text }}" + dest: "{{ tf.path }}" + force: yes + +- name: execute script + community.postgresql.postgresql_query: + db: "{{ script.database | mandatory }}" + path_to_script: "{{ tf.path }}" + as_single_query: "{{ script.as_single_query | d(false) }}" + +- name: remove temp script 
+ file: + path: "{{ tf.path }}" + state: absent + + +- block: + - name: grant privileges to all tables + community.postgresql.postgresql_privs: + database: "{{ script.database }}" + privs: ALL + type: table + objs: ALL_IN_SCHEMA + role: "{{ script.user }}" + + - name: grant privileges to all sequences + community.postgresql.postgresql_privs: + database: "{{ script.database }}" + privs: ALL + type: sequence + objs: ALL_IN_SCHEMA + role: "{{ script.user }}" + + when: script.refresh_privs | d(false) == true \ No newline at end of file diff --git a/roles/postgres/templates/pg_hba.j2 b/roles/postgres/templates/pg_hba.j2 new file mode 100644 index 0000000..138f6a6 --- /dev/null +++ b/roles/postgres/templates/pg_hba.j2 @@ -0,0 +1,2 @@ +# TYPE DATABASE USER ADDRESS METHOD +local all all trust diff --git a/roles/postgres/templates/postgresql.j2 b/roles/postgres/templates/postgresql.j2 new file mode 100644 index 0000000..51fab6c --- /dev/null +++ b/roles/postgres/templates/postgresql.j2 @@ -0,0 +1,14 @@ +{% macro pg_option(option) -%} + {% if option.value is boolean -%} + {{- option.key | lower }} = {{ 'on' if option.value else 'off' }} + {% elif option.value is string and option.key in postgresql_string_parameters -%} + {{- option.key | lower }} = '{{ option.value }}' + {% else -%} + {{- option.key | lower }} = {{ option.value }} + {% endif -%} +{% endmacro -%} + + +{% for option in (config | d({}) | dict2items) -%} + {{- pg_option(option) -}} +{%- endfor -%} diff --git a/roles/prometheus/defaults/main.yml b/roles/prometheus/defaults/main.yml new file mode 100644 index 0000000..9ac8206 --- /dev/null +++ b/roles/prometheus/defaults/main.yml @@ -0,0 +1,16 @@ +prometheus_user: prometheus +prometheus_group: prometheus + +prometheus_conf_dir: /etc/prometheus +prometheus_conf_file: "{{ prometheus_conf_dir }}/prometheus.yml" +prometheus_snmp_file: "{{ prometheus_conf_dir }}/snmp.yml" +prometheus_data_dir: /opt/tsdb + +prometheus_port: 9090 +prometheus_snmp_port: 9116 + +prometheus_default_config: + scrape_interval: '90s' + scrape_timeout: '10s' + + diff --git a/roles/prometheus/handlers/main.yml b/roles/prometheus/handlers/main.yml new file mode 100644 index 0000000..0eb9d6d --- /dev/null +++ b/roles/prometheus/handlers/main.yml @@ -0,0 +1,10 @@ +- name: restart prometheus + service: + name: prometheus + state: restarted + + +- name: restart snmp-exporter + service: + name: snmp-exporter + state: restarted diff --git a/roles/prometheus/tasks/add_node.yml b/roles/prometheus/tasks/add_node.yml new file mode 100644 index 0000000..c747515 --- /dev/null +++ b/roles/prometheus/tasks/add_node.yml @@ -0,0 +1,7 @@ +- name: install node-exporter + include_tasks: tasks/install_packages.yml + vars: + package: + - prometheus-node-exporter + + diff --git a/roles/prometheus/tasks/add_target.yml b/roles/prometheus/tasks/add_target.yml new file mode 100644 index 0000000..30e6852 --- /dev/null +++ b/roles/prometheus/tasks/add_target.yml @@ -0,0 +1,78 @@ +- name: check if target is an object + fail: + msg: target must be an object + when: target is not mapping + + +- name: check if target parameters are defined + fail: + msg: some target parameters are invalid or not defined + when: target.name is not string + + +- name: fail if prometheus is not present + debug: + msg: prometheus host is missing, will not add target + when: services.prometheus is not defined or services.prometheus.hostname is not defined + + +- block: + - name: slurp prometheus config + slurp: + path: "{{ 
hostvars[services.prometheus.hostname]['prometheus_conf_file'] | d(prometheus_conf_file) }}" + register: slurped_config_file + delegate_to: "{{ services.prometheus.hostname }}" + + + - name: parse config file + set_fact: + pm_config_file: "{{ slurped_config_file.content | b64decode | from_yaml }}" + + + - name: construct scrape target config snippet + set_fact: + pm_target: + job_name: "{{ target.name if (target.exact_name | d(false) == true) else (target.name ~ '-' ~ inventory_hostname ~ '-' ~ ansible_host) }}" + scrape_interval: "{{ target.scrape_interval | d(None) }}" + scrape_timeout: "{{ target.scrape_timeout | d(None) }}" + metrics_path: "{{ target.url | d('/metrics') }}" + scheme: "{{ target.scheme | d('http') }}" + static_configs: + - targets: + - "{% if target.use_ip | d(false) == true -%}\ + {{ target.ip | d(ansible_host) }}\ + {% else -%}\ + {{ (target.hostname | d(host_name)) ~ '.' ~ (target.tld | d(host_tld)) }}\ + {% endif -%}{{ (':' ~ target.port) if target.port is defined else '' }}" + + + - name: finalize building scrape snippet + set_fact: + pm_target_final: "{{ pm_target | combine(target.extra_params | d({}), recursive=true) | dict2items | rejectattr('value', 'equalto', None) | list | items2dict }}" + + + - name: output scrape target config + debug: + msg: "{{ pm_target_final }}" + + + - name: update prometheus config + blockinfile: + path: "{{ hostvars[services.prometheus.hostname]['prometheus_conf_file'] | d(prometheus_conf_file) }}" + block: "{{ [pm_target_final] | to_nice_yaml(indent=2, width=512) | indent(width=2, first=true) }}" + insertafter: "--- scrape config begin ---" + marker: " # target: {mark}" + marker_begin: "{{ pm_target_final.job_name }} begin" + marker_end: "{{ pm_target_final.job_name }} end" + delegate_to: "{{ services.prometheus.hostname }}" + register: result + + + - name: restart prometheus if its config was changed + service: + name: prometheus + state: restarted + delegate_to: "{{ services.prometheus.hostname }}" + when: result.changed + + when: services.prometheus is defined and services.prometheus.hostname is defined diff --git a/roles/prometheus/tasks/install.yml b/roles/prometheus/tasks/install.yml new file mode 100644 index 0000000..09d4085 --- /dev/null +++ b/roles/prometheus/tasks/install.yml @@ -0,0 +1,151 @@ +- name: set prometheus_cfg + set_fact: + prometheus_cfg: "{{ prometheus_default_config | d({}) | combine(prometheus_config | d({}), recursive=true) }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - prometheus + - prometheus-snmp-exporter + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ prometheus_user }}" + group: "{{ prometheus_group }}" + + +- name: create directories + file: + path: "{{ item }}" + state: directory + mode: 0750 + owner: "{{ prometheus_user }}" + group: "{{ prometheus_group }}" + loop: + - "{{ prometheus_conf_dir }}" + - "{{ prometheus_data_dir }}" + + +- name: check if config file exists + stat: + path: "{{ prometheus_conf_file }}" + register: file_exists + + +- name: slurp existing config file + slurp: + src: "{{ prometheus_conf_file }}" + register: existing_config + when: file_exists.stat.exists + no_log: yes + + +- name: template prometheus config + template: + src: config.j2 + dest: "{{ prometheus_conf_file }}" + force: yes + mode: 0600 + owner: "{{ prometheus_user }}" + group: "{{ prometheus_group }}" + notify: restart prometheus + when: not file_exists.stat.exists or (existing_config is defined and 
not (existing_config.content | b64decode) is search('managed by ansible')) + + +- name: edit prometheus init config + lineinfile: + path: /etc/conf.d/prometheus + regexp: '^{{ item.name }}=' + line: '{{ item.name }}={{ item.value | quote }}' + notify: restart prometheus + loop: + - { name: "prometheus_config_file", value: "{{ prometheus_conf_file }}" } + - { name: "prometheus_storage_path", value: "{{ prometheus_data_dir }}" } + - { name: "prometheus_retention_time", value: "{{ prometheus_retention_time | d('15d') }}" } + + +- name: remove log entries from prometheus init config + lineinfile: + path: /etc/conf.d/prometheus + regexp: '^{{ item }}=' + state: absent + notify: restart prometheus + loop: + - output_log + - error_log + + +- name: template prometheus init script + template: + src: init.j2 + dest: /etc/init.d/prometheus + force: yes + mode: "+x" + notify: restart prometheus + + +- name: remove snmp-exporter init config + file: + path: /etc/conf.d/snmp-exporter + state: absent + notify: restart snmp-exporter + + +- name: template snmp-exporter init script + template: + src: snmp_init.j2 + dest: /etc/init.d/snmp-exporter + force: yes + mode: "+x" + notify: restart snmp-exporter + + +- name: add prometheus metric target + include_role: + name: prometheus + vars: + function: add_target + target: + name: self + url: /metrics + use_ip: yes + ip: 127.0.0.1 + port: "{{ prometheus_port }}" + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ prometheus_conf_dir }}" + + +- name: enable and start services + service: + name: "{{ item }}" + enabled: yes + state: started + loop: + - snmp-exporter + - prometheus diff --git a/roles/prometheus/tasks/main.yml b/roles/prometheus/tasks/main.yml new file mode 100644 index 0000000..24f1ff9 --- /dev/null +++ b/roles/prometheus/tasks/main.yml @@ -0,0 +1,8 @@ +- name: install prometheus + include_tasks: install.yml + when: function == 'install' + + +- name: add scrape target + include_tasks: add_target.yml + when: function == 'add_target' diff --git a/roles/prometheus/templates/config.j2 b/roles/prometheus/templates/config.j2 new file mode 100644 index 0000000..977bbf5 --- /dev/null +++ b/roles/prometheus/templates/config.j2 @@ -0,0 +1,8 @@ +# managed by ansible + +{{ {'global': prometheus_cfg} | to_nice_yaml(indent=2, width=512) }} + +scrape_configs: + # --- scrape config begin --- + + # --- scrape config end --- diff --git a/roles/prometheus/templates/init.j2 b/roles/prometheus/templates/init.j2 new file mode 100644 index 0000000..a0ac191 --- /dev/null +++ b/roles/prometheus/templates/init.j2 @@ -0,0 +1,32 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ prometheus_conf_dir }}" +command=/usr/bin/prometheus +command_args="--config.file=$prometheus_config_file \ + --storage.tsdb.path=$prometheus_storage_path \ + --storage.tsdb.retention.time=$prometheus_retention_time \ + --web.listen-address={{ ('127.0.0.1:' ~ prometheus_port) | quote }} \ + --web.page-title={{ ('Prometheus | ' ~ org) | quote }} \ + --web.external-url={{ host_url | quote }}" +command_user="{{ prometheus_user }}:{{ prometheus_group }}" +extra_started_commands="reload" +pidfile="/var/run/$SVCNAME.pid" +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + 
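+# note: rc_ulimit below raises the open-file limit for the TSDB; OpenRC sources
+# /etc/conf.d/prometheus, so a prometheus_ulimit set there overrides the
+# default of "-n 65536".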
+rc_ulimit="${prometheus_ulimit:--n 65536}" + +depend() { + after net +} + +start_pre() { + checkpath -d "$prometheus_storage_path" -o {{ prometheus_user }}:{{ prometheus_group }} +} + +reload() { + ebegin "Reloading $RC_SVCNAME" + supervise-daemon $RC_SVCNAME --signal HUP + eend $? +} diff --git a/roles/prometheus/templates/nginx_server.j2 b/roles/prometheus/templates/nginx_server.j2 new file mode 100644 index 0000000..a02c21a --- /dev/null +++ b/roles/prometheus/templates/nginx_server.j2 @@ -0,0 +1,9 @@ +location / { + proxy_pass http://127.0.0.1:{{ prometheus_port }}; + proxy_http_version 1.1; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; +} diff --git a/roles/prometheus/templates/snmp_init.j2 b/roles/prometheus/templates/snmp_init.j2 new file mode 100644 index 0000000..27e1f3f --- /dev/null +++ b/roles/prometheus/templates/snmp_init.j2 @@ -0,0 +1,14 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ prometheus_conf_dir }}" +command=/usr/bin/snmp_exporter +command_args="--config.file={{ prometheus_snmp_file | quote }} --web.listen-address={{ ('127.0.0.1:' ~ prometheus_snmp_port) | quote }}" +command_user="{{ prometheus_user }}:{{ prometheus_group }}" +pidfile="/var/run/$SVCNAME.pid" +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + +depend() { + after net +} diff --git a/roles/proxmox/defaults/main.yml b/roles/proxmox/defaults/main.yml new file mode 100644 index 0000000..3bd0ee9 --- /dev/null +++ b/roles/proxmox/defaults/main.yml @@ -0,0 +1 @@ +cpu_governor: conservative diff --git a/roles/proxmox/handlers/main.yml b/roles/proxmox/handlers/main.yml new file mode 100644 index 0000000..0024477 --- /dev/null +++ b/roles/proxmox/handlers/main.yml @@ -0,0 +1,16 @@ +- name: restart chrony + service: + name: chrony + state: restarted + + +- name: restart postfix + service: + name: postfix + state: restarted + + +- name: restart zed + service: + name: zed + state: restarted diff --git a/roles/proxmox/tasks/install.yml b/roles/proxmox/tasks/install.yml new file mode 100644 index 0000000..786898b --- /dev/null +++ b/roles/proxmox/tasks/install.yml @@ -0,0 +1,74 @@ +- name: set cpu scheduler in cron + cron: + name: set cpu scheduler + special_time: reboot + job: 'echo {{ cpu_governor | quote }} | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor > /dev/null' + user: root + + +- block: + - name: get current cpu scheduler types + shell: + cmd: cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor + register: result + changed_when: false + + + - name: change cpu scheduler + shell: + cmd: 'echo {{ cpu_governor | quote }} | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor' + when: (result.stdout_lines | unique | length > 1) or ((result.stdout_lines | unique)[0] != cpu_governor) + + rescue: + - name: report that cpu scheduler cannot be changed + debug: + msg: failed to change cpu scheduler + + +- name: disable enterprise repo + apt_repository: + repo: deb https://enterprise.proxmox.com/debian/pve bullseye pve-enterprise + filename: pve-enterprise.list + state: absent + update_cache: no + + +- name: enable community repo + apt_repository: + repo: deb http://download.proxmox.com/debian/pve bullseye pve-no-subscription + filename: pve-community.list + state: present + update_cache: no + + +- name: set datacenter configuration + lineinfile: + 
path: /etc/pve/datacenter.cfg + regexp: "^{{ item.name }}: " + line: "{{ item.name }}: {{ item.value }}" + mode: 0640 + owner: root + group: www-data + create: yes + loop: + - name: mac_prefix + value: "{{ mac_prefix }}" + - name: email_from + value: "{{ host_name }}@{{ mail_server.tld | d(tld) }}" + + +- name: enable auto-reboot on kernel panic + copy: + dest: /etc/sysctl.d/90-auto-reboot.conf + content: "kernel.panic = 5\n" + mode: 0644 + + +- name: set max arc cache size for zfs + lineinfile: + path: /etc/modprobe.d/zfs.conf + regexp: "^options zfs zfs_arc_max=" + line: "options zfs zfs_arc_max={{ zfs_arc_max }}" + create: yes + mode: 0644 + when: zfs_arc_max is defined diff --git a/roles/proxmox/tasks/mail.yml b/roles/proxmox/tasks/mail.yml new file mode 100644 index 0000000..f14a200 --- /dev/null +++ b/roles/proxmox/tasks/mail.yml @@ -0,0 +1,72 @@ +- name: install libsasl2-modules + package: + name: libsasl2-modules + + +- name: edit postfix config + lineinfile: + path: /etc/postfix/main.cf + regexp: '^{{ item.name | regex_escape() }}([^\S\r\n]*)=([^\S\r\n]*)' + line: '{{ item.name }} = {{ item.value }}' + notify: restart postfix + loop: + - { name: myhostname, value: "{{ host_fqdn }}" } + - { name: relayhost, value: "{{ mail_server.mta_actual_hostname ~ '.' ~ int_tld }}:465" } + - { name: sender_canonical_classes, value: "envelope_sender, header_sender" } + - { name: sender_canonical_maps, value: "regexp:/etc/postfix/sender_canonical_maps" } + - { name: smtp_header_checks, value: "regexp:/etc/postfix/header_check" } + - { name: smtp_use_tls, value: "yes" } + - { name: smtp_sasl_auth_enable, value: "yes" } + - { name: smtp_sasl_security_options, value: "noanonymous" } + - { name: smtp_tls_wrappermode, value: "yes" } + - { name: smtp_tls_security_level, value: "encrypt" } + - { name: smtp_sasl_password_maps, value: "texthash:/etc/postfix/sasl_passwd" } + - { name: smtp_tls_CAfile, value: "/etc/ssl/certs/ca-certificates.crt" } + - { name: notify_classes, value: "" } + - { name: mydestination, value: "" } + + +- name: edit master.cf + lineinfile: + path: /etc/postfix/master.cf + regexp: '^bounce([^\S\r\n]+)unix' + line: 'bounce unix - - n - 0 discard' + notify: restart postfix + + +- name: create postfix files + copy: + dest: "/etc/postfix/{{ item.name }}" + content: "{{ item.content }}" + mode: "{{ item.mode | d(omit) }}" + notify: restart postfix + loop: + - name: sasl_passwd + content: "{{ mail_server.mta_actual_hostname ~ '.' 
~ int_tld ~ ':465 ' ~ + mail_account.username ~ '@' ~ mail_server.tld ~ ':' ~ mail_account.password }}" + mode: '0600' + - name: sender_canonical_maps + content: '/.+/ {{ host_name }}@{{ mail_server.tld }}' + - name: header_check + content: '/From:.*/ REPLACE From: {{ host_name }} <{{ host_name }}@{{ mail_server.tld }}>' + + +- name: edit crontab mail config + lineinfile: + path: /etc/crontab + regexp: '^MAILTO=' + line: 'MAILTO=""' + insertafter: '^PATH=' + + +- name: edit zed config file + lineinfile: + path: /etc/zfs/zed.d/zed.rc + regexp: '^{{ item.name | upper | regex_escape() }}=' + line: '{{ item.name | upper }}="{{ item.value }}"' + notify: restart zed + loop: + - { name: zed_email_addr, value: "{{ maintainer_email }}" } + - { name: zed_email_prog, value: mail } + - { name: zed_email_opts, value: "-s '@SUBJECT@' @ADDRESS@ -r {{ mail_account.username ~ '@' ~ mail_server.tld }}" } + - { name: zed_notify_verbose, value: 1 } diff --git a/roles/proxmox/tasks/main.yml b/roles/proxmox/tasks/main.yml new file mode 100644 index 0000000..e1d9510 --- /dev/null +++ b/roles/proxmox/tasks/main.yml @@ -0,0 +1,13 @@ +- name: proxmox installation + include_tasks: install.yml + when: function == 'install' + + +- name: proxmox tls configuration + include_tasks: tls.yml + when: function == 'tls' + + +- name: proxmox mail configuration + include_tasks: mail.yml + when: (function == 'mail') and (mail_account is mapping) and (mail_server is mapping) diff --git a/roles/proxmox/tasks/tls.yml b/roles/proxmox/tasks/tls.yml new file mode 100644 index 0000000..2424df0 --- /dev/null +++ b/roles/proxmox/tasks/tls.yml @@ -0,0 +1,12 @@ +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: true + conf: + http: + ssl_conf_command: [] + diff --git a/roles/proxmox/templates/nginx_server.j2 b/roles/proxmox/templates/nginx_server.j2 new file mode 100644 index 0000000..b48118b --- /dev/null +++ b/roles/proxmox/templates/nginx_server.j2 @@ -0,0 +1,15 @@ +proxy_redirect off; + +location / { + proxy_pass https://127.0.0.1:8006; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_buffering off; + + client_max_body_size 0; + proxy_connect_timeout 300s; + proxy_read_timeout 300s; + proxy_send_timeout 300s; + send_timeout 300s; +} \ No newline at end of file diff --git a/roles/rclone/defaults/main.yml b/roles/rclone/defaults/main.yml new file mode 100644 index 0000000..13ec422 --- /dev/null +++ b/roles/rclone/defaults/main.yml @@ -0,0 +1,19 @@ +rclone_user: rclone +rclone_group: rclone +rclone_conf_dir: /etc/rclone +rclone_mount_dir: /opt/rclone + +rclone_single_args: + - syslog + - allow-other + +rclone_multi_args: + buffer-size: 3M + checkers: 3 + contimeout: 4m0s + max-backlog: 1000 + retries: 10 + transfers: 3 + attr-timeout: 2s + +rclone_remotes: [] diff --git a/roles/rclone/handlers/main.yml b/roles/rclone/handlers/main.yml new file mode 100644 index 0000000..906d52f --- /dev/null +++ b/roles/rclone/handlers/main.yml @@ -0,0 +1,5 @@ +- name: restart all rclone daemons + service: + name: "rclone-{{ item }}" + state: restarted + loop: "{{ rclone_remotes | map(attribute='name') | list }}" \ No newline at end of file diff --git a/roles/rclone/tasks/main.yml b/roles/rclone/tasks/main.yml new file mode 100644 index 0000000..1640a76 --- /dev/null +++ b/roles/rclone/tasks/main.yml @@ -0,0 +1,120 @@ +- name: install rclone and dependencies + include_tasks: tasks/install_packages.yml + vars: + 
package: + - fuse + - rclone + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ rclone_user }}" + group: "{{ rclone_group }}" + + +- name: create config directory + file: + path: "{{ rclone_conf_dir }}" + state: directory + owner: "{{ rclone_user }}" + group: "{{ rclone_group }}" + + +- name: create root mount directory + file: + path: "{{ rclone_mount_dir }}" + state: directory + mode: 0755 + owner: "{{ rclone_user }}" + group: "{{ rclone_group }}" + + +- name: create other mount directories + file: + path: "{{ rclone_mount_dir ~ '/' ~ item }}" + state: directory + mode: 0755 + owner: "{{ rclone_user }}" + group: "{{ rclone_group }}" + loop: "{{ rclone_remotes | map(attribute='name') | list }}" + + +- name: template config file for each remote + template: + src: config.j2 + dest: "{{ rclone_conf_dir }}/{{ remote.name }}.conf" + force: no + mode: 0600 + owner: "{{ rclone_user }}" + group: "{{ rclone_group }}" + lstrip_blocks: yes + notify: restart all rclone daemons + loop: "{{ rclone_remotes }}" + loop_control: + loop_var: remote + + +- name: template init script for each remote + template: + src: init.j2 + dest: "/etc/init.d/rclone-{{ remote.name }}" + force: yes + mode: "+x" + notify: restart all rclone daemons + loop: "{{ rclone_remotes }}" + loop_control: + loop_var: remote + + +- name: edit fuse config + lineinfile: + path: /etc/fuse.conf + line: user_allow_other + notify: restart all rclone daemons + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ rclone_conf_dir }}" + + +- name: enable and start rclone daemons + service: + name: "rclone-{{ item }}" + enabled: yes + state: started + loop: "{{ rclone_remotes | map(attribute='name') | list }}" + + +- set_fact: + rclone_mount_dir_: "{{ rclone_mount_dir }}" + + +- name: setup backups + include_role: + name: backup + vars: + function: setup + backup_cfg: + repo: "{{ remote.name }}" + password: "{{ lookup('vars', 'backup_password') }}" + repo_password: "{{ remote.backup_password }}" + tags: automated + max_size: "1G" + dirs: + - "{{ rclone_mount_dir_ ~ '/' ~ remote.name }}" + fs_snapshot: no + when: (backup_password is defined) and (remote.backup_password is defined) + loop: "{{ rclone_remotes }}" + loop_control: + loop_var: remote diff --git a/roles/rclone/templates/config.j2 b/roles/rclone/templates/config.j2 new file mode 100644 index 0000000..243d051 --- /dev/null +++ b/roles/rclone/templates/config.j2 @@ -0,0 +1,9 @@ +[{{ remote.name }}] + +{% for option in remote.options | d({}) | dict2items -%} + {% if option.value is mapping -%} + {{ option.key }} = {{ option.value | to_json }} + {% else -%} + {{ option.key }} = {{ option.value }} + {% endif -%} +{% endfor %} diff --git a/roles/rclone/templates/init.j2 b/roles/rclone/templates/init.j2 new file mode 100644 index 0000000..ce0e50a --- /dev/null +++ b/roles/rclone/templates/init.j2 @@ -0,0 +1,37 @@ +#!/sbin/openrc-run + +{% set ns = namespace(single_args=[], multi_args=[], remote_args=[]) -%} + +{% for arg in (rclone_single_args | d([])) -%} + {% set ns.single_args = ns.single_args + ['--' ~ arg] -%} +{% endfor -%} + +{% for arg in (rclone_multi_args | d({}) | dict2items) -%} + {% set ns.multi_args = ns.multi_args + ['--' ~ arg.key ~ ' ' ~ (arg.value | quote)] -%} +{% endfor -%} + +{% for arg in (remote.args | d([])) -%} + {% set ns.remote_args = ns.remote_args + ['--' ~ arg] -%} +{% endfor -%} + + 
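+{#- the namespace() object above is used because variables assigned inside a
+    Jinja2 for loop do not persist outside the loop body; the ns.* lists are
+    joined into command_args further down -#}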
+name="$SVCNAME" +directory="{{ rclone_mount_dir }}" +command="/usr/bin/rclone" +command_user="{{ rclone_user }}:{{ rclone_group }}" +command_args="mount {{ remote.name }}: {{ (rclone_mount_dir ~ '/' ~ remote.name) | quote }} --config {{ (rclone_conf_dir ~ '/' ~ remote.name ~ '.conf') | quote }} {{ ns.single_args | join(' ') }} {{ ns.multi_args | join(' ') }} {{ ns.remote_args | join(' ') }}" +pidfile="/var/run/$SVCNAME.pid" + + +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + + +depend() { + need net + use dns +} + +start_pre() { + fusermount -u {{ (rclone_mount_dir ~ '/' ~ remote.name) | quote }} > /dev/null 2>&1 || true +} \ No newline at end of file diff --git a/roles/redis/defaults/main.yml b/roles/redis/defaults/main.yml new file mode 100644 index 0000000..8141501 --- /dev/null +++ b/roles/redis/defaults/main.yml @@ -0,0 +1,18 @@ +redis_user: redis +redis_group: redis +redis_conf_dir: /etc/redis +redis_conf_file: "{{ redis_conf_dir }}/redis.conf" + +redis_default_config: + bind: + - "127.0.0.1" + - "-::1" + protected-mode: yes + port: "0" + unixsocket: "{{ redis_unix_socket | d('/run/redis/redis.sock') }}" + unixsocketperm: 777 + dir: /var/lib/redis + syslog-enabled: yes + crash-log-enabled: no + maxmemory: 96M + maxmemory-policy: volatile-ttl diff --git a/roles/redis/handlers/main.yml b/roles/redis/handlers/main.yml new file mode 100644 index 0000000..83b67cb --- /dev/null +++ b/roles/redis/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart redis + service: + name: redis + state: restarted diff --git a/roles/redis/tasks/main.yml b/roles/redis/tasks/main.yml new file mode 100644 index 0000000..5620900 --- /dev/null +++ b/roles/redis/tasks/main.yml @@ -0,0 +1,84 @@ +- name: set redis_cfg + set_fact: + redis_cfg: "{{ redis_default_config | d({}) | combine(redis_config | d({}), recursive=true) }}" + + +- name: install redis + include_tasks: tasks/install_packages.yml + vars: + package: + - redis + - alpine: redis-openrc + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ redis_user }}" + group: "{{ redis_group }}" + + +- name: remove old redis configs + file: + path: "{{ item }}" + state: absent + loop: + - /etc/redis.conf + - /etc/sentinel.conf + notify: restart redis + + +- name: create redis config dir + file: + path: "{{ redis_conf_dir }}" + state: directory + mode: 0700 + owner: "{{ redis_user }}" + group: "{{ redis_group }}" + notify: restart redis + + +- name: template redis config file + template: + src: redis.j2 + dest: "{{ redis_conf_file }}" + force: yes + mode: 0400 + lstrip_blocks: yes + owner: "{{ redis_user }}" + group: "{{ redis_group }}" + notify: restart redis + + +- name: edit service config + lineinfile: + path: /etc/conf.d/redis + regexp: "^{{ item.name }}=" + line: "{{ item.name }}=\"{{ item.value }}\"" + notify: restart redis + loop: + - name: command_user + value: "{{ redis_user }}:{{ redis_group }}" + - name: cfgfile + value: "{{ redis_conf_file }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ redis_conf_dir }}" + + +- name: enable and start redis + service: + name: redis + enabled: yes + state: started diff --git a/roles/redis/templates/redis.j2 b/roles/redis/templates/redis.j2 new file mode 100644 index 0000000..ec7de0f --- /dev/null +++ b/roles/redis/templates/redis.j2 @@ -0,0 +1,9 @@ +{% for option in (redis_cfg | d({}) | dict2items) 
-%} + {% if option.value is boolean -%} + {{ option.key }} {{ 'yes' if option.value else 'no' }} + {% elif option.value | type_debug == 'list' -%} + {{ option.key }} {{ option.value | join(' ') }} + {% else -%} + {{ option.key }} {{ option.value }} + {% endif -%} +{%- endfor %} diff --git a/roles/rest-server/defaults/main.yml b/roles/rest-server/defaults/main.yml new file mode 100644 index 0000000..991e57b --- /dev/null +++ b/roles/rest-server/defaults/main.yml @@ -0,0 +1,22 @@ +rest_server_user: restic +rest_server_group: restic + +rest_server_conf_dir: /etc/rest-server +rest_server_data_dir: /opt/storage +rest_server_passwd_dir: /etc/rest-server/passwd + +rest_server_port: "{{ 443 if host_tls else 80 }}" + +rest_server_append_mode: yes +rest_server_max_repo_size: "{{ 200 * 1024 * 1024 * 1024 }}" +rest_server_private_repos: yes + +rest_server_tls_key_file: "{{ rest_server_conf_dir }}/tls.key" +rest_server_tls_cert_file: "{{ rest_server_conf_dir }}/tls.pem" + +rest_server_maintenance_mark: "----- end of list -----" + +rest_server_maintenance_schedule: + minute: 0 + hour: 5 + weekday: 6 diff --git a/roles/rest-server/handlers/main.yml b/roles/rest-server/handlers/main.yml new file mode 100644 index 0000000..bcc3bbc --- /dev/null +++ b/roles/rest-server/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart rest-server + service: + name: rest-server + state: restarted diff --git a/roles/rest-server/tasks/add_repo.yml b/roles/rest-server/tasks/add_repo.yml new file mode 100644 index 0000000..0dca72d --- /dev/null +++ b/roles/rest-server/tasks/add_repo.yml @@ -0,0 +1,120 @@ +- name: fail if repo parameters are missing + fail: + msg: repo parameters are missing or incorrect + when: (repo is not mapping) or (repo.user is not defined) or + (repo.password is not defined) + + +- name: protect against malicious repo names + fail: + msg: this backup repo name is reserved + when: (repo.name is string) and (((repo.name | lower) == 'config') or + ((repo.name | lower) == 'data') or ((repo.name | lower) == 'index') or + ((repo.name | lower) == 'keys') or ((repo.name | lower) == 'locks') or + ((repo.name | lower) == 'snapshots')) + + +- name: get rest server hostname + set_fact: + rest_server_hostname: "{{ repo.server if (repo.server is string and repo.server | length > 0) else services.rest_server.hostname }}" + + +- name: get rest server parms + set_fact: + rest_server_parms: + conf_dir: "{{ hostvars[rest_server_hostname]['rest_server_conf_dir'] | d(rest_server_conf_dir) }}" + data_dir: "{{ hostvars[rest_server_hostname]['rest_server_data_dir'] | d(rest_server_data_dir) }}" + passwd_dir: "{{ hostvars[rest_server_hostname]['rest_server_passwd_dir'] | d(rest_server_passwd_dir) }}" + user: "{{ hostvars[rest_server_hostname]['rest_server_user'] | d(rest_server_user) }}" + group: "{{ hostvars[rest_server_hostname]['rest_server_group'] | d(rest_server_group) }}" + maintenance_mark: "{{ hostvars[rest_server_hostname]['rest_server_maintenance_mark'] | d(rest_server_maintenance_mark) }}" + + +- block: + - name: ensure user exists in htpasswd file + htpasswd: + path: "{{ rest_server_parms.data_dir }}/.htpasswd" + create: yes + crypt_scheme: bcrypt + mode: 0400 + owner: "{{ rest_server_parms.user }}" + group: "{{ rest_server_parms.group }}" + name: "{{ repo.user }}" + password: "{{ repo.password }}" + + + - name: set restic repo and password facts + set_fact: + restic_repo: "{{ rest_server_parms.data_dir ~ '/' ~ repo.user ~ (('/' ~ repo.name) if (repo.name is string and repo.name | length > 0) else '') }}" + 
restic_password: "{{ repo.repo_password if (repo.repo_password is string and repo.repo_password | length > 0) else repo.password }}" + restic_passwd_file: "{{ rest_server_parms.passwd_dir ~ '/' ~ repo.user ~ (('.' ~ repo.name) if (repo.name is string and repo.name | length > 0) else '') }}" + no_log: yes + + + - name: set restic retention facts + set_fact: + restic_retention: "{{ [ + (('--keep-last ' ~ (repo.retention.last | quote)) if (repo.retention.last is defined) else ''), + (('--keep-hourly ' ~ (repo.retention.hourly | quote)) if (repo.retention.hourly is defined) else ''), + (('--keep-daily ' ~ (repo.retention.daily | quote)) if (repo.retention.daily is defined) else ''), + (('--keep-weekly ' ~ (repo.retention.weekly | quote)) if (repo.retention.weekly is defined) else ''), + (('--keep-monthly ' ~ (repo.retention.monthly | quote)) if (repo.retention.monthly is defined) else ''), + (('--keep-yearly ' ~ (repo.retention.yearly | quote)) if (repo.retention.yearly is defined) else ''), + + (('--keep-within ' ~ (repo.retention.within | quote)) if (repo.retention.within is defined) else ''), + (('--keep-within-hourly ' ~ (repo.retention.within_hourly | quote)) if (repo.retention.within_hourly is defined) else ''), + (('--keep-within-daily ' ~ (repo.retention.within_daily | quote)) if (repo.retention.within_daily is defined) else ''), + (('--keep-within-weekly ' ~ (repo.retention.within_weekly | quote)) if (repo.retention.within_weekly is defined) else ''), + (('--keep-within-monthly ' ~ (repo.retention.within_monthly | quote)) if (repo.retention.within_monthly is defined) else ''), + (('--keep-within-yearly ' ~ (repo.retention.within_yearly | quote)) if (repo.retention.within_yearly is defined) else '') + ] | select() | list | join(' ') }}" + when: repo.retention is defined and repo.retention is mapping + + + - name: create passwd file for repo + copy: + dest: "{{ restic_passwd_file }}" + content: "{{ restic_password }}" + mode: 0400 + owner: "{{ rest_server_parms.user }}" + group: "{{ rest_server_parms.group }}" + + + - name: create repository + shell: + cmd: /usr/bin/restic init + environment: + RESTIC_REPOSITORY: "{{ restic_repo }}" + RESTIC_PASSWORD: "{{ restic_password }}" + register: result + changed_when: "'created restic repository' in result.stdout" + failed_when: "(result.rc != 0) and not ('config file already exists' in result.stderr)" + become: yes + become_user: "{{ rest_server_parms.user }}" + become_method: su + become_flags: "-s /bin/ash" + + + - name: edit maintenance script + lineinfile: + path: "{{ rest_server_parms.conf_dir }}/maintenance.sh" + regexp: '^/usr/bin/restic(\s+)--repo(\s+){{ restic_repo | quote | regex_escape }}(\s+)(.*)$' + line: "/usr/bin/restic --repo {{ restic_repo | quote }} --password-file {{ restic_passwd_file | quote }} forget --prune {{ restic_retention | d('--keep-last 14') }}" + insertbefore: "^# {{ rest_server_parms.maintenance_mark }}" + firstmatch: yes + + + - name: unset restic repo and password facts + set_fact: + restic_repo: "{{ None }}" + restic_password: "{{ None }}" + restic_passwd_file: "{{ None }}" + no_log: yes + + delegate_to: "{{ rest_server_hostname }}" + + +- name: unset facts + set_fact: + rest_server_parms: "{{ None }}" + rest_server_hostname: "{{ None }}" diff --git a/roles/rest-server/tasks/install.yml b/roles/rest-server/tasks/install.yml new file mode 100644 index 0000000..6cc7385 --- /dev/null +++ b/roles/rest-server/tasks/install.yml @@ -0,0 +1,162 @@ +- name: install dependencies + include_tasks: 
tasks/install_packages.yml + vars: + package: + - apache2-utils + - py3-passlib + - fuse + - restic + - libcap + + +- name: install rest-server + include_tasks: tasks/install_packages.yml + vars: + package: + - rest-server + - rest-server-openrc + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ rest_server_user }}" + group: "{{ rest_server_group }}" + dir: "{{ rest_server_data_dir }}" + notify: restart rest-server + + +- name: create directories + file: + path: "{{ item }}" + state: directory + owner: "{{ rest_server_user }}" + group: "{{ rest_server_group }}" + loop: + - "{{ rest_server_conf_dir }}" + - "{{ rest_server_data_dir }}" + + +- name: create password directory + file: + path: "{{ rest_server_passwd_dir }}" + state: directory + mode: 0700 + owner: "{{ rest_server_user }}" + group: "{{ rest_server_group }}" + + +- name: edit service config + lineinfile: + path: /etc/conf.d/rest-server + regexp: "^{{ item.name | upper }}=" + line: "{{ item.name | upper }}=\"{{ item.value }}\"" + notify: restart rest-server + loop: + - name: rest_user + value: "{{ rest_server_user }}" + - name: rest_group + value: "{{ rest_server_group }}" + - name: rest_server_path + value: "{{ rest_server_data_dir }}" + - name: rest_server_opts + value: "{{ [('--append-only' if (rest_server_append_mode | d(false) == true) else ''), + '--listen :' ~ rest_server_port, + (('--max-size ' ~ (rest_server_max_size | quote)) if (rest_server_max_size is defined) else ''), + ('--private-repos' if (rest_server_private_repos | d(false) == true) else ''), + ('--tls' if host_tls else ''), + (('--tls-key ' ~ (rest_server_tls_key_file | quote)) if host_tls else ''), + (('--tls-cert ' ~ (rest_server_tls_cert_file | quote)) if host_tls else '') + ] | select() | list | join(' ') }}" + + +- name: add logging to init script + lineinfile: + path: /etc/init.d/rest-server + line: "start_stop_daemon_args=\"--stdout-logger logger --stderr-logger logger\"" + insertafter: "^pidfile=" + firstmatch: yes + notify: restart rest-server + + +- name: add setcap to init script + lineinfile: + path: /etc/init.d/rest-server + line: "\tsetcap 'cap_net_bind_service=+ep' /usr/bin/rest-server" + insertafter: '^start_pre\(\)' + firstmatch: yes + notify: restart rest-server + + +- block: + - name: deploy ECC cert + include_role: + name: certs + vars: + common: + owner: "{{ rest_server_user }}" + group: "{{ rest_server_group }}" + post_hook: service rest-server restart + notify: restart rest-server + ecc: yes + certs: + - id: rest-server-ecc + cert: "{{ rest_server_tls_cert_file }}" + key: "{{ rest_server_tls_key_file }}" + when: host_tls + + +- name: initialize htpasswd + file: + path: "{{ rest_server_data_dir }}/.htpasswd" + state: touch + mode: 0400 + owner: "{{ rest_server_user }}" + group: "{{ rest_server_group }}" + modification_time: preserve + access_time: preserve + notify: restart rest-server + + +- name: template maintenance script + template: + src: maintenance.j2 + dest: "{{ rest_server_conf_dir }}/maintenance.sh" + mode: 0500 + force: no + lstrip_blocks: yes + owner: "{{ rest_server_user }}" + group: "{{ rest_server_group }}" + + +- name: create cron job for maintenance script + cron: + name: rest-server-maintenance + minute: "{{ rest_server_maintenance_schedule.minute | d(0) }}" + hour: "{{ rest_server_maintenance_schedule.hour | d(5) }}" + weekday: "{{ rest_server_maintenance_schedule.weekday | d(6) }}" + job: "{{ rest_server_conf_dir }}/maintenance.sh" + user: "{{ rest_server_user }}" + + 
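+# illustrative sketch only (values and variable names are placeholders): the
+# "setup extra backup repositories" task below loops over rest_server_backup_repos,
+# whose entries take the same keys that add_repo.yml validates, for example:
+#
+#   rest_server_backup_repos:
+#     - user: offsite
+#       password: "{{ vault_offsite_password }}"
+#       name: archive
+#       repo_password: "{{ vault_offsite_repo_password }}"
+#       retention:
+#         within_daily: 7d
+#         within_monthly: 1y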
+- name: flush handlers + meta: flush_handlers + + +- name: setup extra backup repositories + include_role: + name: rest-server + vars: + function: add_repo + repo: "{{ rp }}" + loop: "{{ rest_server_backup_repos | d([]) }}" + loop_control: + loop_var: rp + + +- name: enable and start rest-server + service: + name: rest-server + enabled: yes + state: started diff --git a/roles/rest-server/tasks/main.yml b/roles/rest-server/tasks/main.yml new file mode 100644 index 0000000..de3eafe --- /dev/null +++ b/roles/rest-server/tasks/main.yml @@ -0,0 +1,8 @@ +- name: rest-server installation + include_tasks: install.yml + when: function == 'install' + + +- name: add repository + include_tasks: add_repo.yml + when: function == 'add_repo' diff --git a/roles/rest-server/templates/maintenance.j2 b/roles/rest-server/templates/maintenance.j2 new file mode 100644 index 0000000..937a2c1 --- /dev/null +++ b/roles/rest-server/templates/maintenance.j2 @@ -0,0 +1,8 @@ +#!/bin/ash + +logger 'starting rest-server maintenance' + + +# {{ rest_server_maintenance_mark }} + +logger 'rest-server maintenance completed' diff --git a/roles/restic/defaults/main.yml b/roles/restic/defaults/main.yml new file mode 100644 index 0000000..1cd0bb2 --- /dev/null +++ b/roles/restic/defaults/main.yml @@ -0,0 +1,10 @@ +restic_default_backup_retention: + within_daily: 7d + within_weekly: 1m + within_monthly: 1y + within_yearly: 10y + +restic_dir: + linux: /etc/restic + windows: "%ProgramFiles%\\restic" + windows_psh: "$Env:ProgramFiles\\restic" diff --git a/roles/restic/tasks/main.yml b/roles/restic/tasks/main.yml new file mode 100644 index 0000000..5b0c301 --- /dev/null +++ b/roles/restic/tasks/main.yml @@ -0,0 +1,149 @@ +- name: fail if backup parameters are missing + fail: + msg: backup parameters are missing or incorrect + when: (backup is not mapping) or (backup.dirs is not defined) + + +- name: install restic + include_tasks: tasks/install_packages.yml + vars: + package: + - restic + + +- name: add restic repo + include_role: + name: rest-server + vars: + function: add_repo + repo: + user: "{{ backup.user | d(host_name) }}" + password: "{{ backup.password }}" + name: "{{ backup.repo | d(None) }}" + retention: "{{ backup.retention | d(restic_default_backup_retention) | d({}) }}" + repo_password: "{{ backup.repo_password | d(None) }}" + server: "{{ backup.server | d(None) }}" + + +- name: set backup id + set_fact: + restic_backup_id: "{{ (backup.user | d(host_name)) ~ (('-' ~ backup.repo) if backup.repo is defined else '') }}" + + +- block: + - name: ensure work dir exists + file: + path: "{{ restic_dir.linux }}" + state: directory + mode: 0700 + + - name: set exclude file location + set_fact: + restic_exclude_file_path: "{{ restic_dir.linux ~ '/exclude-file-' ~ restic_backup_id }}" + + - name: create exclude file + copy: + content: "{{ backup.filter | join('\n') }}" + dest: "{{ restic_exclude_file_path }}" + + when: ansible_system != 'Win32NT' and (backup.filter | type_debug == 'list') and (backup.filter | length > 0) + + +- block: + - name: ensure work dir exists + win_file: + path: "{{ restic_dir.windows }}" + state: directory + + - name: set exclude file location + set_fact: + restic_exclude_file_path: "{{ restic_dir.windows ~ '\\exclude-file-' ~ restic_backup_id }}" + restic_exclude_file_path_psh: "{{ restic_dir.windows_psh ~ '\\exclude-file-' ~ restic_backup_id }}" + + - name: create exclude file + win_copy: + content: "{{ backup.filter | join('\r\n') }}" + dest: "{{ restic_exclude_file_path }}" + + when: ansible_system == 
'Win32NT' and (backup.filter | type_debug == 'list') and (backup.filter | length > 0) + + +- name: build restic args + set_fact: + restic_args: "{{ [ + ('--one-file-system' if (backup.fs_single | d(false) == true) else ''), + ('--use-fs-snapshot' if (backup.fs_snapshot | d(false) == true) else ''), + (('--tag ' ~ (backup.tags | select() | list | join(',') | quote)) if (backup.tags | type_debug == 'list') else ''), + (('--tag ' ~ (backup.tags | quote)) if (backup.tags is string and backup.tags | length > 0) else ''), + (('--limit-download ' ~ (backup.download_limit | quote)) if (backup.download_limit is defined and backup.download_limit != 0) else ''), + (('--limit-upload ' ~ (backup.upload_limit | quote)) if (backup.upload_limit is defined and backup.upload_limit != 0) else ''), + (('--exclude-larger-than ' ~ (backup.max_size | quote)) if (backup.max_size is string and backup.max_size | length > 0) else ''), + (('--iexclude-file \"' ~ restic_exclude_file_path_psh ~ '\"') if ((backup.filter | type_debug == 'list') and (backup.filter | length > 0) and ansible_system == 'Win32NT') else ''), + (('--iexclude-file ' ~ (restic_exclude_file_path | quote)) if ((backup.filter | type_debug == 'list') and (backup.filter | length > 0) and ansible_system != 'Win32NT') else '') + ] | select() | list | join(' ') }}" + restic_item_list: "{{ (backup.dirs if (backup.dirs | type_debug == 'list') else [backup.dirs]) | map('quote') | join(' ') }}" + + +- name: build env vars + set_fact: + restic_env_vars: + RESTIC_PASSWORD: "{{ backup.repo_password | d(backup.password) }}" + RESTIC_REPOSITORY: "{{ ('rest:' ~ (services.backup.protocol | d('https')) ~ '://' ~ (backup.user | d(host_name)) ~ ':' ~ backup.password ~ '@' ~ + (backup.server | d(services.backup.hostname)) ~ '.' ~ + (services.backup.tld | d(int_tld)) ~ ':' ~ (services.backup.port | d('443')) ~ '/' ~ + (backup.user | d(host_name)) ~ (('/' ~ backup.repo) if backup.repo is defined else '')) }}" + + +- name: add backup job to cron + cron: + name: "{{ 'restic-backup-' ~ restic_backup_id }}" + minute: "{{ backup.minute | d(59 | random(seed=restic_backup_id)) }}" + hour: "{{ backup.hour | d(4 | random(start=1, seed=restic_backup_id)) }}" + day: "{{ backup.day | d('*') }}" + weekday: "{{ backup.weekday | d('*') }}" + month: "{{ backup.month | d('*') }}" + job: "{{ restic_env_vars.keys() | zip(restic_env_vars.values() | map('quote')) | map('join', '=') | list | join(' ') }} restic backup {{ restic_args }} {{ restic_item_list }}" + when: (ansible_system != 'Win32NT') and (backup.schedule | d(true) == true) + + +- block: + - name: template backup script to remote host + win_template: + src: win_script.j2 + dest: "{{ restic_dir.windows ~ '\\backup-' ~ restic_backup_id ~ '.ps1' }}" + lstrip_blocks: yes + + + - name: add scheduled task + win_scheduled_task: + name: "{{ 'Restic Backup (' ~ restic_backup_id ~ ')' }}" + description: Initiate a Restic backup job + allow_demand_start: yes + allow_hard_terminate: yes + compatibility: 3 + execution_time_limit: PT18H + disallow_start_if_on_batteries: no + enabled: yes + logon_type: service_account + multiple_instances: 2 + username: SYSTEM + run_level: highest + start_when_available: yes + stop_if_going_on_batteries: no + wake_to_run: no + update_password: no + actions: + - path: powershell.exe + arguments: "{{ '-ExecutionPolicy Unrestricted -File \"' ~ restic_dir.windows ~ '\\backup-' ~ restic_backup_id ~ '.ps1\"' }}" + triggers: + - type: "{{ backup.interval }}" + enabled: yes + start_boundary: "2020-01-01T{{ '%02d' | 
format(backup.hour | d(4 | random(start=1, seed=restic_backup_id))) }}\ + :{{ '%02d' | format(backup.minute | d(59 | random(seed=restic_backup_id))) }}:00" + random_delay: "{{ backup.random_delay | d(omit) }}" + days_of_week: "{{ backup.days_of_week | d(omit) }}" + days_of_month: "{{ backup.days_of_month | d(omit) }}" + weeks_interval: "{{ backup.weeks_interval | d(omit) }}" + when: (backup.schedule | d(true) == true) + + when: ansible_system == 'Win32NT' diff --git a/roles/restic/templates/win_script.j2 b/roles/restic/templates/win_script.j2 new file mode 100644 index 0000000..1e06ae7 --- /dev/null +++ b/roles/restic/templates/win_script.j2 @@ -0,0 +1,6 @@ +{% for envvar in restic_env_vars | dict2items -%} + $Env:{{ envvar.key }} = "{{ envvar.value }}" +{% endfor %} + +& "{{ restic_dir.windows_psh }}\restic.exe" backup {{ restic_args }} {{ restic_item_list }} + diff --git a/roles/roundcube/defaults/main.yml b/roles/roundcube/defaults/main.yml new file mode 100644 index 0000000..61bf90c --- /dev/null +++ b/roles/roundcube/defaults/main.yml @@ -0,0 +1,156 @@ +roundcube_user: roundcube +roundcube_group: webmail +roundcube_dir: /opt/roundcube +roundcube_enigma_dir: "{{ roundcube_dir }}/enigma-keys" + +roundcube_fpm_socket: /var/run/php7-fpm.sock +roundcube_mime_types_file: mime.types + +roundcube_custom_logo: no + +roundcube_default_config: + db_dsnw: "pgsql://{{ database_user }}:{{ database_pass }}@{{ database_host }}/{{ database_name }}" + log_driver: syslog + log_logins: yes + + imap_host: "tls://{{ mail_server.mua_actual_hostname ~ '.' ~ host_tld }}" + imap_conn_options: + ssl: + verify_peer: yes + capath: /etc/ssl/certs + + imap_vendor: dovecot + imap_cache: db + messages_cache: yes + imap_cache_ttl: 2d + messages_cache_ttl: 2d + + smtp_host: "tls://{{ mail_server.mta_actual_hostname ~ '.' 
~ host_tld }}" + smtp_conn_options: + ssl: + verify_peer: yes + capath: /etc/ssl/certs + + smtp_xclient_login: yes + smtp_xclient_addr: yes + + enable_installer: no + skin_logo: "{{ { '[favicon]': 'custom/favicon.ico', '*': 'custom/logo.png' } if (roundcube_custom_logo | d(false) == true) else 'null' }}" + use_https: yes + + login_rate_limit: 15 + display_product_info: 2 + session_lifetime: 40320 + session_domain: ".{{ int_tld }}" + session_name: rc_sessid + session_auth_name: rc_sessauth + session_samesite: Strict + + proxy_whitelist: + - 127.0.0.1 + + des_key: "{{ (host_name ~ 'des_key') | hash('sha512') | truncate(24, False, '') }}" + cipher_method: ChaCha20-Poly1305 + + username_domain: "{{ mail_server.tld }}" + username_domain_forced: yes + + max_message_size: "{{ mail_server.max_mail_size_bytes | int }}" + max_disclosed_recipients: 10 + + product_name: "{{ org }} | Mail" + useragent: "null" + + identities_level: 0 + identity_image_size: 128 + + mime_types: "{{ roundcube_dir }}/config/{{ roundcube_mime_types_file }}" + + language: en_US + + date_format: "d.m.Y" + date_long: "d.m.Y H:i" + + drafts_mbox: Drafts + junk_mbox: Junk + sent_mbox: Sent + trash_mbox: Trash + + min_refresh_interval: 30 + undo_timeout: 10 + + enable_spellcheck: yes + spellcheck_engine: pspell + spellcheck_languages: + en: English + ru: Русский + + contact_form_mode: business + collected_recipients: yes + collected_senders: yes + + addressbook_sort_col: name + show_images: 3 + htmleditor: 4 + draft_autosave: 60 + + refresh_interval: 30 + check_all_folders: yes + + reply_mode: 1 + + default_font: Tahoma + message_show_email: yes + + + +roundcube_plugins: + - name: persistent_login + where: texxasrulez/persistent_login + major: 1 + config: + ifpl_login_expire: 40320 + ifpl_cookie_name: "rc_{{ org | lower | replace(' ', '') }}_plogin" + + - name: contextmenu + where: johndoh/roundcube-contextmenu + + - name: html5_notifier + where: stremlau/html5_notifier + config: + html5_notifier_duration: 2 + html5_notifier_smbox: 2 + + - name: show_folder_size + where: jfcherng-roundcube/plugin-show-folder-size + + - name: zipdownload + - name: userinfo + - name: attachment_reminder + - name: hide_blockquote + - name: reconnect + - name: filesystem_attachments + + - name: markasjunk + config: + markasjunk_read_spam: yes + + - name: enigma + config: + enigma_pgp_homedir: "{{ roundcube_enigma_dir }}" + enigma_pgp_cipher_algo: "null" + enigma_pgp_digest_algo: "null" + enigma_signatures: yes + enigma_decryption: yes + enigma_encryption: yes + enigma_password_time: 120 + enigma_passwordless: yes + + - name: managesieve + config: + managesieve_host: "tls://{{ mail_server.mua_actual_hostname ~ '.' 
~ host_tld }}" + managesieve_conn_options: + ssl: + verify_peer: yes + capath: /etc/ssl/certs + managesieve_mbox_encoding: UTF-8 diff --git a/roles/roundcube/handlers/main.yml b/roles/roundcube/handlers/main.yml new file mode 100644 index 0000000..95e30b8 --- /dev/null +++ b/roles/roundcube/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart php fpm + service: + name: php-fpm7 + state: restarted diff --git a/roles/roundcube/tasks/db.yml b/roles/roundcube/tasks/db.yml new file mode 100644 index 0000000..a2587d3 --- /dev/null +++ b/roles/roundcube/tasks/db.yml @@ -0,0 +1,43 @@ +- name: check if system table already exists + include_role: + name: postgres + apply: + delegate_to: "{{ inventory_hostname if database_self_hosted else services.db.hostname }}" + vars: + function: run_query + query: + database: "{{ database_name }}" + text: "SELECT to_regclass('public.system');" + + +- name: set db_is_empty fact + set_fact: + db_is_empty: "{{ (query_result is defined) and (query_result[0].to_regclass is none) }}" + + +- block: + - name: slurp script from roundcube to ansible + slurp: + src: "{{ roundcube_dir }}/SQL/postgres.initial.sql" + register: fetched + + - name: run script + include_role: + name: postgres + apply: + delegate_to: "{{ inventory_hostname if database_self_hosted else services.db.hostname }}" + vars: + function: run_script + script: + database: "{{ database_name }}" + text: "{{ fetched.content | b64decode }}" + refresh_privs: yes + as_single_query: yes + user: "{{ database_user }}" + + when: db_is_empty == true + + +- name: unset db_empty fact + set_fact: + db_is_empty: "{{ None }}" diff --git a/roles/roundcube/tasks/main.yml b/roles/roundcube/tasks/main.yml new file mode 100644 index 0000000..10569cb --- /dev/null +++ b/roles/roundcube/tasks/main.yml @@ -0,0 +1,212 @@ +- name: set roundcube_cfg + set_fact: + roundcube_cfg: "{{ roundcube_default_config | d({}) | combine(roundcube_config | d({}), recursive=true) }}" + + +- name: install dependencies + package: + name: + - php8 + - curl + - libgd + - php8-fpm + - aspell-libs + - aspell-en + - aspell-ru + - aspell + - aspell-lang + - gnupg + + +- name: install php dependencies + package: + name: + - php8-dom + - php8-session + - php8-xml + - php8-intl + - php8-json + - php8-pdo + - php8-pdo_mysql + - php8-pdo_pgsql + - php8-mbstring + - php8-openssl + - php8-ctype + - php8-curl + - php8-fileinfo + - php8-exif + - php8-iconv + - php8-gd + - php8-zip + - php8-pspell + - php8-pcntl + - php8-xmlwriter + - php8-tokenizer + + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + dir: "{{ roundcube_dir }}" + create_home: no + + +- name: create roundcube directory structure + file: + path: "{{ item.path }}" + state: directory + mode: "{{ item.mode }}" + owner: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + loop: + - path: "{{ roundcube_dir }}" + mode: "0775" + - path: "{{ roundcube_enigma_dir }}" + mode: "0770" + + +- name: install php and php-fpm + include_role: + name: php + vars: + php_version: 8 + php_install_fpm: yes + php_fpm_user: "{{ roundcube_user }}" + php_fpm_group: "{{ roundcube_group }}" + php_fpm_socket: "{{ roundcube_fpm_socket }}" + php_fpm_listener: roundcube + php_config: + file_uploads: yes + session.auto_start: 0 + mbstring.func_overload: 0 + pcre.backtrack_limit: 200000 + upload_max_filesize: "{{ (mail_server.max_mail_size_bytes / 1024) | int | abs }}K" + post_max_size: "{{ (mail_server.max_mail_size_bytes / 1024) 
| int | abs }}K" + allow_url_fopen: yes + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + group: "{{ roundcube_group }}" + fastcgi: yes + + +- name: get latest version of roundcube + include_tasks: get_lastversion.yml + vars: + package: + name: roundcube/roundcubemail + location: github + assets: yes + asset_filter: 'complete.tar.gz$' + file: "{{ roundcube_dir }}/last_version" + extract: "{{ roundcube_dir }}" + user: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + notify: restart php fpm + strip_first_dir: yes + + +- name: ensure roundcube logs/temp directories are writable + file: + path: "{{ roundcube_dir ~ '/' ~ item }}" + mode: 0775 + state: directory + loop: + - logs + - temp + + +- name: download mime types + get_url: + url: https://svn.apache.org/repos/asf/httpd/httpd/trunk/docs/conf/mime.types + dest: "{{ roundcube_dir }}/config/{{ roundcube_mime_types_file }}" + mode: 0440 + owner: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + notify: restart php fpm + + +- name: create custom directory + file: + path: "{{ roundcube_dir }}/public_html/custom" + mode: 0775 + owner: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + state: directory + + +- name: upload files to custom directory + copy: + src: "{{ item }}" + dest: "{{ roundcube_dir }}/public_html/custom/{{ item }}" + mode: 0444 + owner: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + loop: + - favicon_mail.ico + - logo_mail.png + notify: restart php fpm + when: roundcube_custom_logo | d(false) == true + + +- name: template roundcube config + template: + src: config.j2 + dest: "{{ roundcube_dir }}/config/config.inc.php" + force: yes + mode: 0660 + owner: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + lstrip_blocks: yes + notify: restart php fpm + + +- name: delete sample config + file: + path: "{{ roundcube_dir }}/config/config.inc.php.sample" + state: absent + + +- name: setup database + include_tasks: db.yml + + +- name: install plugins + include_tasks: plugin.yml + loop: "{{ roundcube_plugins }}" + loop_control: + loop_var: plugin + + +- name: add cleandb cron job + cron: + name: roundcube database cleanup + job: "{{ roundcube_dir }}/bin/cleandb.sh 2>&1 /dev/null" + hour: "5" + minute: "0" + user: "{{ roundcube_user }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ roundcube_dir }}" + - "{{ roundcube_enigma_dir }}" diff --git a/roles/roundcube/tasks/plugin.yml b/roles/roundcube/tasks/plugin.yml new file mode 100644 index 0000000..f29393b --- /dev/null +++ b/roles/roundcube/tasks/plugin.yml @@ -0,0 +1,56 @@ +- block: + - name: create plugin directory + file: + path: "{{ roundcube_dir }}/plugins/{{ plugin.name }}" + state: directory + owner: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + mode: 0775 + + - name: get latest version of plugin + include_tasks: get_lastversion.yml + vars: + package: + name: "{{ plugin.where }}" + location: github + assets: yes + asset_filter: 'tar.gz$' + file: "{{ roundcube_dir }}/last_pv_{{ plugin.name }}" + extract: "{{ roundcube_dir }}/plugins/{{ plugin.name }}" + user: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + strip_first_dir: yes + major_branch: "{{ plugin.major | d(omit) }}" + notify: restart php fpm + + when: plugin.where is defined + + +- block: + - name: check if subdir exists for internal plugins 
+ stat: + path: "{{ roundcube_dir }}/plugins/{{ plugin.name }}" + register: plugin_dir_stat + + + - name: fail if it does not exist + fail: + msg: "subdir does not exist for internal plugin {{ plugin.name }}" + when: not plugin_dir_stat.stat.exists or not plugin_dir_stat.stat.isdir + + when: plugin.where is not defined + + +- name: template plugin config + template: + src: "plugin.j2" + dest: "{{ roundcube_dir }}/plugins/{{ plugin.name }}/config.inc.php" + force: yes + mode: 0660 + owner: "{{ roundcube_user }}" + group: "{{ roundcube_group }}" + lstrip_blocks: yes + vars: + config: "{{ plugin.config }}" + notify: restart php fpm + when: plugin.config is mapping diff --git a/roles/roundcube/templates/config.j2 b/roles/roundcube/templates/config.j2 new file mode 100644 index 0000000..ba5b1eb --- /dev/null +++ b/roles/roundcube/templates/config.j2 @@ -0,0 +1,16 @@ +{% from 'macros.j2' import roundcube_option -%} + + {{ option.value }} + {% else -%} + '{{ option.key }}' => '{{ option.value }}' + {% endif -%} + {% elif option.value is boolean -%} + '{{ option.key }}' => {{ 'true' if option.value else 'false' }} + {% elif option.value is mapping -%} + '{{ option.key }}' => [ + {% for suboption in (option.value | dict2items) -%} + {{ roundcube_array_option(suboption) }} + {%- if not loop.last -%},{%- endif %} + {% endfor -%} + ] + {% else -%} + '{{ option.key }}' => {{ option.value if option.value != None else '' }} + {% endif -%} +{%- endmacro -%} + +{%- macro roundcube_option(option) -%} + {% if option.value is string -%} + {% if (option.key == 'syslog_facility') or (option.value == 'null') -%} + $config['{{ option.key }}'] = {{ option.value }}; + {% else -%} + $config['{{ option.key }}'] = '{{ option.value }}'; + {% endif -%} + {% elif option.value is boolean -%} + $config['{{ option.key }}'] = {{ 'true' if option.value else 'false' }}; + {% elif option.value | type_debug == 'list' -%} + $config['{{ option.key }}'] = array( + {% for suboption in option.value -%} + '{{ suboption }}' + {%- if not loop.last -%},{%- endif %} + {% endfor -%} + ); + {% elif option.value is mapping -%} + $config['{{ option.key }}'] = [ + {% for suboption in (option.value | dict2items) -%} + {{ roundcube_array_option(suboption) }} + {%- if not loop.last -%},{%- endif %} + {% endfor -%} + ]; + {% else -%} + $config['{{ option.key }}'] = {{ option.value if option.value != None else '' }}; + {% endif -%} +{%- endmacro -%} diff --git a/roles/roundcube/templates/nginx_server.j2 b/roles/roundcube/templates/nginx_server.j2 new file mode 100644 index 0000000..e7e5ac5 --- /dev/null +++ b/roles/roundcube/templates/nginx_server.j2 @@ -0,0 +1,16 @@ +root {{ roundcube_dir }}/public_html; +index index.php; + +disable_symlinks off; + +location ~ /\. 
{ + deny all; +} + +location ~ \.php$ { + include /etc/nginx/fastcgi.conf; + + fastcgi_pass unix:{{ roundcube_fpm_socket }}; + fastcgi_index index.php; + fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; +} \ No newline at end of file diff --git a/roles/roundcube/templates/plugin.j2 b/roles/roundcube/templates/plugin.j2 new file mode 100644 index 0000000..1d2df7b --- /dev/null +++ b/roles/roundcube/templates/plugin.j2 @@ -0,0 +1,7 @@ +{% from 'macros.j2' import roundcube_option -%} + + 0) + + +- name: unset rspamd pub key + set_fact: + rspamd_temp_pub_key: "{{ None }}" diff --git a/roles/rspamd/tasks/main.yml b/roles/rspamd/tasks/main.yml new file mode 100644 index 0000000..bb09ba1 --- /dev/null +++ b/roles/rspamd/tasks/main.yml @@ -0,0 +1,161 @@ +- name: set rspamd_cfg + set_fact: + rspamd_cfg: "{{ rspamd_default_config | d({}) | combine(rspamd_config | d({}), recursive=true) }}" + + +- name: install rspamd + include_tasks: tasks/install_packages.yml + vars: + package: + - rspamd + - rspamd-client + - rspamd-controller + - rspamd-fuzzy + - rspamd-proxy + - alpine: rspamd-openrc + - py3-cryptography + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ rspamd_user }}" + group: "{{ rspamd_group }}" + + +- name: create rspamd directory structure + file: + path: "{{ item }}" + state: directory + mode: 0700 + owner: "{{ rspamd_user }}" + group: "{{ rspamd_group }}" + loop: + - "{{ rspamd_conf_dir }}" + - "{{ rspamd_local_dir }}" + - "{{ rspamd_override_dir }}" + - "{{ rspamd_local_map_dir }}" + + +- name: edit service config + lineinfile: + path: /etc/conf.d/rspamd + regexp: "^{{ item.name }}=" + line: "{{ item.name }}=\"{{ item.value }}\"" + notify: restart rspamd + loop: + - name: command_user + value: "{{ rspamd_user }}:{{ rspamd_group }}" + - name: cfgfile + value: "{{ rspamd_conf_dir }}/rspamd.conf" + + +- name: template rspamd configuration + template: + src: rspamd.j2 + dest: "{{ rspamd_local_dir ~ '/' ~ ((item ~ '.conf') if item is string else item.dest) }}" + force: yes + mode: 0400 + lstrip_blocks: yes + owner: "{{ rspamd_user }}" + group: "{{ rspamd_group }}" + vars: + rspamd_config_item: "{{ item if item is string else item.conf }}" + loop: + - actions + - { conf: logging, dest: logging.inc } + - { conf: options, dest: options.inc } + - settings + - { conf: worker-controller, dest: worker-controller.inc } + - { conf: worker-fuzzy, dest: worker-fuzzy.inc } + - { conf: worker-normal, dest: worker-normal.inc } + - { conf: worker-proxy, dest: worker-proxy.inc } + - antivirus + - arc + - chartable + - classifier-bayes + - dkim + - dkim_signing + - dmarc + - force_actions + - greylist + - history_redis + - milter_headers + - { conf: mime_types, dest: mime_types.inc.local } + - multimap + - mx_check + - neural + - neural_group + - phishing + - redis + - replies + notify: restart rspamd + + +- name: template rspamd maps + template: + src: "{{ item.src ~ '.j2' }}" + dest: "{{ rspamd_local_map_dir ~ '/' ~ item.dest }}" + force: yes + mode: 0400 + lstrip_blocks: yes + owner: "{{ rspamd_user }}" + group: "{{ rspamd_group }}" + loop: + - { src: bad_filenames, dest: bad_filenames.map } + - { src: local_domains, dest: local_domains.inc } + - { src: local_ip_ranges, dest: local_ip_ranges.inc } + notify: restart rspamd + + +- name: generate dkim keys + include_tasks: dkim.yml + vars: + key: "{{ item }}" + loop: + - { type: 'rsa', selector: 'rsa2048', path: '{{ rspamd_dkim_rsa2048_key }}' } + - { type: 'ed25519', selector: 'ed25519', 
path: '{{ rspamd_dkim_ed25519_key }}' } + + +- name: install and configure redis + include_role: + name: redis + vars: + redis_group: "{{ rspamd_group }}" + redis_config: + unixsocket: "{{ rspamd_redis_socket }}" + unixsocketperm: "770" + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ rspamd_conf_dir }}" + - "{{ rspamd_local_dir }}" + - "{{ rspamd_local_map_dir }}" + - "{{ rspamd_override_dir }}" + + +- name: enable and start rspamd + service: + name: rspamd + enabled: yes + state: started diff --git a/roles/rspamd/templates/bad_filenames.j2 b/roles/rspamd/templates/bad_filenames.j2 new file mode 100644 index 0000000..d4d9523 --- /dev/null +++ b/roles/rspamd/templates/bad_filenames.j2 @@ -0,0 +1,3 @@ +{% for filename in (rspamd_bad_filenames | d([])) -%} + {{ filename }} +{% endfor -%} diff --git a/roles/rspamd/templates/local_domains.j2 b/roles/rspamd/templates/local_domains.j2 new file mode 100644 index 0000000..e3ee56a --- /dev/null +++ b/roles/rspamd/templates/local_domains.j2 @@ -0,0 +1 @@ +{{ tld }} diff --git a/roles/rspamd/templates/local_ip_ranges.j2 b/roles/rspamd/templates/local_ip_ranges.j2 new file mode 100644 index 0000000..f58d472 --- /dev/null +++ b/roles/rspamd/templates/local_ip_ranges.j2 @@ -0,0 +1 @@ +{{ int_net }} diff --git a/roles/rspamd/templates/nginx_server.j2 b/roles/rspamd/templates/nginx_server.j2 new file mode 100644 index 0000000..454e251 --- /dev/null +++ b/roles/rspamd/templates/nginx_server.j2 @@ -0,0 +1,10 @@ +location / { + root /usr/share/rspamd/www/; + try_files $uri @proxy; +} + +location @proxy { + proxy_pass http://127.0.0.1:{{ rspamd_controller_port }}; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Host $http_host; +} diff --git a/roles/rspamd/templates/rspamd.j2 b/roles/rspamd/templates/rspamd.j2 new file mode 100644 index 0000000..e770fdb --- /dev/null +++ b/roles/rspamd/templates/rspamd.j2 @@ -0,0 +1,55 @@ +{% macro rspamd_array_option(value, padding = 0) -%} + {%- if value is boolean -%} + {{- 'true' if value else 'false' -}} + {%- elif (value is string) and not (value | regex_search('^\d')) -%} + "{{- value -}}" + {%- elif value is mapping -%} + { + {% for suboption in (value | d({}) | dict2items) -%} + {{ rspamd_option(suboption, padding + 1) }} + {% endfor -%} + } + {%- elif value | type_debug == 'list' -%} + [ + {% for suboption in (value | d([])) -%} + {{- ' ' * 4 * (padding + 1) -}} + {{- rspamd_array_option(suboption, padding + 1) -}}{%- if not loop.last -%}{{ ', ' }}{%- else -%}{{ "\n" }}{%- endif -%} + {% endfor -%} + ] + {%- else -%} + {{- value if value != None else '' -}} + {%- endif -%} +{%- endmacro -%} + + + +{% macro rspamd_option(option, padding = 0) -%} + {{- '' if (padding == 0) else (' ' * 4 * padding) -}} + {% if option.value is boolean -%} + {{ option.key }} = {{ 'true' if option.value else 'false' }}; + {% elif (option.value is string) and not (option.value | regex_search('^\d')) -%} + {{ option.key }} = "{{ option.value }}"; + {% elif option.value is mapping -%} + {{ option.key if (option.key != '__tld__') else tld }} = { + {% for suboption in (option.value | d({}) | dict2items) -%} + {{ rspamd_option(suboption, padding + 1) }} + {%- endfor -%} + {{- '' if (padding == 0) else (' ' * 4 * padding) -}} }; + {% elif 
option.value | type_debug == 'list' -%} + {{ option.key }} = [ + {% for suboption in (option.value | d([])) -%} + {{- ' ' * 4 * (padding + 1) -}} + {{- rspamd_array_option(suboption, padding + 1) -}}{%- if not loop.last -%}{{ ', ' }}{%- else -%}{{ "\n" }}{%- endif -%} + {% endfor %} + ]; + {% else -%} + {{ option.key }} = {{ option.value if option.value != None else '' }}; + {% endif -%} +{% endmacro -%} + + + + +{% for option in (rspamd_cfg[rspamd_config_item] | d({}) | dict2items) -%} + {{ rspamd_option(option) }} +{%- endfor %} diff --git a/roles/seafile/defaults/main.yml b/roles/seafile/defaults/main.yml new file mode 100644 index 0000000..204e815 --- /dev/null +++ b/roles/seafile/defaults/main.yml @@ -0,0 +1,81 @@ +seaf_user: seafile +seaf_group: seafile +seaf_dir: /opt/seafile + +seaf_version: 9.0.6 + +seaf_fs_port: 8082 + +seaf_db_user: seafile +seaf_db_ccnet: ccnet_db +seaf_db_seafile: seafile_db +seaf_db_seahub: seahub_db + +seaf_server_name: "{{ ('seaf_' ~ org) | truncate(15, true, '', 0) | lower }}" + +seaf_cleanup: + minute: 30 + hour: 3 + weekday: 0 + +seaf_default_config: + seafile: + fileserver: + host: 127.0.0.1 + port: "{{ seaf_fs_port }}" + max_download_dir_size: 2000 + web_token_expire_time: 10800 + + database: + type: mysql + host: 127.0.0.1 + port: 3306 + user: "{{ seaf_db_user }}" + password: "{{ seaf_db_password }}" + db_name: "{{ seaf_db_seafile }}" + connection_charset: utf8 + + slow_log: + enable_slow_log: no + + seahub: + enable_signup: yes + login_remember_days: 90 + login_attempt_limit: 7 + user_password_min_length: 8 + user_password_strength_level: 2 + user_strong_password_required: yes + enable_force_2fa_to_all_users: no + + enable_repo_snapshot_label: yes + + encrypted_library_version: 4 + share_link_login_required: no + enable_watermark: no + enable_share_link_report_abuse: no + file_preview_max_size: 104857600 + + time_zone: "{{ timezone }}" + site_name: "{{ org }}" + site_title: "Seafile | {{ org }}" + max_number_of_files_for_fileupload: 10000 + + email_use_tls: yes + email_host: "{{ mail_server.mta_actual_hostname ~ '.' 
~ mail_server.tld }}" + email_port: "{{ mail_server.tls_port | d(465) }}" + + email_host_user: "{{ mail_account.username }}" + email_host_password: "{{ mail_account.password }}" + default_from_email: "{{ mail_account.username ~ '@' ~ (mail_account.domain | d(mail_server.tld)) }}" + server_email: "{{ mail_account.username ~ '@' ~ (mail_account.domain | d(mail_server.tld)) }}" + + office_server_type: CollaboraOffice + enable_office_web_app: yes + office_web_app_base_url: "{{ host_url }}/hosting/discovery" + wopi_access_token_expiration: 1800 + enable_office_web_app_edit: yes + + office_web_app_file_extension: ['odp', 'ods', 'odt', 'xls', 'xlsb', 'xlsm', 'xlsx', 'ppsx', 'ppt', 'pptm', 'pptx', 'doc', 'docm', 'docx'] + office_web_app_edit_file_extension: ['odp', 'ods', 'odt', 'xls', 'xlsb', 'xlsm', 'xlsx', 'ppsx', 'ppt', 'pptm', 'pptx', 'doc', 'docm', 'docx'] + + enable_two_factor_auth: yes diff --git a/roles/seafile/handlers/main.yml b/roles/seafile/handlers/main.yml new file mode 100644 index 0000000..6732baa --- /dev/null +++ b/roles/seafile/handlers/main.yml @@ -0,0 +1,15 @@ +- name: reload systemd daemons + systemd: + daemon_reload: yes + + +- name: restart seafile + service: + name: seafile + state: restarted + + +- name: restart seahub + service: + name: seahub + state: restarted diff --git a/roles/seafile/tasks/main.yml b/roles/seafile/tasks/main.yml new file mode 100644 index 0000000..a251082 --- /dev/null +++ b/roles/seafile/tasks/main.yml @@ -0,0 +1,242 @@ +- name: gather facts + setup: + gather_subset: + - min + + +- name: fail if this is not debian + fail: + msg: "this role only supports debian hosts" + when: ansible_distribution != 'Debian' + + +- name: set seaf_cfg + set_fact: + seaf_cfg: "{{ seaf_default_config | d({}) | combine(seaf_config | d({}), recursive=true) }}" + + +- name: install mariadb + include_role: + name: mariadb + vars: + function: install + mariadb_config: + old_passwords: 0 + ssl_ca: + ssl_cert: + ssl_key: + tls_version: + mariadb_enable_tls: no + + +- name: add mariadb users + include_role: + name: mariadb + vars: + function: add_user + mariadb_server: "{{ inventory_hostname }}" + user: + name: "{{ seaf_db_user }}" + password: "{{ seaf_db_password }}" + privs: + - key: "{{ seaf_db_ccnet }}.*" + value: ALL + - key: "{{ seaf_db_seafile }}.*" + value: ALL + - key: "{{ seaf_db_seahub }}.*" + value: ALL + + +- name: add mariadb databases + include_role: + name: mariadb + vars: + function: add_db + mariadb_server: "{{ inventory_hostname }}" + database: + name: "{{ item }}" + encoding: utf8 + loop: + - "{{ seaf_db_ccnet }}" + - "{{ seaf_db_seafile }}" + - "{{ seaf_db_seahub }}" + + +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - python3 + - python3-setuptools + - python3-pip + - libmariadb-dev + - memcached + - libmemcached-dev + - libffi-dev + + +- name: install pip dependencies + pip: + name: + - django==3.2.* + - Pillow + - pylibmc + - captcha + - jinja2 + - sqlalchemy==1.4.3 + - django-pylibmc + - django-simple-captcha + - python3-ldap + - mysqlclient + - pycryptodome==3.12.0 + - cffi==1.14.0 + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ seaf_user }}" + group: "{{ seaf_group }}" + dir: "{{ seaf_dir }}" + shell: /bin/bash + + +- name: download and extract seafile distro + unarchive: + src: "https://s3.eu-central-1.amazonaws.com/download.seadrive.org/seafile-server_{{ seaf_version }}_x86-64.tar.gz" + dest: "{{ seaf_dir }}" + remote_src: yes + owner: "{{ 
seaf_user }}" + group: "{{ seaf_group }}" + creates: "{{ seaf_dir }}/seafile-server-{{ seaf_version }}" + + +- name: set seafile setup script parameters + set_fact: + seaf_script_params: "{{ [(('--server-name ' ~ (seaf_server_name | quote)) if (seaf_server_name is defined) else ''), + '--server-ip ' ~ (host_fqdn | quote), + '--use-existing-db 1', + '--mysql-user ' ~ seaf_db_user, + '--mysql-user-passwd ' ~ seaf_db_password + ] | select() | list | join(' ') }}" + + +- name: run seafile setup script + shell: + cmd: "./setup-seafile-mysql.sh auto {{ seaf_script_params }}" + chdir: "{{ seaf_dir }}/seafile-server-{{ seaf_version }}" + creates: "{{ seaf_dir }}/seafile-server-latest" + become: yes + become_method: su + become_flags: '-s /bin/bash' + become_user: "{{ seaf_user }}" + register: result + + +- name: template configs + template: + src: "{{ item.src }}.j2" + dest: "{{ seaf_dir }}/conf/{{ item.dest }}" + force: yes + mode: "{{ item.mode | d(omit) }}" + owner: "{{ seaf_user }}" + group: "{{ seaf_group }}" + loop: + - { src: 'ccnet', dest: 'ccnet.conf' } + - { src: 'seafile', dest: 'seafile.conf' } + - { src: 'seahub_settings', dest: 'seahub_settings.py', mode: '0700' } + notify: + - restart seafile + - restart seahub + + +- name: template cleanup script + template: + src: cleanup.j2 + dest: "{{ seaf_dir }}/cleanup_script.sh" + force: yes + mode: "+x" + owner: "{{ seaf_user }}" + group: "{{ seaf_group }}" + + +- name: create cron entry for cleanup script + cron: + name: seafile-cleanup + minute: "{{ seaf_cleanup.minute | d(0) }}" + hour: "{{ seaf_cleanup.hour | d(2) }}" + weekday: "{{ seaf_cleanup.weekday | d(0) }}" + job: "{{ seaf_dir }}/cleanup_script.sh" + + +- name: template systemd init files + template: + src: "{{ item.src }}.j2" + dest: "/etc/systemd/system/{{ item.dest }}.service" + force: yes + loop: + - { src: 'seafile_service', dest: 'seafile' } + - { src: 'seahub_service', dest: 'seahub' } + notify: reload systemd daemons + + +- name: install and configure memcached + include_role: + name: memcached + + +- name: install and configure collabora online + include_role: + name: officeonline + + +- name: install and configure logrotate + include_role: + name: logrotate + vars: + logrotate_services: + - name: seafile + template: logrotate_seafile + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + security_headers: no + conf: + http: + disable_symlinks: no + ssl_conf_command: [] + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ seaf_dir }}/ccnet" + - "{{ seaf_dir }}/conf" + - "{{ seaf_dir }}/seafile-server-latest" + - "{{ seaf_dir }}/cleanup_script.sh" + + +- name: enable and start services + systemd: + daemon_reload: yes + enabled: yes + name: "{{ item }}" + state: started + loop: + - seafile + - seahub diff --git a/roles/seafile/templates/ccnet.j2 b/roles/seafile/templates/ccnet.j2 new file mode 100644 index 0000000..53e9b45 --- /dev/null +++ b/roles/seafile/templates/ccnet.j2 @@ -0,0 +1,10 @@ +[General] + +[Database] +ENGINE = mysql +HOST = 127.0.0.1 +PORT = 3306 +USER = {{ seaf_db_user }} +PASSWD = {{ seaf_db_password }} +DB = {{ seaf_db_ccnet }} +CONNECTION_CHARSET = utf8 diff --git a/roles/seafile/templates/cleanup.j2 b/roles/seafile/templates/cleanup.j2 new file mode 100644 index 0000000..7f7e539 --- /dev/null +++ b/roles/seafile/templates/cleanup.j2 @@ -0,0 
+1,23 @@ +#!/bin/bash + +# stop the server +echo Stopping the Seafile-Server... +systemctl stop seahub.service +systemctl stop seafile.service + +echo Giving the server some time to shut down properly.... +sleep 20 + +# run the cleanup +echo Seafile cleanup started... +su {{ seaf_user | quote }} -c {{ (seaf_dir ~ '/seafile-server-latest/seaf-gc.sh') | quote }} + +echo Giving the server some time.... +sleep 10 + +# start the server again +echo Starting the Seafile-Server... +systemctl start seafile.service +systemctl start seahub.service + +echo Seafile cleanup done! diff --git a/roles/seafile/templates/logrotate_seafile.j2 b/roles/seafile/templates/logrotate_seafile.j2 new file mode 100644 index 0000000..c13f7bc --- /dev/null +++ b/roles/seafile/templates/logrotate_seafile.j2 @@ -0,0 +1,13 @@ +{{ seaf_dir }}/logs/*.log +{ + daily + missingok + rotate 4 + compress + delaycompress + notifempty + sharedscripts + postrotate + [ ! -f {{ seaf_dir }}/pids/seaf-server.pid ] || kill -USR1 `cat {{ seaf_dir }}/pids/seaf-server.pid` + endscript +} diff --git a/roles/seafile/templates/nginx_server.j2 b/roles/seafile/templates/nginx_server.j2 new file mode 100644 index 0000000..8d84b82 --- /dev/null +++ b/roles/seafile/templates/nginx_server.j2 @@ -0,0 +1,71 @@ +proxy_set_header X-Forwarded-For $remote_addr; + +location / { + proxy_pass http://127.0.0.1:8000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 1200s; + + client_max_body_size 0; +} + +location /seafhttp { + rewrite ^/seafhttp(.*)$ $1 break; + proxy_pass http://127.0.0.1:{{ seaf_fs_port }}; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + client_max_body_size 0; + proxy_request_buffering off; + + proxy_connect_timeout 36000s; + proxy_read_timeout 36000s; + proxy_send_timeout 36000s; + send_timeout 36000s; +} + +location /media { + root {{ seaf_dir }}/seafile-server-latest/seahub; +} + + + +{% if has_officeonline | d(false) == true -%} +location ^~ /browser { + proxy_pass http://127.0.0.1:9980; + proxy_set_header Host $http_host; +} + +location ^~ /hosting/discovery { + proxy_pass http://127.0.0.1:9980; + proxy_set_header Host $http_host; +} + +location ^~ /hosting/capabilities { + proxy_pass http://127.0.0.1:9980; + proxy_set_header Host $http_host; +} + +location ~ ^/cool/(.*)/ws$ { + proxy_pass http://127.0.0.1:9980; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $http_host; + proxy_read_timeout 36000s; +} + +location ~ ^/(c|l)ool { + proxy_pass http://127.0.0.1:9980; + proxy_set_header Host $http_host; +} + +location ^~ /cool/adminws { + proxy_pass http://127.0.0.1:9980; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $http_host; + proxy_read_timeout 36000s; +} +{% endif -%} diff --git a/roles/seafile/templates/seafile.j2 b/roles/seafile/templates/seafile.j2 new file mode 100644 index 0000000..ced9578 --- /dev/null +++ b/roles/seafile/templates/seafile.j2 @@ -0,0 +1,20 @@ +{% macro seaf_option(option) -%} + {%- if option.value is boolean -%} + {{- option.key | lower }} = {{ 'true' if option.value else 'false' -}} + {%- else -%} + {{- option.key | lower }} = {{ option.value -}} + {%- endif -%} +{% endmacro -%} + +{% macro seaf_block(name, block) -%} + {%- if block | type_debug == 'dict' -%} + [{{ name 
}}] +{% for option in (block | d({}) | dict2items) %} + {{- seaf_option(option) }} +{% endfor -%} + {%- endif -%} +{% endmacro -%} + +{% for block in (seaf_cfg.seafile | d({}) | dict2items) -%} + {{ seaf_block(block.key, block.value) }} +{% endfor -%} diff --git a/roles/seafile/templates/seafile_service.j2 b/roles/seafile/templates/seafile_service.j2 new file mode 100644 index 0000000..1355abb --- /dev/null +++ b/roles/seafile/templates/seafile_service.j2 @@ -0,0 +1,14 @@ +[Unit] +Description=Seafile +After=network.target mysql.service + +[Service] +Type=forking +ExecStart={{ seaf_dir }}/seafile-server-latest/seafile.sh start +ExecStop={{ seaf_dir }}/seafile-server-latest/seafile.sh stop +LimitNOFILE=infinity +User={{ seaf_user }} +Group={{ seaf_group }} + +[Install] +WantedBy=multi-user.target diff --git a/roles/seafile/templates/seahub_service.j2 b/roles/seafile/templates/seahub_service.j2 new file mode 100644 index 0000000..6bd0a75 --- /dev/null +++ b/roles/seafile/templates/seahub_service.j2 @@ -0,0 +1,14 @@ +[Unit] +Description=Seafile hub +After=network.target seafile.service + +[Service] +Type=forking +# change start to start-fastcgi if you want to run fastcgi +ExecStart={{ seaf_dir }}/seafile-server-latest/seahub.sh start +ExecStop={{ seaf_dir }}/seafile-server-latest/seahub.sh stop +User={{ seaf_user }} +Group={{ seaf_group }} + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/roles/seafile/templates/seahub_settings.j2 b/roles/seafile/templates/seahub_settings.j2 new file mode 100644 index 0000000..fc86261 --- /dev/null +++ b/roles/seafile/templates/seahub_settings.j2 @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +SECRET_KEY = "b'np@1u5zmqzb)at5^n7@so19b%a42kd=kdx+xcum!z)le9g1zet'" + +SERVICE_URL = "{{ host_url }}" +FILE_SERVER_ROOT = "{{ host_url }}/seafhttp" + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.mysql', + 'NAME': '{{ seaf_db_seahub }}', + 'USER': '{{ seaf_db_user }}', + 'PASSWORD': '{{ seaf_db_password }}', + 'HOST': '127.0.0.1', + 'PORT': '3306', + 'OPTIONS': {'charset': 'utf8mb4'}, + } +} + +CACHES = { + 'default': { + 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache', + 'LOCATION': '127.0.0.1:11211', + }, +} + +{% for option in (seaf_cfg.seahub | d({}) | dict2items) -%} +{% if option.value is boolean -%} + {{- option.key | upper }} = {{ 'True' if option.value else 'False' }} +{% elif option.value is string -%} + {{- option.key | upper }} = '{{ option.value }}' +{% elif (option.value | type_debug == 'list') -%} + {{- option.key | upper }} = ('{{ option.value | join("', '") }}') +{% else -%} + {{- option.key | upper }} = {{ option.value }} +{% endif -%} +{% endfor -%} diff --git a/roles/shop/defaults/main.yml b/roles/shop/defaults/main.yml new file mode 100644 index 0000000..9f5ffd9 --- /dev/null +++ b/roles/shop/defaults/main.yml @@ -0,0 +1,5 @@ +shop_user: shop +shop_group: shop +shop_dir: /opt/shop + +shop_port: 3000 diff --git a/roles/shop/handlers/main.yml b/roles/shop/handlers/main.yml new file mode 100644 index 0000000..c11f754 --- /dev/null +++ b/roles/shop/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart shop + service: + name: shop + state: restarted diff --git a/roles/shop/tasks/main.yml b/roles/shop/tasks/main.yml new file mode 100644 index 0000000..416556f --- /dev/null +++ b/roles/shop/tasks/main.yml @@ -0,0 +1,93 @@ +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - nodejs + - npm + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + 
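+      # tasks/create_user.yml is the shared helper used across the roles in this repo;
+      # the "user" dict below drives it. Keys used elsewhere in the repo are name, group,
+      # dir (home directory), shell and create_home. A minimal sketch of such a helper
+      # (an assumption - the real include may differ) would simply wrap the standard
+      # group/user modules:
+      #
+      #   - group:
+      #       name: "{{ user.group }}"
+      #   - user:
+      #       name: "{{ user.name }}"
+      #       group: "{{ user.group }}"
+      #       home: "{{ user.dir | d(omit) }}"
+      #       shell: "{{ user.shell | d(omit) }}"
+      #       create_home: "{{ user.create_home | d(true) }}"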
user: + name: "{{ shop_user }}" + group: "{{ shop_group }}" + dir: "{{ shop_dir }}" + notify: restart shop + + +- name: recursively set owner and group for server dir + file: + path: "{{ shop_dir }}" + state: directory + recurse: yes + owner: "{{ shop_user }}" + group: "{{ shop_group }}" + + +- name: ensure server script has executable bit set + file: + path: "{{ shop_dir }}/app.mjs" + mode: "+x" + + +- name: template config file + template: + src: config.j2 + dest: "{{ shop_dir }}/config.ini" + force: yes + lstrip_blocks: yes + notify: restart shop + + +- name: install npm dependencies + npm: + path: "{{ shop_dir }}" + no_optional: yes + ignore_scripts: yes + production: yes + become: yes + become_user: "{{ shop_user }}" + become_method: su + become_flags: '-s /bin/ash' + notify: restart shop + changed_when: no + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/shop + force: yes + mode: "+x" + notify: restart shop + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ shop_dir }}" + + +- name: enable and start shop + service: + name: shop + enabled: yes + state: started diff --git a/roles/shop/templates/config.j2 b/roles/shop/templates/config.j2 new file mode 100644 index 0000000..d89a9ac --- /dev/null +++ b/roles/shop/templates/config.j2 @@ -0,0 +1,7 @@ +PORT = {{ shop_port }} +DEBUG = false + +DB_HOST = {{ database_host }} +DB_USER = {{ database_user }} +DB_PASS = {{ database_pass }} +DB_DATABASE = {{ database_name }} diff --git a/roles/shop/templates/init.j2 b/roles/shop/templates/init.j2 new file mode 100644 index 0000000..ff5a8d2 --- /dev/null +++ b/roles/shop/templates/init.j2 @@ -0,0 +1,14 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ shop_dir }}" +command="node {{ shop_dir }}/app.mjs" +command_user="{{ shop_user }}:{{ shop_group }}" +pidfile="/var/run/$SVCNAME.pid" +supervisor="supervise-daemon" +respawn_max=0 + +depend() { + need net + use dns +} diff --git a/roles/shop/templates/nginx_server.j2 b/roles/shop/templates/nginx_server.j2 new file mode 100644 index 0000000..d1ab9f2 --- /dev/null +++ b/roles/shop/templates/nginx_server.j2 @@ -0,0 +1,27 @@ +set_real_ip_from 10.0.0.0/8; +real_ip_header X-Real-IP; +real_ip_recursive on; + +location / { + proxy_pass http://127.0.0.1:{{ shop_port }}; +} + +location = /favicon.ico { + alias {{ shop_dir }}/assets/favicon.ico; +} + +location /assets/ { + alias {{ shop_dir }}/assets/; +} + +location ~ ^/orders(?:/(.*))?$ { + allow 10.0.0.0/8; + deny all; + proxy_pass http://127.0.0.1:{{ shop_port }}; +} + +location ~ ^/promos(?:/(.*))?$ { + allow 10.0.0.0/8; + deny all; + proxy_pass http://127.0.0.1:{{ shop_port }}; +} diff --git a/roles/smb/defaults/main.yml b/roles/smb/defaults/main.yml new file mode 100644 index 0000000..7c00117 --- /dev/null +++ b/roles/smb/defaults/main.yml @@ -0,0 +1,71 @@ +smb_guest_user: smb-guest +smb_guest_group: smb-guest +smb_storage_dir: /storage + +smb_default_config: + acl_allow_execute_always: yes + aio_max_threads: 25 + allow_dns_updates: disabled + browseable: yes + client_min_protocol: SMB2 + client_signing: mandatory + create_mask: 0750 + deadtime: 600 + directory_mask: 0750 + disable_netbios: yes + disable_spoolss: yes + follow_symlinks: no + force_create_mode: 0750 + force_directory_mode: 0750 + 
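+  # Each key here (merged with smb_config into smb_cfg) is rendered into the [global]
+  # section of smb.conf by templates/smb.j2: underscores become spaces, booleans become
+  # yes/no and lists are joined with ", ", so for example:
+  #   force_create_mode: 0750          ->  force create mode = 0750
+  #   map_to_guest: "Bad Password"     ->  map to guest = Bad Password
+  #   server_services: [smb, _s3fs]    ->  server services = smb, _s3fs
+  #   reject_md5_clients: yes          ->  reject md5 clients = yes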
guest_ok: yes + guest_only: no + hide_dot_files: no + hide_special_files: yes + host_msdfs: no + kernel_oplocks: yes + lm_announce: no + load_printers: no + logging: syslog + log_level: 1 + map_archive: no + map_to_guest: "Bad Password" + multicast_dns_register: no + name_resolve_order: "lmhosts host" + netbios_name: smb + ntlm_auth: ntlmv2-only + read_only: yes + reject_md5_clients: yes + reject_md5_servers: yes + security: user + server_role: standalone + server_min_protocol: SMB2_10 + server_services: + - smb + - _s3fs + server_signing: mandatory + show_add_printer_wizard: no + unix_extensions: no + use_sendfile: yes + + idmap_uid: 10000-20000 + idmap_gid: 10000-20000 + syslog_only: yes + syslog: 1 + + hosts_allow: + - "127." + - "{{ int_net | ipaddr('network') }}/{{ int_net | ipaddr('netmask') }}" + + force_group: "{{ smb_guest_group }}" + force_user: "{{ smb_guest_user }}" + guest_account: "{{ smb_guest_user }}" + server_string: "{{ org }} SMB server" + workgroup: "{{ smb_workgroup | default(org | upper) }}" + + +smb_shares: + - name: soft + comment: software packages + - name: script + comment: powershell scripts + has_scripts: yes diff --git a/roles/smb/handlers/main.yml b/roles/smb/handlers/main.yml new file mode 100644 index 0000000..20017cf --- /dev/null +++ b/roles/smb/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart samba + service: + name: samba + state: restarted \ No newline at end of file diff --git a/roles/smb/tasks/main.yml b/roles/smb/tasks/main.yml new file mode 100644 index 0000000..8ddb9f9 --- /dev/null +++ b/roles/smb/tasks/main.yml @@ -0,0 +1,81 @@ +- name: set smb_cfg + set_fact: + smb_cfg: "{{ smb_default_config | d({}) | combine(smb_config | d({}), recursive=true) }}" + + +- name: install samba + include_tasks: tasks/install_packages.yml + vars: + package: + - samba-server + - alpine: samba-server-openrc + + +- name: ensure nmbd is disabled in conf file + lineinfile: + path: /etc/conf.d/samba + regexp: '^daemon_list=' + line: 'daemon_list="smbd"' + notify: restart samba + + +- name: ensure syslog logging is enabled in conf file + lineinfile: + path: /etc/conf.d/samba + regexp: '^smbd_options=' + line: 'smbd_options="-D --option=logging=syslog"' + notify: restart samba + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ smb_guest_user }}" + group: "{{ smb_guest_group }}" + dir: "{{ smb_storage_dir }}" + create_home: no + + +- name: ensure storage dir has adequate permissions + file: + path: "{{ smb_storage_dir }}" + mode: "2750" + owner: "{{ smb_guest_user }}" + group: "{{ smb_guest_group }}" + notify: restart samba + + +- name: ensure share directories exist + file: + path: "{{ smb_storage_dir }}/{{ item.name }}" + state: directory + mode: "2750" + owner: "{{ smb_guest_user }}" + group: "{{ smb_guest_group }}" + loop: "{{ smb_shares }}" + notify: restart samba + + +- name: template config file + template: + src: smb.j2 + dest: /etc/samba/smb.conf + force: yes + mode: 0640 + notify: restart samba + + +- name: flush handlers + meta: flush_handlers + + +- name: generate scripts + include_tasks: script_generate.yml + + +- name: start samba and set it to start on boot + service: + name: samba + enabled: yes + state: started diff --git a/roles/smb/tasks/script_generate.yml b/roles/smb/tasks/script_generate.yml new file mode 100644 index 0000000..f0c7068 --- /dev/null +++ b/roles/smb/tasks/script_generate.yml @@ -0,0 +1,37 @@ +- name: select a share than can hold scripts + set_fact: + script_share: "{{ smb_shares | 
selectattr('has_scripts', 'defined') | selectattr('has_scripts', 'equalto', true) | list | first }}" + + +- name: fail if share is missing + fail: + msg: "cannot generate scripts, script share is missing" + when: (script_share | length) == 0 + + +- name: create psr directory + file: + path: "{{ smb_storage_dir }}/{{ script_share.name }}/psr" + state: directory + mode: "2750" + owner: "{{ smb_guest_user }}" + group: "{{ smb_guest_group }}" + + +- name: template items to script share + template: + src: "scripts/{{ item.src }}" + dest: "{{ smb_storage_dir }}/{{ script_share.name }}/{{ item.dst }}" + force: yes + mode: 0750 + owner: "{{ smb_guest_user }}" + group: "{{ smb_guest_group }}" + loop: + - src: psr-ps1.j2 + dst: psr/Setup-PSRemoting.ps1 + - src: psr-cmd.j2 + dst: psr/Setup-PSRemoting.cmd + - src: psr-reset-ps1.j2 + dst: psr/Reset-PSRemoting.ps1 + - src: psr-reset-cmd.j2 + dst: psr/Reset-PSRemoting.cmd diff --git a/roles/smb/templates/scripts/psr-cmd.j2 b/roles/smb/templates/scripts/psr-cmd.j2 new file mode 100644 index 0000000..3b029e7 --- /dev/null +++ b/roles/smb/templates/scripts/psr-cmd.j2 @@ -0,0 +1,5 @@ +@echo off +pushd %~dp0 +powershell -ExecutionPolicy Bypass -File .\Setup-PSRemoting.ps1 +popd +@pause \ No newline at end of file diff --git a/roles/smb/templates/scripts/psr-ps1.j2 b/roles/smb/templates/scripts/psr-ps1.j2 new file mode 100644 index 0000000..0c119d3 --- /dev/null +++ b/roles/smb/templates/scripts/psr-ps1.j2 @@ -0,0 +1,536 @@ +#Requires -RunAsAdministrator +#Requires -Version 5 +#Requires -Modules Microsoft.PowerShell.LocalAccounts + +[CmdletBinding()] + +Param ( + # Whether or not to enable debugging messages + [bool]$EnableDebug = $true, + + # Username for remote administration account + [String]$ServiceUser = "{{ winrm_remote_user }}", + + # Default password for this account + [String]$ServicePassword = "{{ winrm_bootstrap_password }}" +) + +Set-StrictMode -Version 2 # don't force v3 + +[bool]$HasSSL = $false +$ServiceUserDescription = "Service user for remote administration" + + + +function Log($Message, [String]$Color = $null, $NoNewline = $false) { + if ($Color) { + $ExtraParms = @{"ForegroundColor" = $Color} + } else { + $ExtraParms = @{} + } + + Write-Host $Message @ExtraParms -NoNewline:$NoNewline +} + +function Debug($Message) { + if ($EnableDebug) { + Log $Message -Color Cyan + } +} + +function Change($Message) { + Log "! $Message" -Color Yellow +} + +function Error($Message) { + Log "ERROR: $Message" -Color Red + Exit +} + +function Assert($Condition, $Message) { + if (!$Condition) { + Error -Message $Message + } +} + +function Count($E) { + return ($E | Measure).Count +} + +function New-Credential($User, $Password) { + return New-Object System.Management.Automation.PSCredential($User, (ConvertTo-SecureString $Password -AsPlainText -Force)) +} + + + + + + + + + + +# Checks service account for compliance + +function Verify-LocalUser($User) { + $Name = $User.Name + Debug "Verifying account `"$Name`"" + + # Is it enabled? 
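+    # ...and, below, account/password expiration, the description and membership in the
+    # built-in local groups addressed by well-known SID (S-1-5-32-544 = Administrators,
+    # S-1-5-32-580 = Remote Management Users). Each check only changes state when the
+    # current value differs from the desired one, so the verification is safe to re-run.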
+ if (!$User.Enabled) { + Change "Enabling account `"$Name`"" + $User | Enable-LocalUser -ErrorAction Stop + } + + # Check if account is set to never expire + if ($User.AccountExpires -ne $null) { + Change "Changing account expiration policy for `"$Name`"" + $User | Set-LocalUser -AccountNeverExpires -ErrorAction Stop + } + + # Do the same with its password + if ($User.PasswordExpires -ne $null) { + Change "Changing password expiration policy for `"$Name`"" + $User | Set-LocalUser -PasswordNeverExpires -ErrorAction Stop + } + + # Account description is not really important but it doesn't hurt to check + if ($User.Description -ne $ServiceUserDescription) { + Change "Changing description for `"$Name`"" + $User | Set-LocalUser -Description $ServiceUserDescription -ErrorAction Stop + } + + # Validate group membership for Administrators group + if ((Get-LocalGroupMember -SID S-1-5-32-544 | select -ExpandProperty SID) -notcontains $User.SID.Value) { + Change "Changing group membership for `"$Name`" - adding account to `"Administrators`" local group" + Add-LocalGroupMember -SID S-1-5-32-544 -Member $User -ErrorAction Stop + } + + # Do the same for RMS group, if it exists + if ((Get-LocalGroupMember -SID S-1-5-32-580 | select -ExpandProperty SID) -notcontains $User.SID.Value) { + if (Get-LocalGroup -SID S-1-5-32-580 -ErrorAction SilentlyContinue) { + Change "Changing group membership for `"$Name`" - adding account to `"Remote Management Users`" local group" + Add-LocalGroupMember -SID S-1-5-32-580 -Member $User -ErrorAction Stop + } + } + + Debug "Verification complete" +} + + + + + +# PROCESS: +# Find, create, edit group membership of service account and validate it +# DESIRED STATE: an active service account on local machine + +function Process-ServiceAccount { + Debug "* Processing: Service account" + + # Verify if there is already an user + $User = Get-LocalUser -Name $ServiceUser -ErrorAction SilentlyContinue + + if ($User) { + Log "Found existing service account: `"$ServiceUser`"" + } else { + # Create new user and verify it + Debug "No service account found, will create one" + + # Handle passwordless user (may be useful for pure cert auth, not really the case now) + if ($ServicePassword -eq $null -or $ServicePassword.Length -eq 0) { + $PasswordSplat = @{ + "NoPassword" = $true + } + + Log "Using passwordless login" + } else { + $PasswordSplat = @{ + "Password" = ConvertTo-SecureString $ServicePassword -AsPlainText -Force + "PasswordNeverExpires" = $true + } + } + + # Create an user + try { + $User = New-LocalUser -Name $ServiceUser ` + -Description $ServiceUserDescription ` + -AccountNeverExpires ` + @PasswordSplat + } catch { + Error "Caught an exception while creating service account `"$ServiceUser`"" + } + Assert $User "Failed to create service account `"$ServiceUser`"" + Change "Created service account `"$ServiceUser`"" + + + # Add this user to Administrators local group + try { + Add-LocalGroupMember -SID S-1-5-32-544 -Member $User -ErrorAction Stop + } catch { + Error "Caught an exception while adding service account `"$ServiceUser`" to local group `"Administrators`"" + } + Change "Added account `"$ServiceUser`" to local group `"Administrators`"" + + + # Check if RMU group exists + if (!(Get-LocalGroup -SID S-1-5-32-580 -ErrorAction SilentlyContinue)) { + Log "`"Remote Management Users`" group is missing from this system - will not add user to this group" -Color Yellow + } else { + + # Also add user to RMU local group + try { + Add-LocalGroupMember -SID S-1-5-32-580 -Member $User 
-ErrorAction Stop + } catch { + Error "Caught an exception while adding service account `"$ServiceUser`" to local group `"Remote Management Users`"" + } + Change "Added account `"$ServiceUser`" to local group `"Remote Management Users`"" + } + } + + # Verify user, no matter whether it was found or created + Verify-LocalUser -User $User +} + + + + + +# PROCESS: +# Ensure that local network connection profile category is set to Private, so firewall rules and Enable-PSRemoting should work correctly +# Network interface are selected by their DNS suffix +# DESIRED STATE: category of local network interface is set to Private + +function Process-NetworkProfile { + Debug "* Processing: Network Profiles" + + $Interfaces = gwmi -Class Win32_NetworkAdapterConfiguration -Filter IPEnabled=TRUE -ComputerName . + + {% if old_corp_tld is defined -%} + # workaround for old corp tld + $Interfaces = $Interfaces | ? { ($_.DNSDomain -match ".*{{ int_tld }}$") -or ($_.DNSDomain -match ".*{{ old_int_tld }}$") } + {%- else -%} + $Interfaces = $Interfaces | ? { $_.DNSDomain -match ".*{{ int_tld }}$" } + {%- endif %} + + if (!$Interfaces) { + # early return if there is no compatible network interface + Log -Color Yellow "Failed to find local network interface with corp DNS suffix - skipping network profile check" + } else { + Debug "Found $(Count $Interfaces) compatible network interfaces" + + $Interfaces.InterfaceIndex | % { + if ((Get-NetConnectionProfile -InterfaceIndex $_).NetworkCategory -eq "Public") { + Change "Setting network category of interface #$_ to Private" + + try { + Set-NetConnectionProfile -InterfaceIndex $_ -NetworkCategory "Private" -ErrorAction Stop + } catch { + Error "Caught an exception when setting network profile category" + } + } + } + } +} + + + + + +# PROCESS: +# Start up WinRM service and ensure that it has automatic start type +# DESIRED STATE: WinRM service is running and is set to auto-start on next boot + +function Process-WinRM { + Debug "* Processing: WinRM" + + try { + $Service = Get-Service -Name "WinRM" + Assert $Service "WinRM service does not exist on this machine" + + if ($Service.StartType -ne "Automatic") { + Change "Setting WinRM startup type to Automatic" + $Service | Set-Service -StartupType Automatic -ErrorAction Stop + } + + if ($Service.Status -in "Stopped", "StopPending") { + Change "Starting WinRM service" + $Service | Start-Service -ErrorAction Stop + } + } catch { + Error "Caught an exception while setting up WinRM service" + } +} + + + + + + + +# PROCESS: +# Set LocalAccountTokenFilterPolicy registry value to 1 +# DESIRED STATE: LocalAccountTokenFilterPolicy = 1 + +function Process-Registry { + Debug "* Processing: Registry" + + $Key = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" + $Name = "LocalAccountTokenFilterPolicy" + $Value = 1 + + $Prop = Get-ItemProperty $Key -ErrorAction SilentlyContinue + + if (!$Prop) { + Error "Parent registry key for $Name does not exist, skipping this step" + } else { + $M = Get-Member -InputObject $Prop -name $Name -Membertype Properties -ErrorAction SilentlyContinue + if (!$M -or $Prop.$Name -ne $Value) { + Log "$Name is set to an incorrect value or is empty" + + Remove-ItemProperty $Key -Name $Name -Force -ErrorAction SilentlyContinue + Change "Removed $Name from $Key" + + Assert ((New-ItemProperty $Key -Name $Name -PropertyType "DWord" -Value $Value).$Name -eq $Value) "Failed to create `"$Name`" registry property" + Change "Added $Name to $Key with value $Value" + } + } +} + + + + + + +# PROCESS: +# Enable 
PS Remoting +# DESIRED STATE: there is at least one session configuration and a WSMan listener (their validity will be checked later) + +function Process-PSRemoting { + Debug "* Processing: PS Remoting" + + # This snippet was mostly taken from Ansible script + # TODO: most of remoting stuff is already taken care of - this function is just a failsafe + if (!(Get-PSSessionConfiguration -Verbose:$false) -or !(Get-ChildItem WSMan:\localhost\Listener)) { + Log "No PS session configuration or listener found - enabling PS remoting" + + # Override local verbose preference + $Pref = $VerbosePreference + $VerbosePreference = "SilentlyContinue" + + try { + Enable-PSRemoting -Force -ErrorAction Stop -Verbose:$false > $null + } catch { + Error "Caught an exception in Enable-PSRemoting" + } + + Change "PS Remoting enabled" + + # Set verbose preference back to its original value + $VerbosePreference = $Pref + } +} + + + + + + + + +# Create a HTTP listener +function Create-HTTPListener { + New-Item 'WSMan:\localhost\Listener' -Transport HTTP -Address "*" -Force > $null + Change "Created HTTP listener" +} + +function Verify-Listener($Listener) { + return ($Listener -and ` + ($Listener.Keys -contains "Transport=HTTP" -or $Listener.Keys -contains "Transport=HTTPS") -and ` + $Listener.Keys -contains "Address=*") +} + + +# PROCESS: +# Loop through PS listeners and ensure there's only one active HTTP listener +# (this ignores HTTPS listeners because they will be set up by Ansible later) +# DESIRED STATE: there is an active HTTP listener + +function Process-Listeners { + Debug "* Processing: WSMan listeners" + + # Find valid listeners and also save all listeners + $All = Get-ChildItem WSMan:\localhost\Listener + $Valid = $All | ? {Verify-Listener -Listener $_} + + if ((Count $All) -eq 1 -and (Count $Valid) -eq 1) { + Log "Found 1 listener: `"$($Valid[0].Name)`"" # only one: either http (ok) or https (also ok) + } else { + # Remove all listeners + $All | % { + Change "Removing listener: `"$($_.Name)`"" + try { + $_ | Remove-Item -Force -Recurse + } catch { + # Continue even if an exception has happened + Log -Color Yellow "Caught an exception while removing listener `"$($_.Name)`"" + } + } + + # Create + Create-HTTPListener + + # Verify after creation + $Valid = Get-ChildItem WSMan:\localhost\Listener | ? {Verify-Listener -Listener $_} + Assert ((Count $Valid) -eq 1) "Listener was just created, but it's missing" + Debug "Found listener after creation" + } + + $Listener = $Valid[0] +} + + + +# PURPOSE: +# Search, validate and fix PS session configurations +# This also sets up a more secure SDDL +# DESIRED STATE: PS session configurations are validated and are now correct + +function Process-SessionConfig { + Debug "* Processing: PS Session Configuration" + + $SDDL = "O:NSG:BAD:P(A;;GA;;;RM)(A;;GA;;;IU)S:P(AU;FA;GA;;;WD)(AU;SA;GXGW;;;WD)" + Get-PSSessionConfiguration | ? {$_.Name -eq "microsoft.powershell" -or $_.Name -eq "microsoft.powershell32"} | % { + + if ($_.SecurityDescriptorSddl -ne $SDDL) { + Change "Changing SDDL on session configuration `"$($_.Name)`"" + + try { + ($_ | Set-PSSessionConfiguration -SecurityDescriptorSddl $SDDL) > $null + } catch { + Log -Color Yellow "Caught an exception while changing SDDL on `"$($_.Name)`"" + } + } + } +} + + + + +function Process-PSRAuth { + Debug "* Processing: PS Remoting Authentication" + + $Auth = Get-ChildItem WSMan:\localhost\Service\Auth + + "Basic","Kerberos","Certificate" | % { + if (($Auth | ? 
Name -eq $_).Value -eq $true) { + Change "Disabling $_ authentication" + Set-Item -Path "WSMan:\localhost\Service\Auth\$_" -Value $false + } + } + + if (($Auth | ? Name -eq "CredSSP").Value -eq $false) { + Change "Enabling CredSSP authentication" + Enable-WSManCredSSP -Role Server -Force > $null + } +} + + + +function Process-Firewall { + Debug "* Processing: PS Remoting Firewall" + + if (Get-NetFirewallRule -Name "WINRM-HTTPS-In-TCP" -ErrorAction SilentlyContinue) { + Debug "Found HTTPS rule, will disable HTTP rules" + $Script:HasSSL = $true + + Get-NetFirewallRule -ErrorAction SilentlyContinue | ? {$_.Name -like "WINRM-HTTP-*" -and $_.Enabled -eq $true} | % { + Change "Disabling firewall rule for PSR over HTTP: $($_.DisplayName)" + Disable-NetFirewallRule -Name $_.Name + } + } else { + Debug "HTTPS rule is missing, will add HTTP rule" + $Script:HasSSL = $false + + if (!(Get-NetFirewallRule -Name "WINRM-HTTP-In-TCP" -ErrorAction SilentlyContinue)) { + Change "Adding firewall rule for PSR over HTTP" + + New-NetFirewallRule -Name "WINRM-HTTP-In-TCP" ` + -DisplayName "Windows Remote Management (HTTP-In)" ` + -Description "Inbound rule for Windows Remote Management via WS-Management. [TCP 5985]" ` + -Group "Windows Remote Management" ` + -Program "System" ` + -Protocol TCP ` + -LocalPort "5985" ` + -RemoteAddress "{{ int_net }}" ` + -Action Allow ` + -Profile Domain,Private > $null + } + + $Rule = Get-NetFirewallRule -Name "WINRM-HTTP-In-TCP" -ErrorAction SilentlyContinue + if (!$Rule) { + Error "HTTP rule is missing after its creation" + } + + if ($Rule.Enabled -eq $false) { + Change "Enabling HTTP rule" + $Rule | Enable-NetFirewallRule -ErrorAction Stop + } + + if (($Rule | Get-NetFirewallAddressFilter).RemoteAddress -ne "{{ int_net | ipaddr('network') }}/{{ int_net | ipaddr('netmask') }}") { + Change "Changing HTTP rule remote address" + $Rule | Set-NetFirewallRule -RemoteAddress "{{ int_net }}" -ErrorAction Stop + } + + if ($Rule.Profile -ne "Domain,Private") { + Change "Changing HTTP rule profile" + $Rule | Set-NetFirewallRule -Profile Domain,Private -ErrorAction Stop + } + } +} + + + +function Test-PSR { + Debug "* Processing: PS Remoting Test" + + try { + if ($Script:HasSSL) { + Debug "Creating PS session through HTTPS" + $Session = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption (New-PSSessionOption -SkipRevocationCheck -SkipCNCheck) -Credential (New-Credential -User $ServiceUser -Password $ServicePassword) + } else { + Debug "Creating PS session through HTTP" + $Session = New-PSSession -ComputerName "localhost" -Credential (New-Credential -User $ServiceUser -Password $ServicePassword) + + } + } catch { + Error "Caught an exception while setting up PS session: $_" + } + + Assert $Session "Failed to initiate local PS Remoting session" + Assert ((Invoke-Command -Session $Session -ScriptBlock {Write-Output "test"}) -eq "test") "Received wrong output from local PS remoting session" + + $Session | Remove-PSSession +} + + +Log "PS remoting preparation script" -Color Green +Log "Debug mode is $(("off","on")[$EnableDebug])" + + +Process-ServiceAccount -Name $ServiceUser -Password $ServicePassword +Process-NetworkProfile +Process-Registry + +Process-WinRM +Process-PSRemoting +Process-Listeners +Process-SessionConfig +Process-PSRAuth +Process-Firewall + +Process-WinRM + +Test-PSR + +Log "Completed" -Color Green \ No newline at end of file diff --git a/roles/smb/templates/scripts/psr-reset-cmd.j2 b/roles/smb/templates/scripts/psr-reset-cmd.j2 new file mode 100644 index 
0000000..053c02d --- /dev/null +++ b/roles/smb/templates/scripts/psr-reset-cmd.j2 @@ -0,0 +1,5 @@ +@echo off +pushd %~dp0 +powershell -ExecutionPolicy Bypass -File .\Reset-PSRemoting.ps1 +popd +@pause \ No newline at end of file diff --git a/roles/smb/templates/scripts/psr-reset-ps1.j2 b/roles/smb/templates/scripts/psr-reset-ps1.j2 new file mode 100644 index 0000000..fc5369e --- /dev/null +++ b/roles/smb/templates/scripts/psr-reset-ps1.j2 @@ -0,0 +1,5 @@ +#Requires -RunAsAdministrator + +gci wsman:\localhost\listener | ? {$_.Keys -contains "Transport=HTTPS" -and $_.Keys -contains "Address=*"} | % {$_ | Remove-Item -Force -Recurse } +Get-LocalUser {{ winrm_remote_user }} | Set-LocalUser -Password (ConvertTo-SecureString '{{ winrm_bootstrap_password }}' -AsPlainText -Force) +Remove-NetFirewallRule -Name "WINRM-HTTPS-In-TCP" \ No newline at end of file diff --git a/roles/smb/templates/smb.j2 b/roles/smb/templates/smb.j2 new file mode 100644 index 0000000..14e5eb8 --- /dev/null +++ b/roles/smb/templates/smb.j2 @@ -0,0 +1,21 @@ +[global] + +{%- for option in (smb_cfg | d({}) | dict2items) -%} +{% if option.value is boolean -%} + {{ option.key | replace('_', ' ') | lower }} = {{ 'yes' if option.value else 'no' }} +{% elif (option.value | type_debug == 'list') -%} + {{ option.key | replace('_', ' ') | lower }} = {{ option.value | join(', ') }} +{% else -%} + {{ option.key | replace('_', ' ') | lower }} = {{ option.value }} +{% endif -%} +{% endfor %} + +{% for share in smb_shares -%} +[{{ share.name }}] + path = {{ smb_storage_dir }}/{{ share.name }} + {% if share.comment is defined -%} + comment = {{ share.comment }} + {%- endif %} + + +{% endfor %} diff --git a/roles/strongswan/defaults/main.yml b/roles/strongswan/defaults/main.yml new file mode 100644 index 0000000..85cc97b --- /dev/null +++ b/roles/strongswan/defaults/main.yml @@ -0,0 +1,73 @@ +strongswan_user: ipsec +strongswan_group: ipsec + +strongswan_cert_name: server.pem + +strongswan_proposals: + - chacha20poly1305-prfsha384-prfsha256-prfaesxcbc-prfaescmac-x448-x25519 + +strongswan_esp_proposals: + - chacha20poly1305-x448-x25519 + +strongswan_pool: 10.250.0.0/16 + +strongswan_default_config: + strongswan: + block_threshold: 10 + dos_protection: yes + init_limit_half_open: 100 + integrity_test: no + load_modular: yes + send_vendor_id: no + + logging: + filelog: {} + syslog: + daemon: + default: 0 + ike_name: yes + log_level: yes + dmn: 1 + + connections: + ikev2-eap-mschapv2: + version: 2 + local_addrs: "{{ ansible_host }}" + remote_addrs: "%any" + send_cert: always + encap: yes + + proposals: "{{ strongswan_proposals | d('default') }}" + dpd_delay: 40s + rekey_time: 8h + pools: rw-pool-ipv4 + fragmentation: yes + + local: + certs: "{{ strongswan_cert_name }}" + id: "{{ host_fqdn }}" + + remote: + auth: eap-mschapv2 + eap_id: "%any" + + children: + ikev2-eap-mschapv2: + local_ts: 0.0.0.0/0 + rekey_time: 2h + esp_proposals: "{{ strongswan_esp_proposals | d('default') }}" + + pools: + rw-pool-ipv4: + addrs: "{{ strongswan_pool }}" + + secrets: + + +strongswan_exporter_dir: /opt/strongswan_exporter +strongswan_prometheus_port: 9903 + +strongswan_exporter_default_config: + vici.address: unix:///var/run/charon.vici + collector: vici + web.listen-address: "0.0.0.0:{{ strongswan_prometheus_port }}" diff --git a/roles/strongswan/handlers/main.yml b/roles/strongswan/handlers/main.yml new file mode 100644 index 0000000..d02e8bc --- /dev/null +++ b/roles/strongswan/handlers/main.yml @@ -0,0 +1,10 @@ +- name: restart charon + service: + name: 
charon + state: restarted + + +- name: restart strongswan exporter + service: + name: strongswan_exporter + state: restarted diff --git a/roles/strongswan/tasks/main.yml b/roles/strongswan/tasks/main.yml new file mode 100644 index 0000000..5e87684 --- /dev/null +++ b/roles/strongswan/tasks/main.yml @@ -0,0 +1,163 @@ +- name: import dns vars + include_vars: + file: dns.yml + hash_behaviour: merge + when: strongswan_use_dns | d(false) == true + + +- name: set strongswan_cfg + set_fact: + strongswan_cfg: "{{ strongswan_default_config | d({}) | combine(strongswan_config | d({}), recursive=true) }}" + + +- name: install strongswan + include_tasks: tasks/install_packages.yml + vars: + package: + - strongswan + - alpine: strongswan-openrc + + +- name: ensure strongswan user exists + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ strongswan_user }}" + group: "{{ strongswan_group }}" + notify: restart charon + + +- name: change resolv.conf permissions + file: + path: /etc/resolv.conf + mode: 0664 + group: "{{ strongswan_group }}" + + +- name: add forwarding to sysctl file + ansible.posix.sysctl: + name: "{{ item }}" + value: "1" + sysctl_set: yes + notify: restart charon + loop: + - net.ipv4.ip_forward + - net.ipv6.conf.all.forwarding + + +- name: remove unnecessary config files and directories + file: + path: "/etc/{{ item }}" + state: absent + loop: + - swanctl/bliss + - swanctl/conf.d + - swanctl/pkcs12 + - swanctl/pkcs8 + - swanctl/x509aa + - swanctl/x509ac + - swanctl/x509crl + - swanctl/x509ocsp + - ipsec.d/ + - ipsec.conf + - ipsec.secrets + notify: restart charon + + +- name: template configuration files + template: + src: "{{ item.src }}.j2" + dest: "/etc/{{ item.dest | d(item.src) }}.conf" + force: yes + lstrip_blocks: yes + mode: "{{ item.mode | d('0644') }}" + notify: restart charon + loop: + - src: swanctl + dest: swanctl/swanctl + - src: charon-logging + dest: strongswan.d/charon-logging + - src: attr + dest: strongswan.d/charon/attr + - src: strongswan + dest: strongswan.d/strongswan-custom + + +- name: disable unnecessary charon plugins + lineinfile: + path: "/etc/strongswan.d/charon/{{ item }}.conf" + regexp: '^(\s*)load\s?=\s?yes\s*' + line: '\1load = no' + backrefs: yes + notify: restart charon + loop: + - attr-sql + - dhcp + - dnskey + - eap-aka-3gpp2 + - eap-aka + - eap-md5 + - eap-radius + - eap-sim-file + - eap-sim + - eap-simaka-pseudonym + - eap-simaka-reauth + - eap-tls + - fips-prf + - ha + - sqlite + - stroke + - unity + - xauth-eap + - xauth-generic + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/charon + force: yes + mode: 0755 + notify: restart charon + + +- name: deploy ecc384 cert + include_role: + name: certs + vars: + certs: + cert: "/etc/swanctl/x509/{{ strongswan_cert_name }}" + key: "/etc/swanctl/private/{{ strongswan_cert_name }}" + chain: "/etc/swanctl/x509ca/{{ strongswan_cert_name }}" + ecc: yes + post_hook: service charon restart + notify: restart charon + owner: "{{ strongswan_user }}" + group: "{{ strongswan_group }}" + tld: "{{ host_tld }}" + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - /etc/strongswan.d + - /etc/swanctl + + +- name: flush handlers + meta: flush_handlers + + +- name: install prometheus metrics + include_tasks: prometheus.yml + when: host_metrics | d(false) == true + + +- name: start and enable charon + service: + name: charon + enabled: yes + state: started diff --git a/roles/strongswan/tasks/prometheus.yml 
b/roles/strongswan/tasks/prometheus.yml new file mode 100644 index 0000000..b584e81 --- /dev/null +++ b/roles/strongswan/tasks/prometheus.yml @@ -0,0 +1,64 @@ +- name: set strongswan exporter config + set_fact: + strongswan_exporter_cfg: "{{ strongswan_exporter_default_config | d({}) | combine(strongswan_exporter_config | d({}), recursive=true) }}" + + +- name: create strongswan exporter directory + file: + path: "{{ strongswan_exporter_dir }}" + state: directory + owner: "{{ strongswan_user }}" + group: "{{ strongswan_group }}" + + +- name: get and extract latest version of strongswan exporter + include_tasks: tasks/get_lastversion.yml + vars: + package: + name: sergeymakinen/ipsec_exporter + location: github + prerelease: yes + assets: yes + asset_filter: '.linux_amd64.tar.gz$' + file: "{{ strongswan_exporter_dir }}/last_version" + extract: "{{ strongswan_exporter_dir }}" + user: "{{ strongswan_user }}" + group: "{{ strongswan_group }}" + notify: restart strongswan exporter + + +- name: ensure strongswan exporter has executable bit set + file: + path: "{{ strongswan_exporter_dir }}/ipsec_exporter" + mode: "+x" + + +- name: template init script + template: + src: init_exporter.j2 + dest: /etc/init.d/strongswan_exporter + force: yes + mode: 0755 + notify: restart strongswan exporter + + +- name: add prometheus metric target + include_role: + name: prometheus + vars: + function: add_target + target: + name: strongswan + scheme: http + port: "{{ strongswan_prometheus_port }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: enable and start strongswan exporter + service: + name: strongswan_exporter + enabled: yes + state: started diff --git a/roles/strongswan/templates/_macros.j2 b/roles/strongswan/templates/_macros.j2 new file mode 100644 index 0000000..47a15cf --- /dev/null +++ b/roles/strongswan/templates/_macros.j2 @@ -0,0 +1,33 @@ +{%- macro config_option_block(options, padding = 0) -%} + {% for option in (options | d({}) | dict2items) -%} + {{- '' if padding == 0 else (' ' * padding) -}} + {% if option.value | type_debug == 'list' -%} + {{ option.key }} = {{ option.value | join(',') }} + {% elif option.value is mapping -%} + {{ option.key ~ ' {\n' ~ config_option_block(option.value, padding + 2) -}} + {{- ('' if padding == 0 else (' ' * padding)) ~ '}\n' -}} + {% elif option.value is boolean -%} + {{ option.key }} = {{ 'yes' if option.value else 'no' }} + {% elif option.value != None -%} + {{ option.key }} = {{ option.value }} + {% endif -%} + {% endfor -%} +{%- endmacro -%} + + +{%- macro config_template(name, cfg, outer_name = None) -%} + {% if outer_name is string -%} + {{ outer_name }} { + {% endif -%} + + {{- config_option_block(cfg[name], 2 if outer_name is string else 0) -}} + + {%- if outer_name is string -%} + } + {% endif %} + +{% endmacro -%} + + + + diff --git a/roles/strongswan/templates/attr.j2 b/roles/strongswan/templates/attr.j2 new file mode 100644 index 0000000..c4f83e9 --- /dev/null +++ b/roles/strongswan/templates/attr.j2 @@ -0,0 +1,3 @@ +{%- from '_macros.j2' import config_template -%} + +{{- config_template('attr', strongswan_cfg, 'attr') -}} diff --git a/roles/strongswan/templates/charon-logging.j2 b/roles/strongswan/templates/charon-logging.j2 new file mode 100644 index 0000000..65bfc97 --- /dev/null +++ b/roles/strongswan/templates/charon-logging.j2 @@ -0,0 +1,3 @@ +{%- from '_macros.j2' import config_template -%} + +{{- config_template('logging', strongswan_cfg, 'charon') -}} diff --git a/roles/strongswan/templates/init.j2 
b/roles/strongswan/templates/init.j2 new file mode 100644 index 0000000..21c6486 --- /dev/null +++ b/roles/strongswan/templates/init.j2 @@ -0,0 +1,42 @@ +#!/sbin/openrc-run + +description="strongSwan charon IKE daemon" +command="/usr/lib/strongswan/charon" +pidfile="/var/run/charon.pid" +start_stop_daemon_args="--background" +extra_started_commands="reload fullstatus" + +depend() { + need net + after firewall + provide ipsec +} + +start_pre() { + sysctl -p /etc/sysctl.conf &>/dev/null +} + +start_post() { + ebegin "Loading ${name:-$RC_SVCNAME} configuration" + while [ ! -e $pidfile ]; do + sleep 0.1 + done + swanctl --load-all &>/dev/null + eend $? +} + +{% if strongswan_restore_dns | d(false) == true -%} +stop_post() { + echo "nameserver 1.1.1.1" > /etc/resolv.conf +} +{% endif -%} + +reload() { + swanctl --reload-settings + swanctl --load-all +} + +fullstatus() { + swanctl --list-conns + swanctl --list-sas +} diff --git a/roles/strongswan/templates/init_exporter.j2 b/roles/strongswan/templates/init_exporter.j2 new file mode 100644 index 0000000..bb07824 --- /dev/null +++ b/roles/strongswan/templates/init_exporter.j2 @@ -0,0 +1,19 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ strongswan_exporter_dir }}" +command="{{ strongswan_exporter_dir }}/ipsec_exporter" +command_args="{{ [] | zip_longest(strongswan_exporter_cfg.keys() | list, fillvalue='--') | + map('join') | list | + zip(strongswan_exporter_cfg.values() | list | map('quote')) | + map('join', ' ') | + join(' ') }}" +command_user="{{ strongswan_user }}:{{ strongswan_group }}" +pidfile="/var/run/$SVCNAME.pid" +command_background=true +start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" + +depend() { + need net + use dns +} diff --git a/roles/strongswan/templates/strongswan.j2 b/roles/strongswan/templates/strongswan.j2 new file mode 100644 index 0000000..595e9b5 --- /dev/null +++ b/roles/strongswan/templates/strongswan.j2 @@ -0,0 +1,3 @@ +{%- from '_macros.j2' import config_template -%} + +{{- config_template('strongswan', strongswan_cfg, 'charon') -}} diff --git a/roles/strongswan/templates/swanctl.j2 b/roles/strongswan/templates/swanctl.j2 new file mode 100644 index 0000000..ec90728 --- /dev/null +++ b/roles/strongswan/templates/swanctl.j2 @@ -0,0 +1,7 @@ +{%- from '_macros.j2' import config_template -%} + +{{- config_template('connections', strongswan_cfg, 'connections') }} + +{{- config_template('pools', strongswan_cfg, 'pools') }} + +{{- config_template('secrets', strongswan_cfg, 'secrets') -}} diff --git a/roles/strongswan/vars/dns.yml b/roles/strongswan/vars/dns.yml new file mode 100644 index 0000000..7eb9579 --- /dev/null +++ b/roles/strongswan/vars/dns.yml @@ -0,0 +1,10 @@ +strongswan_default_config: + attr: + load: yes + dns: "{%- if strongswan_dns_ip is defined -%}\ + {{ strongswan_dns_ip }}\ + {%- elif strongswan_dns_hostname is defined -%}\ + {{ hostvars[strongswan_dns_hostname]['ansible_host'] }}\ + {%- elif services.filtering_ns is defined -%}\ + {{ hostvars[services.filtering_ns.hostname]['ansible_host'] }}\ + {%- endif -%}" diff --git a/roles/unbound/defaults/main.yml b/roles/unbound/defaults/main.yml new file mode 100644 index 0000000..3abf588 --- /dev/null +++ b/roles/unbound/defaults/main.yml @@ -0,0 +1,99 @@ +unbound_user: unbound +unbound_group: unbound +unbound_conf_dir: /etc/unbound + +unbound_conf_file: "{{ unbound_conf_dir }}/unbound.conf" +unbound_hints_file: "{{ unbound_conf_dir }}/root.hints" + +unbound_string_parameters: + - username + - private-domain + - domain-insecure + - 
module-config + +unbound_default_config: + server: + verbosity: 1 + num-threads: 2 + interface: + - "0.0.0.0" + - "::0" + do-ip6: no + outgoing-port-avoid: 0-1024 + incoming-num-tcp: 8 + outgoing-num-tcp: 8 + so-reuseport: yes + edns-tcp-keepalive: yes + edns-tcp-keepalive-timeout: 120000 + + access-control: "0.0.0.0/0 allow" + + chroot: "" + username: "{{ unbound_user }}" + use-syslog: yes + log-tag-queryreply: no + log-servfail: no + log-queries: no + root-hints: "{{ unbound_hints_file }}" + + hide-identity: yes + hide-version: yes + module-config: "validator iterator" + + private-address: + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + - "169.254.0.0/16" + - "fd00::/8" + - "fe80::/10" + + private-domain: "{{ int_tld }}" + domain-insecure: "{{ int_tld }}" + + trust-anchor-file: "/usr/share/dnssec-root/trusted-key.key" + + unblock-lan-zones: yes + insecure-lan-zones: yes + + local-zone: + - '"localhost." nodefault' + - '"127.in-addr.arpa." nodefault' + - '"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa." nodefault' + - '"onion." nodefault' + - '"test." nodefault' + - '"invalid." nodefault' + - '"10.in-addr.arpa." nodefault' + - '"16.172.in-addr.arpa." nodefault' + - '"17.172.in-addr.arpa." nodefault' + - '"18.172.in-addr.arpa." nodefault' + - '"19.172.in-addr.arpa." nodefault' + - '"20.172.in-addr.arpa." nodefault' + - '"21.172.in-addr.arpa." nodefault' + - '"22.172.in-addr.arpa." nodefault' + - '"23.172.in-addr.arpa." nodefault' + - '"24.172.in-addr.arpa." nodefault' + - '"25.172.in-addr.arpa." nodefault' + - '"26.172.in-addr.arpa." nodefault' + - '"27.172.in-addr.arpa." nodefault' + - '"28.172.in-addr.arpa." nodefault' + - '"29.172.in-addr.arpa." nodefault' + - '"30.172.in-addr.arpa." nodefault' + - '"31.172.in-addr.arpa." nodefault' + - '"168.192.in-addr.arpa." nodefault' + - '"0.in-addr.arpa." nodefault' + - '"254.169.in-addr.arpa." nodefault' + - '"2.0.192.in-addr.arpa." nodefault' + - '"100.51.198.in-addr.arpa." nodefault' + - '"113.0.203.in-addr.arpa." nodefault' + - '"255.255.255.255.in-addr.arpa." nodefault' + - '"0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa." nodefault' + - '"d.f.ip6.arpa." nodefault' + - '"8.e.f.ip6.arpa." nodefault' + - '"9.e.f.ip6.arpa." nodefault' + - '"a.e.f.ip6.arpa." nodefault' + - '"b.e.f.ip6.arpa." nodefault' + - '"8.b.d.0.1.0.0.2.ip6.arpa." 
nodefault' + + remote-control: + control-enable: no diff --git a/roles/unbound/handlers/main.yml b/roles/unbound/handlers/main.yml new file mode 100644 index 0000000..0061ece --- /dev/null +++ b/roles/unbound/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart unbound + service: + name: unbound + state: restarted diff --git a/roles/unbound/tasks/main.yml b/roles/unbound/tasks/main.yml new file mode 100644 index 0000000..b52055b --- /dev/null +++ b/roles/unbound/tasks/main.yml @@ -0,0 +1,99 @@ +- name: import internal ns forwarding snippet + include_vars: + file: internal.yml + hash_behaviour: merge + when: services.internal_ns is defined + + +- name: set unbound_cfg + set_fact: + unbound_cfg: "{{ unbound_default_config | d({}) | combine(unbound_config | d({}), recursive=true) }}" + + +- name: install unbound + include_tasks: tasks/install_packages.yml + vars: + package: + - unbound + - alpine: unbound-openrc + notify: restart unbound + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ unbound_user }}" + group: "{{ unbound_group }}" + + +- name: create unbound conf dir + file: + path: "{{ unbound_conf_dir }}" + state: directory + mode: 0755 + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + + +- name: template unbound configuration + template: + src: unbound.j2 + dest: "{{ unbound_conf_file }}" + force: yes + lstrip_blocks: yes + mode: 0400 + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + notify: restart unbound + + +- name: edit service config + lineinfile: + path: /etc/conf.d/unbound + regexp: "^{{ item.name }}=" + line: "{{ item.name }}=\"{{ item.value }}\"" + notify: restart unbound + loop: + - name: cfgfile + value: "{{ unbound_conf_file }}" + + +- name: download root hint file + get_url: + url: https://www.internic.net/domain/named.cache + dest: "{{ unbound_hints_file }}" + mode: 0644 + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + + +- name: create cron job for hint refresh + cron: + name: refresh root hints + minute: "{{ 59 | random(seed=inventory_hostname) }}" + hour: "{{ 5 | random(seed=inventory_hostname) }}" + job: "( wget -q -O {{ unbound_hints_file | quote }} https://www.internic.net/domain/named.cache ; \ + chown {{ unbound_user }}:{{ unbound_group }} {{ unbound_hints_file | quote }} ; \ + chmod 0644 {{ unbound_hints_file | quote }} \ + ) > /dev/null 2>&1" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ unbound_conf_dir }}" + + +- name: enable and start unbound + service: + name: unbound + enabled: yes + state: started diff --git a/roles/unbound/templates/unbound.j2 b/roles/unbound/templates/unbound.j2 new file mode 100644 index 0000000..7049e10 --- /dev/null +++ b/roles/unbound/templates/unbound.j2 @@ -0,0 +1,30 @@ +{%- macro unbound_option(option, padding = 0) -%} + {{- '' if (padding == 0) else (' ' * 4 * padding) -}} + + {% if option.value is boolean -%} + {{ option.key }}: {{ 'yes' if option.value else 'no' }} + {% elif option.value is string and ((option.value == '') or (option.key in unbound_string_parameters)) -%} + {{ option.key }}: "{{ option.value }}" + {% elif option.value | type_debug == 'list' -%} + {%- for suboption in option.value -%} + {%- if loop.first -%} + {{- unbound_option({ 'key': option.key, 'value': suboption}, 0) -}} + {%- else -%} + {{- unbound_option({ 'key': option.key, 'value': suboption}, padding) -}} + {%- endif -%} + {% endfor -%} 
+ {% elif option.value is mapping -%} + {{ option.key }}: + {% for suboption in (option.value | dict2items) -%} + {{ unbound_option(suboption, padding + 1) -}} + {% endfor -%} + {% elif option.value != None -%} + {{ option.key }}: {{ option.value }} + {% endif -%} +{%- endmacro -%} + + + +{% for option in (unbound_cfg | d({}) | dict2items) -%} + {{ unbound_option(option) -}} +{%- endfor -%} diff --git a/roles/unbound/vars/internal.yml b/roles/unbound/vars/internal.yml new file mode 100644 index 0000000..4f11004 --- /dev/null +++ b/roles/unbound/vars/internal.yml @@ -0,0 +1,7 @@ +unbound_default_config: + forward-zone: + name: "{{ int_tld }}" + forward-addr: "{{ hostvars[services.internal_ns.hostname]['ansible_host'] }}" + forward-first: no + forward-tls-upstream: no + forward-no-cache: no diff --git a/roles/uptime-kuma/defaults/main.yml b/roles/uptime-kuma/defaults/main.yml new file mode 100644 index 0000000..169bc57 --- /dev/null +++ b/roles/uptime-kuma/defaults/main.yml @@ -0,0 +1,5 @@ +uptime_kuma_user: ukuma +uptime_kuma_group: ukuma +uptime_kuma_dir: /opt/ukuma + +uptime_kuma_port: 3000 \ No newline at end of file diff --git a/roles/uptime-kuma/handlers/main.yml b/roles/uptime-kuma/handlers/main.yml new file mode 100644 index 0000000..2f47a91 --- /dev/null +++ b/roles/uptime-kuma/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart uptime-kuma + service: + name: uptime-kuma + state: restarted diff --git a/roles/uptime-kuma/tasks/main.yml b/roles/uptime-kuma/tasks/main.yml new file mode 100644 index 0000000..57618e1 --- /dev/null +++ b/roles/uptime-kuma/tasks/main.yml @@ -0,0 +1,136 @@ +- name: install dependencies + include_tasks: tasks/install_packages.yml + vars: + package: + - nodejs + - npm + - iputils + - sqlite + - sqlite-dev + - python3 + - py3-cryptography + - py3-pip + - py3-six + - py3-yaml + - py3-click + - py3-markdown + - py3-requests + - py3-requests-oauthlib + + +- name: install pip dependencies + pip: + name: apprise==0.9.7 + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ uptime_kuma_user }}" + group: "{{ uptime_kuma_group }}" + dir: "{{ uptime_kuma_dir }}" + comment: "uptime-kuma service user" + notify: restart uptime-kuma + + +- name: get and extract latest version of uptime-kuma sources + include_tasks: tasks/get_lastversion.yml + vars: + package: + name: louislam/uptime-kuma + location: github + sources: yes + file: "{{ uptime_kuma_dir }}/last_version" + extract: "{{ uptime_kuma_dir }}" + user: "{{ uptime_kuma_user }}" + group: "{{ uptime_kuma_group }}" + notify: restart uptime-kuma + strip_first_dir: yes + + +- name: get and extract latest version of uptime-kuma assets + include_tasks: tasks/get_lastversion.yml + vars: + package: + name: louislam/uptime-kuma + location: github + assets: yes + asset_filter: '.tar.gz$' + extract: "{{ uptime_kuma_dir }}" + user: "{{ uptime_kuma_user }}" + group: "{{ uptime_kuma_group }}" + notify: restart uptime-kuma + force_download: yes + when: package_changed | d(false) == true + + +- name: ensure server script has executable bit set + file: + path: "{{ uptime_kuma_dir }}/server/server.js" + mode: "+x" + + +- name: install sqlite3 + npm: + path: "{{ uptime_kuma_dir }}" + name: sqlite3 + production: yes + become: yes + become_user: "{{ uptime_kuma_user }}" + become_method: su + become_flags: '-s /bin/ash' + changed_when: no + + +- name: install npm dependencies + npm: + path: "{{ uptime_kuma_dir }}" + no_optional: yes + ignore_scripts: yes + production: yes + become: yes + 
become_user: "{{ uptime_kuma_user }}" + become_method: su + become_flags: '-s /bin/ash' + notify: restart uptime-kuma + changed_when: no + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/uptime-kuma + force: yes + mode: "+x" + notify: restart uptime-kuma + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ uptime_kuma_dir }}" + + +- name: enable and start uptime-kuma + service: + name: uptime-kuma + enabled: yes + state: started diff --git a/roles/uptime-kuma/templates/init.j2 b/roles/uptime-kuma/templates/init.j2 new file mode 100644 index 0000000..d53c083 --- /dev/null +++ b/roles/uptime-kuma/templates/init.j2 @@ -0,0 +1,16 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ uptime_kuma_dir }}" +command="node {{ uptime_kuma_dir }}/server/server.js" +command_args="--host=127.0.0.1 --port={{ uptime_kuma_port }}" +command_user="{{ uptime_kuma_user }}:{{ uptime_kuma_group }}" +pidfile="/var/run/$SVCNAME.pid" +#command_background=true +#start_stop_daemon_args="--stdout-logger logger --stderr-logger logger" +supervisor="supervise-daemon" + +depend() { + need net + use dns +} diff --git a/roles/uptime-kuma/templates/nginx_server.j2 b/roles/uptime-kuma/templates/nginx_server.j2 new file mode 100644 index 0000000..a131399 --- /dev/null +++ b/roles/uptime-kuma/templates/nginx_server.j2 @@ -0,0 +1,9 @@ +location / { + proxy_pass http://127.0.0.1:{{ uptime_kuma_port }}; + proxy_http_version 1.1; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; +} diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml new file mode 100644 index 0000000..9090715 --- /dev/null +++ b/roles/vault/defaults/main.yml @@ -0,0 +1,44 @@ +vault_user: vault +vault_group: vault +vault_dir: /opt/vault +vault_extract_dir: /opt/docker +vault_port: 8080 +vault_websocket_port: 3012 +vault_supervised: yes + +vault_default_config: + database_url: "{{ 'postgresql://' ~ database_user ~ ':' ~ database_pass ~ '@' ~ database_host ~ '/' ~ database_name }}" + domain: "{{ host_url }}" + invitation_org_name: "{{ org }}" + admin_token: "{{ vault_admin_token }}" + rocket_port: "{{ vault_port }}" + websocket_port: "{{ vault_websocket_port }}" + + org_attachment_limit: "{{ ((hardware.disk | d(10) | float) * 1024 * 1024 / 30) | int | abs }}" + user_attachment_limit: "{{ ((hardware.disk | d(10) | float) * 1024 * 1024 / 90) | int | abs }}" + + database_max_conns: 4 + websocket_enabled: yes + websocket_address: 127.0.0.1 + sends_allowed: yes + emergency_access_allowed: no + extended_logging: yes + use_syslog: yes + log_level: warn + db_connection_retries: 0 + icon_blacklist_non_global_ips: no + email_attempts_limit: 6 + email_expiration_time: 2400 + signups_allowed: no + signups_verify: yes + signups_verify_resend_time: 120 + signups_verify_resend_limit: 10 + org_creation_users: all + invitations_allowed: yes + incomplete_2fa_time_limit: 5 + trash_auto_delete_days: 90 + password_iterations: 5000 + rocket_address: 127.0.0.1 + rocket_workers: 4 + _enable_yubico: no + _enable_duo: no diff --git a/roles/vault/handlers/main.yml 
b/roles/vault/handlers/main.yml new file mode 100644 index 0000000..af37ece --- /dev/null +++ b/roles/vault/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart vaultwarden + service: + name: vaultwarden + state: restarted diff --git a/roles/vault/tasks/main.yml b/roles/vault/tasks/main.yml new file mode 100644 index 0000000..b386669 --- /dev/null +++ b/roles/vault/tasks/main.yml @@ -0,0 +1,173 @@ +- name: import mail vars if mail is enabled + include_vars: + file: mail.yml + when: (host_mail | d(true) == true) and (mail_account is mapping) and + (mail_account.username is defined) and (mail_account.password is defined) + + +- name: set vault_cfg + set_fact: + vault_cfg: "{{ vault_default_config | d({}) | combine(vault_mail_config | d({}), recursive=true) | combine(vault_config | d({}), recursive=true) }}" + + +- name: install curl + include_tasks: tasks/install_packages.yml + vars: + package: + - curl + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ vault_user }}" + group: "{{ vault_group }}" + dir: "{{ vault_dir }}" + comment: "vaultwarden service user" + notify: restart vaultwarden + + +- name: create data directory + file: + path: "{{ vault_dir }}/data" + state: directory + mode: 0750 + owner: "{{ vault_user }}" + group: "{{ vault_group }}" + + +- name: ensure extract dir exists + file: + path: "{{ vault_extract_dir }}" + state: directory + + +- name: download docker-image-extract script + get_url: + url: "https://raw.githubusercontent.com/jjlin/docker-image-extract/main/docker-image-extract" + dest: "{{ vault_extract_dir }}" + timeout: 20 + mode: "+x" + + +- name: run docker-image-extract + command: + cmd: "{{ vault_extract_dir }}/docker-image-extract vaultwarden/server:alpine" + chdir: "{{ vault_extract_dir }}" + register: result + changed_when: no + failed_when: result.rc != 0 + + +- name: check if output directory exists + stat: + path: "{{ vault_extract_dir }}/output" + register: result + + +- name: fail if output directory is missing + fail: + msg: output directory is missing + when: not (result.stat.isdir is defined and result.stat.isdir) + + +- name: move vaultwarden to vault dir + copy: + src: "{{ vault_extract_dir ~ '/output/vaultwarden' }}" + dest: "{{ vault_dir ~ '/vaultwarden' }}" + force: yes + remote_src: yes + owner: "{{ vault_user }}" + group: "{{ vault_group }}" + notify: restart vaultwarden + + +- name: remove output directory + file: + path: "{{ vault_extract_dir }}/output" + state: absent + changed_when: no + + +- name: ensure vaultwarden has executable bit set + file: + path: "{{ vault_dir }}/vaultwarden" + mode: "+x" + + +- name: get and extract latest version of web-vault + include_tasks: tasks/get_lastversion.yml + vars: + package: + name: dani-garcia/bw_web_builds + location: github + assets: yes + asset_filter: '.tar.gz$' + file: "{{ vault_dir }}/last_version" + extract: "{{ vault_dir }}" + user: "{{ vault_user }}" + group: "{{ vault_group }}" + notify: restart vaultwarden + + +- name: template .env file + template: + src: env.j2 + dest: "{{ vault_dir }}/.env" + force: yes + mode: 0400 + owner: "{{ vault_user }}" + group: "{{ vault_group }}" + lstrip_blocks: yes + notify: restart vaultwarden + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/vaultwarden + force: yes + mode: "+x" + notify: restart vaultwarden + + +- name: ensure correct ownership in vault dir + file: + path: "{{ vault_dir }}" + state: directory + follow: no + recurse: yes + owner: "{{ vault_user }}" + group: "{{ 
vault_group }}" + notify: restart vaultwarden + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ vault_dir }}" + + +- name: enable and start vaultwarden + service: + name: vaultwarden + enabled: yes + state: started diff --git a/roles/vault/templates/env.j2 b/roles/vault/templates/env.j2 new file mode 100644 index 0000000..acba05c --- /dev/null +++ b/roles/vault/templates/env.j2 @@ -0,0 +1,11 @@ +{% for option in (vault_cfg | d({}) | dict2items) -%} + {% if option.value != None -%} + {% if option.value is boolean -%} + {{- option.key | upper }}={{ 'true' if option.value else 'false' }} + {% elif option.value is string -%} + {{- option.key | upper }}='{{ option.value }}' + {% else -%} + {{- option.key | upper }}={{ option.value }} + {% endif -%} + {% endif -%} +{% endfor -%} diff --git a/roles/vault/templates/init.j2 b/roles/vault/templates/init.j2 new file mode 100644 index 0000000..08eabeb --- /dev/null +++ b/roles/vault/templates/init.j2 @@ -0,0 +1,19 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ vault_dir }}" +command="{{ vault_dir }}/vaultwarden" +command_user="{{ vault_user ~ ':' ~ vault_group }}" +pidfile="/var/run/$SVCNAME.pid" +{% if vault_supervised | d(false) == true -%} +supervisor="supervise-daemon" +{% else -%} +command_background=true +{%- endif %} + +depend() { + need net + use dns + before nginx + after postgresql mariadb +} diff --git a/roles/vault/templates/nginx_server.j2 b/roles/vault/templates/nginx_server.j2 new file mode 100644 index 0000000..cc8e4b4 --- /dev/null +++ b/roles/vault/templates/nginx_server.j2 @@ -0,0 +1,32 @@ +proxy_http_version 1.1; +proxy_set_header Connection ""; +proxy_set_header Host $host; +proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; + +location / { + proxy_pass http://127.0.0.1:{{ vault_port }}; +} + +location /notifications/hub/negotiate { + proxy_pass http://127.0.0.1:{{ vault_port }}; +} + +location /notifications/hub { + proxy_pass http://127.0.0.1:{{ vault_websocket_port }}; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Forwarded $remote_addr; +} + + +{% if admin_net is defined -%} +location /admin { + {% for network in admin_net -%} + allow {{ network }}; + {% endfor %} + deny all; + + proxy_pass http://127.0.0.1:{{ vault_port }}; +} +{%- endif %} diff --git a/roles/vault/vars/mail.yml b/roles/vault/vars/mail.yml new file mode 100644 index 0000000..ba01440 --- /dev/null +++ b/roles/vault/vars/mail.yml @@ -0,0 +1,9 @@ +vault_mail_config: + smtp_host: "{{ mail_account.server | d(mail_server.mta_actual_hostname ~ '.'
~ int_tld) }}" + smtp_from: "{{ mail_account.from | d(mail_account.username ~ '@' ~ (mail_account.domain | d(mail_server.tld))) }}" + smtp_from_name: "{{ 'Vaultwarden | ' ~ org }}" + smtp_security: force_tls + smtp_port: 465 + smtp_username: "{{ mail_account.username }}" + smtp_password: "{{ mail_account.password }}" + helo_name: "{{ host_fqdn }}" diff --git a/roles/web/defaults/main.yml b/roles/web/defaults/main.yml new file mode 100644 index 0000000..c23eb48 --- /dev/null +++ b/roles/web/defaults/main.yml @@ -0,0 +1 @@ +webroot_dir: /opt/web diff --git a/roles/web/files/logo.svg b/roles/web/files/logo.svg new file mode 100644 index 0000000..6ee1d41 --- /dev/null +++ b/roles/web/files/logo.svg @@ -0,0 +1,8 @@ + + + Monroe LLC + Monroe LLC + + + + diff --git a/roles/web/tasks/main.yml b/roles/web/tasks/main.yml new file mode 100644 index 0000000..5c5d73e --- /dev/null +++ b/roles/web/tasks/main.yml @@ -0,0 +1,97 @@ +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + security: + csp: + pp: + headers: + Cross-Origin-Embedder-Policy: + Cross-Origin-Opener-Policy: + Cross-Origin-Resource-Policy: + + servers: + - conf: redirect_80_any + name: '*' + http: yes + + - conf: redirect_80_monroe43 + name: '*.monroe43.ru' + no_tld: yes + http: yes + + - conf: redirect_443_monroe43 + name: 'monroe43.ru' + no_tld: yes + - conf: redirect_443_monroe43 + name: 'www.monroe43.ru' + no_tld: yes + + - conf: shop + name: shop + - conf: feedback + name: feedback + - conf: welcome-spb + name: welcome-spb + - conf: spb-10-8 + name: spb-10-8 + - conf: mta-sts + name: mta-sts + + - conf: default + is_root: yes + cn: yes + - conf: default + name: www + + certs: yes + tld: "{{ tld }}" + security_headers: yes + + domains: + - "{{ tld }}" + - "www.{{ tld }}" + - "monroe43.ru" + - "www.monroe43.ru" + - "feedback.{{ tld }}" + - "mta-sts.{{ tld }}" + - "shop.{{ tld }}" + - "spb-10-8.{{ tld }}" + - "welcome-spb.{{ tld }}" + + +- name: create directories + file: + path: "{{ webroot_dir ~ '/' ~ item }}" + state: directory + loop: + - acme + - acme/.well-known + - acme/.well-known/acme-challenge + - static + - mta-sts + - mta-sts/.well-known + + +- name: build mta-sts file + template: + src: mta-sts-file.j2 + dest: "{{ webroot_dir }}/mta-sts/.well-known/mta-sts.txt" + force: yes + lstrip_blocks: yes + + +- name: upload static bimi logo + copy: + src: logo.svg + dest: "{{ webroot_dir }}/static/logo.svg" + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ webroot_dir }}" diff --git a/roles/web/templates/default.j2 b/roles/web/templates/default.j2 new file mode 100644 index 0000000..5cdffd0 --- /dev/null +++ b/roles/web/templates/default.j2 @@ -0,0 +1,13 @@ +location / { + root {{ (webroot_dir ~ '/static') | quote }}; + try_files $uri @legacy_site; +} + +location /le-game { + return 301 https://vk.com/app5898182_-200204055#s=1306418; +} + +location @legacy_site { + proxy_pass http://85.119.149.96/$request_uri; + proxy_set_header Host $host; +} diff --git a/roles/web/templates/feedback.j2 b/roles/web/templates/feedback.j2 new file mode 100644 index 0000000..de2b1b7 --- /dev/null +++ b/roles/web/templates/feedback.j2 @@ -0,0 +1,15 @@ +location / { + return 301 https://docs.google.com/forms/d/e/1FAIpQLSfKawJbfRj4WBT18q40u6qSPGYvYlWOqhQQrb97ynqlM4vKsQ/viewform; +} +location /center { + return 301 https://docs.google.com/forms/d/e/1FAIpQLSfKawJbfRj4WBT18q40u6qSPGYvYlWOqhQQrb97ynqlM4vKsQ/viewform; +} +location /fileyka { + return 301 
https://docs.google.com/forms/d/e/1FAIpQLSce8l_dDPHTCVudxBgpcIGELZrc1P-GT7sm_8FmSNKZf14I9Q/viewform; +} +location /rassvet { + return 301 https://docs.google.com/forms/d/e/1FAIpQLSdCHgZb_dDcRJwUv63KrcRO0YuLap3LCIy3uKiPkjS1JTuRdA/viewform; +} +location /spb { + return 301 https://docs.google.com/forms/d/e/1FAIpQLSdVoZ-8EQf1BlzaFJms28rFLFDoO7j28SIo6GVPauhYAIMoEA/viewform; +} diff --git a/roles/web/templates/mta-sts-file.j2 b/roles/web/templates/mta-sts-file.j2 new file mode 100644 index 0000000..e38d09c --- /dev/null +++ b/roles/web/templates/mta-sts-file.j2 @@ -0,0 +1,4 @@ +version: STSv1 +mode: enforce +max_age: 2419200 +mx: {{ mail_server.mta_actual_hostname }}.{{ mail_server.tld }} diff --git a/roles/web/templates/mta-sts.j2 b/roles/web/templates/mta-sts.j2 new file mode 100644 index 0000000..9bfa62a --- /dev/null +++ b/roles/web/templates/mta-sts.j2 @@ -0,0 +1,8 @@ +location / { + return 404; +} + +location /.well-known { + root {{ (webroot_dir ~ '/mta-sts') | quote }}; + try_files $uri =404; +} diff --git a/roles/web/templates/redirect_443_monroe43.j2 b/roles/web/templates/redirect_443_monroe43.j2 new file mode 100644 index 0000000..20e2f60 --- /dev/null +++ b/roles/web/templates/redirect_443_monroe43.j2 @@ -0,0 +1,3 @@ +location / { + return 301 https://monroe.fitness; +} diff --git a/roles/web/templates/redirect_80_any.j2 b/roles/web/templates/redirect_80_any.j2 new file mode 100644 index 0000000..46e77fe --- /dev/null +++ b/roles/web/templates/redirect_80_any.j2 @@ -0,0 +1,7 @@ +location / { + return 301 https://$host$request_uri; +} + +location /.well-known/acme-challenge/ { + alias /www/acme/.well-known/acme-challenge/; +} diff --git a/roles/web/templates/redirect_80_monroe43.j2 b/roles/web/templates/redirect_80_monroe43.j2 new file mode 100644 index 0000000..f39a511 --- /dev/null +++ b/roles/web/templates/redirect_80_monroe43.j2 @@ -0,0 +1,7 @@ +location / { + return 301 https://monroe.fitness$request_uri; +} + +location /.well-known/acme-challenge/ { + alias /www/acme/.well-known/acme-challenge/; +} diff --git a/roles/web/templates/shop.j2 b/roles/web/templates/shop.j2 new file mode 100644 index 0000000..f237ff4 --- /dev/null +++ b/roles/web/templates/shop.j2 @@ -0,0 +1,9 @@ +location / { + proxy_pass https://{{ hostvars[services.shop.hostname]['ansible_host'] }}$request_uri; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Real-IP $remote_addr; + proxy_connect_timeout 45s; + proxy_read_timeout 45s; + proxy_http_version 1.1; +} diff --git a/roles/web/templates/spb-10-8.j2 b/roles/web/templates/spb-10-8.j2 new file mode 100644 index 0000000..1f82026 --- /dev/null +++ b/roles/web/templates/spb-10-8.j2 @@ -0,0 +1,3 @@ +location / { + return 301 https://api.whatsapp.com/send?phone=79319556480&text=%D0%97%D0%B4%D1%80%D0%B0%D0%B2%D1%81%D1%82%D0%B2%D1%83%D0%B9%D1%82%D0%B5%2C%20%D1%85%D0%BE%D1%87%D1%83%20%D0%B0%D0%B1%D0%BE%D0%BD%D0%B5%D0%BC%D0%B5%D0%BD%D1%82%20%D0%BF%D0%BE%20%D0%B0%D0%BA%D1%86%D0%B8%D0%B8%21; +} diff --git a/roles/web/templates/welcome-spb.j2 b/roles/web/templates/welcome-spb.j2 new file mode 100644 index 0000000..32d8552 --- /dev/null +++ b/roles/web/templates/welcome-spb.j2 @@ -0,0 +1,7 @@ +location / { + return 301 
https://api.whatsapp.com/send?phone=79319556480&text=%D0%97%D0%B4%D1%80%D0%B0%D0%B2%D1%81%D1%82%D0%B2%D1%83%D0%B9%D1%82%D0%B5%2C%20%D0%B7%D0%B0%D0%BF%D0%B8%D1%88%D0%B8%D1%82%D0%B5%20%D0%BC%D0%B5%D0%BD%D1%8F%20%D0%BD%D0%B0%20%D0%BF%D1%80%D0%BE%D0%B1%D0%BD%D1%83%D1%8E%20%D1%82%D1%80%D0%B5%D0%BD%D0%B8%D1%80%D0%BE%D0%B2%D0%BA%D1%83%21; +} + +location /new { + return 301 https://api.whatsapp.com/send?phone=79112914440&text=%D0%97%D0%B4%D1%80%D0%B0%D0%B2%D1%81%D1%82%D0%B2%D1%83%D0%B9%D1%82%D0%B5%2C%20%D0%B7%D0%B0%D0%BF%D0%B8%D1%88%D0%B8%D1%82%D0%B5%20%D0%BC%D0%B5%D0%BD%D1%8F%20%D0%BD%D0%B0%20%D0%BF%D1%80%D0%BE%D0%B1%D0%BD%D1%83%D1%8E%20%D1%82%D1%80%D0%B5%D0%BD%D0%B8%D1%80%D0%BE%D0%B2%D0%BA%D1%83%21; +} diff --git a/roles/wikijs/defaults/main.yml b/roles/wikijs/defaults/main.yml new file mode 100644 index 0000000..2820c31 --- /dev/null +++ b/roles/wikijs/defaults/main.yml @@ -0,0 +1,25 @@ +wiki_user: wikijs +wiki_group: wikijs +wiki_dir: /opt/wikijs + + +wiki_default_config: + bindIP: 0.0.0.0 + port: 3000 + + db: + type: postgres + host: "{{ database_host }}" + port: 5432 + user: "{{ database_user }}" + pass: "{{ database_pass }}" + db: "{{ database_name }}" + ssl: no + + ssl: + enabled: no + + logLevel: info + offline: no + ha: no + dataPath: ./data diff --git a/roles/wikijs/handlers/main.yml b/roles/wikijs/handlers/main.yml new file mode 100644 index 0000000..9689a93 --- /dev/null +++ b/roles/wikijs/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart wikijs + service: + name: wikijs + state: restarted \ No newline at end of file diff --git a/roles/wikijs/tasks/main.yml b/roles/wikijs/tasks/main.yml new file mode 100644 index 0000000..191951b --- /dev/null +++ b/roles/wikijs/tasks/main.yml @@ -0,0 +1,135 @@ +- name: set wiki_cfg + set_fact: + wiki_cfg: "{{ wiki_default_config | d({}) | combine(wiki_config | d({}), recursive=true) }}" + + +- name: install nodejs + include_tasks: tasks/install_packages.yml + vars: + package: + - nodejs + + +- name: create user and group + include_tasks: tasks/create_user.yml + vars: + user: + name: "{{ wiki_user }}" + group: "{{ wiki_group }}" + dir: "{{ wiki_dir }}" + notify: restart wikijs + + +- name: create wiki dir structure + file: + path: "{{ item }}" + state: directory + owner: "{{ wiki_user }}" + group: "{{ wiki_group }}" + loop: + - "{{ wiki_dir }}" + + +- name: get latest asset of wikijs + include_tasks: get_lastversion.yml + vars: + package: + name: Requarks/wiki + location: github + assets: true + asset_filter: 'wiki-js.tar.gz$' + file: "{{ wiki_dir }}/last_version" + extract: "{{ wiki_dir }}" + user: "{{ wiki_user }}" + group: "{{ wiki_group }}" + notify: restart wikijs + + +- name: template config + template: + src: config.j2 + dest: "{{ wiki_dir }}/config.yml" + force: yes + mode: 0400 + owner: "{{ wiki_user }}" + group: "{{ wiki_group }}" + notify: restart wikijs + + +- name: template init script + template: + src: init.j2 + dest: /etc/init.d/wikijs + force: yes + mode: "+x" + notify: restart wikijs + + +- block: + - name: change ico favicon + copy: + src: favicon_wiki.ico + dest: "{{ wiki_dir }}/assets/favicon.ico" + mode: 0444 + owner: "{{ wiki_user }}" + group: "{{ wiki_group }}" + + + - name: change png favicons + copy: + src: favicon_wiki.png + dest: "{{ wiki_dir }}/assets/favicons/{{ item }}" + mode: 0444 + owner: "{{ wiki_user }}" + group: "{{ wiki_group }}" + loop: + - android-chrome-192x192.png + - android-chrome-256x256.png + - apple-touch-icon.png + - favicon-16x16.png + - favicon-32x32.png + - mstile-150x150.png + + when: 
wiki_custom_icons | d(false) == true + + +- name: enable trgm extension + include_role: + name: postgres + apply: + delegate_to: "{{ database_hostname }}" + vars: + function: run_query + query: + database: "{{ database_name }}" + text: "CREATE EXTENSION IF NOT EXISTS pg_trgm;" + + +- name: install and configure nginx + include_role: + name: nginx + vars: + nginx: + servers: + - conf: nginx_server + certs: "{{ host_tls }}" + + +- name: flush handlers + meta: flush_handlers + + +- name: add directories to backup plan + include_role: + name: backup + vars: + function: add + backup_items: + - "{{ wiki_dir }}" + + +- name: enable and start wikijs + service: + name: wikijs + enabled: yes + state: started diff --git a/roles/wikijs/templates/config.j2 b/roles/wikijs/templates/config.j2 new file mode 100644 index 0000000..24e52dd --- /dev/null +++ b/roles/wikijs/templates/config.j2 @@ -0,0 +1 @@ +{{ wiki_cfg | to_nice_yaml(indent=2, width=512) }} diff --git a/roles/wikijs/templates/init.j2 b/roles/wikijs/templates/init.j2 new file mode 100644 index 0000000..93c2663 --- /dev/null +++ b/roles/wikijs/templates/init.j2 @@ -0,0 +1,13 @@ +#!/sbin/openrc-run + +name="$SVCNAME" +directory="{{ wiki_dir }}" +command="node {{ wiki_dir }}/server" +command_user="{{ wiki_user }}:{{ wiki_group }}" +pidfile="/var/run/$SVCNAME.pid" +command_background=true + +depend() { + need net + use dns +} diff --git a/roles/wikijs/templates/nginx_server.j2 b/roles/wikijs/templates/nginx_server.j2 new file mode 100644 index 0000000..f1a6e68 --- /dev/null +++ b/roles/wikijs/templates/nginx_server.j2 @@ -0,0 +1,11 @@ +location / { + proxy_pass http://127.0.0.1:{{ wiki_cfg.port }}; + + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; +} diff --git a/roles/workstation/tasks/get_all_hkus.yml b/roles/workstation/tasks/get_all_hkus.yml new file mode 100644 index 0000000..5be3f2e --- /dev/null +++ b/roles/workstation/tasks/get_all_hkus.yml @@ -0,0 +1,20 @@ +- name: get all HKUs + win_reg_stat: + path: HKU:\ + register: all_hkus + failed_when: not all_hkus.exists or ((all_hkus.sub_keys | default([])) | length == 0) + + +- name: filter HKUs that are valid and correspond to actual users + set_fact: + good_hkus: "{{ all_hkus.sub_keys | select('match', '^S-1-5-21-\\d+-\\d+-\\d+-\\d+$') | list }}" + failed_when: (good_hkus | default([]) | length) == 0 + + +- debug: + msg: "got HKUs: {{ good_hkus | join(', ') }}" + + +- name: combine HKUs with root HKU to form a full registry path + set_fact: + hkus: "{{ good_hkus | map('regex_replace', '^(.*)$', 'HKU:\\\\\\1') | list }}" \ No newline at end of file diff --git a/roles/workstation/tasks/main.yml b/roles/workstation/tasks/main.yml new file mode 100644 index 0000000..09b0d32 --- /dev/null +++ b/roles/workstation/tasks/main.yml @@ -0,0 +1,40 @@ +- block: + - name: fail if host does not support winrm + fail: + msg: winrm support is missing + when: ansible_connection != 'winrm' + + + - name: redefine ansible_host + set_fact: + ansible_host: "{{ host_fqdn }}" + when: ansible_host is ansible.utils.ip_address + + + - name: prepare connection + include_tasks: prepare.yml + + + - name: gather facts + setup: + gather_subset: + - min + + + - include_tasks: win_icons.yml + - include_tasks: win_ntp.yml + + - include_tasks: win_firewall.yml + + - include_tasks:
win_description.yml + - include_tasks: win_timezone.yml + - include_tasks: win_hide_account.yml + - include_tasks: win_hosts.yml + - include_tasks: win_tweaks.yml + - include_tasks: win_chocolatey.yml + - include_tasks: win_backup.yml + - include_tasks: win_install_libreoffice.yml + - include_tasks: win_install_lmserver.yml + - include_tasks: win_install_seadrive.yml + + when: workstation_unmanaged | d(false) == false \ No newline at end of file diff --git a/roles/workstation/tasks/prepare.yml b/roles/workstation/tasks/prepare.yml new file mode 100644 index 0000000..46b61da --- /dev/null +++ b/roles/workstation/tasks/prepare.yml @@ -0,0 +1,56 @@ +- block: + - wait_for_connection: + timeout: 10 + + rescue: + - name: set bootstrap password if connection fails + set_fact: + winrm_old_password: "{{ ansible_password }}" + ansible_password: "{{ winrm_bootstrap_password }}" + + +- name: gather facts + setup: + gather_subset: + - min + + +- name: fail if Windows version is not 10 + fail: + msg: "only Windows 10 is supported" + when: (ansible_os_family != 'Windows') or (ansible_distribution_major_version|int < 10) + + +- name: setup service account for remote control + win_user: + name: "{{ winrm_remote_user }}" + account_disabled: no + account_locked: no + password: "{{ winrm_old_password | d(ansible_password) }}" + password_expired: no + password_never_expires: yes + groups: + - S-1-5-32-544 + - S-1-5-32-580 + groups_action: add + + +- name: set correct password if it was changed earlier + set_fact: + ansible_password: "{{ winrm_old_password }}" + when: winrm_old_password is defined + + +- name: ensure LocalAccountTokenFilterPolicy is set to 1 + win_regedit: + path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System + name: LocalAccountTokenFilterPolicy + data: 1 + type: dword + + +- name: setup winrm service + win_service: + name: WinRM + start_mode: auto + state: started diff --git a/roles/workstation/tasks/win_backup.yml b/roles/workstation/tasks/win_backup.yml new file mode 100644 index 0000000..34eb46d --- /dev/null +++ b/roles/workstation/tasks/win_backup.yml @@ -0,0 +1,36 @@ +- name: setup backups + include_role: + name: backup + vars: + function: setup + backup_cfg: + password: "{{ backup_password }}" + tags: automated + max_size: "{{ backup_max_size | d('100M') }}" + dirs: "{{ backup_dirs | d(['C:\\Users']) }}" + filter: + - "$$RECYCLE.BIN" + - ".*/" + - "SeaDrive" + - "SeaFile" + - "seadrive_root" + - "Application Data" + - "AppData" + - "Local Settings" + - "System Volume Information" + - "*.exe" + - "*.rar" + - "*.zip" + - "*.tmp" + - "*.log" + - "*.dll" + - "*.dat" + - "*.sys" + - "*.log1" + - "Google/Chrome/*cache*" + - "Mozilla/Firefox/*cache*" + - "node_modules" + fs_snapshot: yes + interval: weekly + days_of_week: "{{ ['tuesday', 'thursday', 'saturday'] if ((2 | random(seed=host_fqdn)) == 1) else ['monday', 'wednesday', 'friday'] }}" + random_delay: PT10M diff --git a/roles/workstation/tasks/win_chocolatey.yml b/roles/workstation/tasks/win_chocolatey.yml new file mode 100644 index 0000000..fa9d22b --- /dev/null +++ b/roles/workstation/tasks/win_chocolatey.yml @@ -0,0 +1,43 @@ +- name: ensure chocolatey is installed + chocolatey.chocolatey.win_chocolatey: + name: chocolatey + state: latest + source: https://community.chocolatey.org/api/v2/ + ignore_errors: yes + + +- name: set chocolatey config + chocolatey.chocolatey.win_chocolatey_config: + name: "{{ item.name }}" + value: "{{ item.value }}" + loop: + - name: cacheLocation + value: "{{ ansible_env.ALLUSERSPROFILE
}}\\choco-cache" + - name: commandExecutionTimeoutSeconds + value: 14400 + + +- name: set chocolatey features + chocolatey.chocolatey.win_chocolatey_feature: + name: "{{ item.name }}" + state: "{{ item.state }}" + loop: + - name: allowGlobalConfirmation + state: enabled + - name: allowEmptyChecksumsSecure + state: disabled + + +- name: remove default source + chocolatey.chocolatey.win_chocolatey_source: + name: chocolatey + state: absent + + +- name: add custom source + chocolatey.chocolatey.win_chocolatey_source: + name: internal + priority: 1 + source: '\\{{ services.smb.hostname }}.{{ services.smb.tld | d(int_tld) }}\soft' + state: present + when: services.smb is mapping \ No newline at end of file diff --git a/roles/workstation/tasks/win_description.yml b/roles/workstation/tasks/win_description.yml new file mode 100644 index 0000000..f60e6da --- /dev/null +++ b/roles/workstation/tasks/win_description.yml @@ -0,0 +1,5 @@ +- name: set windows description + community.windows.win_computer_description: + description: "" + owner: "" + organization: "{{ org_localized }}" diff --git a/roles/workstation/tasks/win_firewall.yml b/roles/workstation/tasks/win_firewall.yml new file mode 100644 index 0000000..b7eb2a3 --- /dev/null +++ b/roles/workstation/tasks/win_firewall.yml @@ -0,0 +1,29 @@ +- name: allow ICMP for local network + win_firewall_rule: + name: ICMP - Local network + description: "Allow ICMP packets from corp network" + enabled: yes + action: allow + direction: in + profiles: + - domain + - private + protocol: icmpv4 + remoteip: "{{ int_net }}" + state: present + + +- name: allow SMB for local network + win_firewall_rule: + name: SMB - Local network + description: "Allow SMB packets from corp network" + enabled: yes + action: allow + direction: in + profiles: + - domain + - private + protocol: tcp + remoteip: "{{ int_net }}" + remoteport: 445 + state: present diff --git a/roles/workstation/tasks/win_font.yml b/roles/workstation/tasks/win_font.yml new file mode 100644 index 0000000..f7ac0f1 --- /dev/null +++ b/roles/workstation/tasks/win_font.yml @@ -0,0 +1,13 @@ +- name: download font to remote host + win_copy: + src: "{{ item.file }}" + dest: '{{ ansible_env.SystemRoot }}\Fonts\{{ item.file }}' + force: yes + + +- name: add font to registry + win_regedit: + path: HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts + name: "{{ item.name }}" + type: string + data: "{{ item.file }}" diff --git a/roles/workstation/tasks/win_fonts.yml b/roles/workstation/tasks/win_fonts.yml new file mode 100644 index 0000000..b9fb40d --- /dev/null +++ b/roles/workstation/tasks/win_fonts.yml @@ -0,0 +1,39 @@ +- set_fact: + ws_fonts: "{{ ws_fonts | default([]) + + [ {'name': 'Exo 2 ' ~ item ~ ' (TrueType)', 'file': 'Exo2-' ~ (item | replace(' ', '')) ~ '.ttf' } ] + + [ {'name': 'Exo 2 ' ~ item ~ ' Italic (TrueType)', 'file': 'Exo2-' ~ (item | replace(' ', '')) ~ 'Italic.ttf' } ] }}" + loop: + - Black + - Bold + - Extra Bold + - Extra Light + - Light + - Medium + - Semi Bold + - Thin + + +- include_tasks: win_font.yml + loop: + - name: Exo 2 Regular (TrueType) + file: Exo2-Regular.ttf + - name: Exo 2 Italic (TrueType) + file: Exo2-Italic.ttf + + +- include_tasks: win_font.yml + loop: "{{ ws_fonts }}" + + +- include_tasks: win_font.yml + loop: + - name: Elektra Text Pro (TrueType) + file: ElektraText.otf + - name: Elektra Text Pro Bold (TrueType) + file: ElektraTextBold.otf + - name: Elektra Text Pro Bold Italic (TrueType) + file: ElektraTextBoldItalic.otf + - name: Elektra Text Pro Italic (TrueType) + 
file: ElektraTextItalic.otf + - name: Elektra Light Pro (TrueType) + file: ElektraTextLight.otf diff --git a/roles/workstation/tasks/win_hide_account.yml b/roles/workstation/tasks/win_hide_account.yml new file mode 100644 index 0000000..3176f0e --- /dev/null +++ b/roles/workstation/tasks/win_hide_account.yml @@ -0,0 +1,7 @@ +- name: hide service account + win_regedit: + path: HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon\SpecialAccounts\UserList + name: "{{ winrm_remote_user }}" + type: dword + data: 0 + state: present diff --git a/roles/workstation/tasks/win_hosts.yml b/roles/workstation/tasks/win_hosts.yml new file mode 100644 index 0000000..8df3627 --- /dev/null +++ b/roles/workstation/tasks/win_hosts.yml @@ -0,0 +1,5 @@ +- name: add hosts entry for sv-1c + win_hosts: + canonical_name: MonServer + ip_address: "{{ hostvars['sv-1c']['ansible_host'] }}" + when: hostvars['sv-1c'] is defined diff --git a/roles/workstation/tasks/win_icon.yml b/roles/workstation/tasks/win_icon.yml new file mode 100644 index 0000000..6c05ab1 --- /dev/null +++ b/roles/workstation/tasks/win_icon.yml @@ -0,0 +1,14 @@ +- name: download icon to remote host + win_copy: + src: "{{ item.icon }}" + dest: '{{ ansible_env.SystemRoot }}\{{ item.icon }}' + force: yes + +- name: create shortcut + win_shortcut: + description: "{{ item.description | d(omit) }}" + dest: '{{ ansible_env.PUBLIC }}\Desktop\{{ item.name }}.lnk' + icon: '{{ ansible_env.SystemRoot }}\{{ item.icon }}' + src: "{{ item.dest }}" + state: present + changed_when: no diff --git a/roles/workstation/tasks/win_icons.yml b/roles/workstation/tasks/win_icons.yml new file mode 100644 index 0000000..734d980 --- /dev/null +++ b/roles/workstation/tasks/win_icons.yml @@ -0,0 +1,26 @@ +- include_tasks: win_icon.yml + loop: + - name: "База знаний {{ org_localized }}" + icon: custom_icon_wiki.ico + description: "Открыть базу знаний (вики) {{ org_localized }}" + dest: "https://wiki.{{ int_tld }}" + + - name: "Менеджер паролей" + icon: custom_icon_vault.ico + description: "Открыть менеджер паролей {{ org_localized }}" + dest: "https://vault.{{ int_tld }}" + + - name: "Звонки {{ org_localized }}" + icon: custom_icon_cdr.ico + description: "Открыть список звонков {{ org_localized }}" + dest: "https://cdr.{{ int_tld }}" + + - name: "Почта {{ org_localized }}" + icon: custom_icon_mail.ico + description: "Открыть корпоративную почту {{ org_localized }}" + dest: "https://mail.{{ int_tld }}" + + - name: "Сервисы {{ org_localized }}" + icon: custom_icon_uptime.ico + description: "Открыть статистику сервисов {{ org_localized }}" + dest: "https://uptime.{{ int_tld }}" diff --git a/roles/workstation/tasks/win_install_libreoffice.yml b/roles/workstation/tasks/win_install_libreoffice.yml new file mode 100644 index 0000000..6376ac1 --- /dev/null +++ b/roles/workstation/tasks/win_install_libreoffice.yml @@ -0,0 +1,35 @@ +- name: install LibreOffice + chocolatey.chocolatey.win_chocolatey: + name: libreoffice + state: latest + + +- name: define LibreOffice paths + set_fact: + libreoffice_reg_paths: + - org.openoffice.Office.Common\Save\Document\WarnAlienFormat + - org.openoffice.Office.Common\Misc\ShowTipOfTheDay + - org.openoffice.Office.Common\Misc\FirstRun + - org.openoffice.Office.Common\Misc\CrashReport + - org.openoffice.Office.Common\Misc\CollectUsageInformation + - org.openoffice.Office.UI\Infobar\GetInvolved + - org.openoffice.Office.UI\Infobar\Donate + - org.openoffice.Office.UI\Infobar\WhatsNew + + +- name: create LibreOffice policies + win_regedit: + path:
"HKLM:\\SOFTWARE\\Policies\\LibreOffice\\{{ item }}" + name: "Value" + type: string + data: "false" + loop: "{{ libreoffice_reg_paths }}" + + +- name: finalize LibreOffice policies + win_regedit: + path: "HKLM:\\SOFTWARE\\Policies\\LibreOffice\\{{ item }}" + name: "Final" + type: dword + data: 1 + loop: "{{ libreoffice_reg_paths }}" diff --git a/roles/workstation/tasks/win_install_lmserver.yml b/roles/workstation/tasks/win_install_lmserver.yml new file mode 100644 index 0000000..fe659dc --- /dev/null +++ b/roles/workstation/tasks/win_install_lmserver.yml @@ -0,0 +1,4 @@ +- name: install LMServer + chocolatey.chocolatey.win_chocolatey: + name: lmserver + state: latest diff --git a/roles/workstation/tasks/win_install_seadrive.yml b/roles/workstation/tasks/win_install_seadrive.yml new file mode 100644 index 0000000..fba9bf4 --- /dev/null +++ b/roles/workstation/tasks/win_install_seadrive.yml @@ -0,0 +1,213 @@ +- name: install SeaDrive + chocolatey.chocolatey.win_chocolatey: + name: seadrive + state: latest + register: install_result + + + +# determine installation location + +- name: search for SeaDrive installation folder in Program Files + win_stat: + path: "{{ ansible_env.ProgramFiles }}\\SeaDrive\\bin" + get_checksum: no + register: seadrive_stat + +- name: fail if SeaDrive is missing + fail: + msg: "SeaDrive is missing from this system" + when: not seadrive_stat.stat.exists + +- name: get SeaDrive installation location + set_fact: + sd_install_folder: "{{ seadrive_stat.stat.path }}" + +- debug: + msg: "SeaDrive is installed to {{ sd_install_folder }}" + + + +# clear old per-user autorun entries + +- set_fact: + sd_partial_autorun_path: Software\Microsoft\Windows\CurrentVersion\Run + +- include_tasks: tasks/get_all_hkus.yml + +- name: collect all SeaDrive autorun entries in all HKUs + win_reg_stat: + path: "{{ item }}\\{{ sd_partial_autorun_path }}" + loop: "{{ hkus }}" + register: sd_autoruns + +- name: remove all SeaDrive entries in all HKUs + win_regedit: + path: "{{ item.item }}\\{{ sd_partial_autorun_path }}" + name: SeaDrive + type: string + state: absent + when: item.exists and (item.properties['SeaDrive'] is defined) + loop: "{{ sd_autoruns.results }}" + + + +# setup global autorun + +- name: set SeaDrive to launch on boot for all users + win_regedit: + path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Run + name: SeaDrive + type: string + data: "{{ sd_install_folder }}\\seadrive-gui.exe" + state: present + + + +# remove default shortcut + +- name: remove default shortcut + win_shortcut: + dest: "{{ ansible_env.PUBLIC }}\\Desktop\\SeaDrive.lnk" + state: absent + + + +# add custom shortcut to root folder + +- name: run a PSH snippet to get at least one SeaDrive root folder + win_shell: | + $Path = gci Registry::HKEY_users | ? {$_.Name -notlike "*Classes" -and $_.Name -notlike "*.DEFAULT"} | ` + select @{n="SFPath";e={$_.PSPath + "\Software\SeaDrive\Seafile Drive Client\Settings"}} | ? ` + {test-path $_.SFPath } | % { (gp $_.SFPath).seadriveRoot } | select -first 1 + if ($Path) { + $Path = (gci $Path | ? PSIsContainer | select -first 1).FullName + if ($Path) { + $Path = (gci $Path | ? PSIsContainer | ? 
Name -like "*groups*" | select -first 1).FullName + if ($Path) { + Write-Host -NoNewline $Path + } + } + } + register: script_output + failed_when: script_output.stdout == '' + changed_when: false + +- set_fact: + sd_root_folder: '{{ script_output.stdout | replace("/","\;") | replace(";", "") }}' + +- name: download custom icon to SeaDrive directory + win_copy: + src: custom_icon_seadrive.ico + dest: "{{ sd_install_folder }}\\custom_icon_seadrive.ico" + force: yes + +- name: edit SeaDrive shortcut on the desktop + win_shortcut: + src: "{{ sd_root_folder }}" + dest: "{{ ansible_env.PUBLIC }}\\Desktop\\Файлы {{ org_localized }}.lnk" + icon: "{{ sd_install_folder }}\\custom_icon_seadrive.ico" + description: "Открыть файловое хранилище {{ org_localized }}" + changed_when: False + + + +# register custom CSP + +- name: ensure all required variables are set + fail: + msg: mandatory variables are not set, cannot add custom CSP + when: (sd_install_folder is not defined) or (sd_root_folder is not defined) + +- name: setup CSP variables + set_fact: + sd_csp_clsid: "{EE8556E2-2E7E-4909-81BB-0020A484F618}" + sd_csp_root: "HKLM:" + sd_csp_name: "Файлы {{ org_localized }}" + +- name: concat CLSID and key to determine the full path to CSP + set_fact: + sd_csp_path: "{{ sd_csp_root }}\\Software\\Classes\\CLSID\\{{ sd_csp_clsid }}" + + +- name: 1. add CLSID and set CSP name + win_regedit: + path: "{{ sd_csp_path }}" + type: string + data: "{{ sd_csp_name }}" + +- name: 2. set the icon for CSP + win_regedit: + path: "{{ sd_csp_path }}\\DefaultIcon" + type: expandstring + name: "" + data: "{{ sd_install_folder }}\\custom_icon_seadrive.ico" + +- name: 3. pin CSP to namespace tree + win_regedit: + path: "{{ sd_csp_path }}" + type: dword + name: System.IsPinnedToNameSpaceTree + data: 1 + +- name: 4. set location (sort order) for CSP + win_regedit: + path: "{{ sd_csp_path }}" + type: dword + name: SortOrderIndex + data: 0x42 + +- name: 5. provide DLL for CSP that hosts the extension + win_regedit: + path: "{{ sd_csp_path }}\\InProcServer32" + type: expandstring + name: "" + data: "%SystemRoot%\\system32\\shell32.dll" + +- name: 6. define the instance object + win_regedit: + path: "{{ sd_csp_path }}\\Instance" + type: string + name: CLSID + data: "{0E5AAE11-A475-4c5b-AB00-C66DE400274E}" + +- name: 7. set FS attributes on CSP folder + win_regedit: + path: "{{ sd_csp_path }}\\Instance\\InitPropertyBag" + type: dword + name: Attributes + data: 17 + +- name: 8. set path to system root + win_regedit: + path: "{{ sd_csp_path }}\\Instance\\InitPropertyBag" + type: expandstring + name: TargetFolderPath + data: "{{ sd_root_folder }}" + +- name: 9. set shell flags + win_regedit: + path: "{{ sd_csp_path }}\\ShellFolder" + type: dword + name: FolderValueFlags + data: 0x28 + +- name: 10. set SFGAO flags + win_regedit: + path: "{{ sd_csp_path }}\\ShellFolder" + type: dword + name: Attributes + data: 0xF080004D + +- name: 11. register CSP in namespace root + win_regedit: + path: "{{ sd_csp_root }}\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Desktop\\NameSpace\\{{ sd_csp_clsid }}" + type: string + data: "{{ sd_csp_name }}" + +- name: 12. 
hide CSP from desktop folder + win_regedit: + path: "{{ sd_csp_root }}\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\HideDesktopIcons\\NewStartPanel" + type: dword + name: "{{ sd_csp_clsid }}" + data: 1 diff --git a/roles/workstation/tasks/win_ntp.yml b/roles/workstation/tasks/win_ntp.yml new file mode 100644 index 0000000..2f8ba5b --- /dev/null +++ b/roles/workstation/tasks/win_ntp.yml @@ -0,0 +1,34 @@ +- name: check if there is an entry for W32Time service + win_reg_stat: + path: HKLM:\SYSTEM\CurrentControlSet\Services\W32Time + register: w32time_exists + + +- name: fail if W32Time is missing + fail: + msg: W32Time registry key is missing, aborting + when: w32time_exists.exists == false + + +- name: ensure NTP is enabled + win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\TimeProviders\NtpClient + name: Enabled + type: dword + data: 1 + + +- name: ensure NTP client is set to NTP mode + win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Parameters + name: Type + type: string + data: NTP + + +- name: set NTP server address + win_regedit: + path: HKLM:\SYSTEM\CurrentControlSet\Services\W32Time\Parameters + name: NtpServer + type: string + data: "{{ services.ntp.hostname if services.ntp is mapping else 'ru.pool.ntp.org' }},0x8" diff --git a/roles/workstation/tasks/win_timezone.yml b/roles/workstation/tasks/win_timezone.yml new file mode 100644 index 0000000..43e8477 --- /dev/null +++ b/roles/workstation/tasks/win_timezone.yml @@ -0,0 +1,3 @@ +- name: set timezone + community.windows.win_timezone: + timezone: "{{ timezone_win }}" diff --git a/roles/workstation/tasks/win_tweaks.yml b/roles/workstation/tasks/win_tweaks.yml new file mode 100644 index 0000000..f329817 --- /dev/null +++ b/roles/workstation/tasks/win_tweaks.yml @@ -0,0 +1,114 @@ +- name: process security policy + win_security_policy: + section: "{{ item.section | default('System Access') }}" + key: "{{ item.key }}" + value: "{{ item.value }}" + loop: + - { desc: "Set unlimited password age", key: MaximumPasswordAge, value: -1 } + - { desc: "Disable built-in admin account", key: EnableAdminAccount, value: 0 } + #- { desc: "Disable built-in guest account", key: EnableGuestAccount, value: 0 } + + +- name: disable Start Menu suggestions and tips/tricks + win_regedit: + path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\ContentDeliveryManager + name: "{{ item }}" + type: dword + data: 0 + loop: + - SubscribedContent-338388Enabled + - SubscribedContent-338389Enabled + + +- name: disable PrintNightmare mitigations + win_regedit: + path: HKLM:\System\CurrentControlSet\Control\Print + name: RpcAuthnLevelPrivacyEnabled + type: dword + data: 0 + + +- name: process MDM settings + win_regedit: + path: 'HKLM:\SOFTWARE\Microsoft\PolicyManager\default\{{ item.group }}\{{ item.name }}' + name: value + type: "{{ item.type | default('dword') }}" + data: "{{ item.value }}" + loop: + - { desc: "Disable game broadcasting", group: ApplicationManagement, name: AllowGameDVR, value: 0 } + - { desc: "Disable linking phone to PC", group: ApplicationManagement, name: AllowPhonePCLinking, value: 0 } + + - { desc: "Disable Cortana", group: Experience, name: AllowCortana, value: 0 } + - { desc: "Disable Find My Device", group: Experience, name: AllowFindMyDevice, value: 0 } + - { desc: "Disable third-party suggestions in Windows Spotlight", group: Experience, name: AllowThirdPartySuggestionsInWindowsSpotlight, value: 0 } + - { desc: "Do not allow Feedback notifications", group: Experience, name: 
DoNotShowFeedbackNotifications, value: 1 } + + - { desc: "Disable advanced gaming settings", group: Games, name: AllowAdvancedGamingServices, value: 0 } + + - { desc: "Block Microsoft Accounts", group: LocalPoliciesSecurityOptions, name: Accounts_BlockMicrosoftAccounts, value: 3 } + - { desc: "Always sign communications as SMB Server", group: LocalPoliciesSecurityOptions, name: MicrosoftNetworkServer_DigitallySignCommunicationsAlways, value: 1 } + - { desc: "Sign communications as SMB Server if client agrees", group: LocalPoliciesSecurityOptions, name: MicrosoftNetworkServer_DigitallySignCommunicationsIfClientAgrees, value: 1 } + - { desc: "Force NTLMv2 and refuse older NTLM versions for LAN Manager", group: LocalPoliciesSecurityOptions, name: NetworkSecurity_LANManagerAuthenticationLevel, value: 5 } + + - { desc: "Disable cross-device clipboard sharing", group: Privacy, name: AllowCrossDeviceClipboard, value: 0 } + - { desc: "Disable online speech recognition", group: Privacy, name: AllowInputPersonalization, value: 0 } + - { desc: "Make ads non-personalized", group: Privacy, name: DisableAdvertisingId, value: 1 } + - { desc: "Disable activity feed", group: Privacy, name: EnableActivityFeed, value: 0 } + - { desc: "Do not allow apps to publish info to online activity feed", group: Privacy, name: PublishUserActivities, value: 0 } + - { desc: "Do not allow apps to upload info to online activity feed", group: Privacy, name: UploadUserActivities, value: 0 } + + - { desc: "Disable cloud search", group: Search, name: AllowCloudSearch, value: 0 } + - { desc: "Disable web search", group: Search, name: DoNotUseWebResults, value: 0 } + + - { desc: "Disable online tips for Settings app", group: Settings, name: AllowOnlineTips, value: 0 } + - { desc: "Disable editing sign-in settings", group: Settings, name: AllowSignInOptions, value: 0 } + - { desc: "Disable editing and logging in with MS account in Settings", group: Settings, name: AllowYourAccount, value: 0 } + + - { desc: "Do not update speech synthesis models", group: Speech, name: AllowSpeechModelUpdate, value: 0 } + + - { desc: "Pin Documents folder to Start Menu", group: Start, name: AllowPinnedFolderDocuments, value: 1 } + - { desc: "Pin Downloads folder to Start Menu", group: Start, name: AllowPinnedFolderDownloads, value: 1 } + - { desc: "Unpin Explorer folder from Start Menu", group: Start, name: AllowPinnedFolderFileExplorer, value: 0 } + - { desc: "Unpin Home Group folder from Start Menu", group: Start, name: AllowPinnedFolderHomeGroup, value: 0 } + - { desc: "Unpin Music folder from Start Menu", group: Start, name: AllowPinnedFolderMusic, value: 0 } + - { desc: "Unpin Network folder from Start Menu", group: Start, name: AllowPinnedFolderNetwork, value: 0 } + - { desc: "Unpin Personal folder from Start Menu", group: Start, name: AllowPinnedFolderPersonalFolder, value: 0 } + - { desc: "Unpin Pictures folder from Start Menu", group: Start, name: AllowPinnedFolderPictures, value: 0 } + - { desc: "Pin Settings folder to Start Menu", group: Start, name: AllowPinnedFolderSettings, value: 1 } + - { desc: "Unpin Videos folder from Start Menu", group: Start, name: AllowPinnedFolderVideos, value: 0 } + - { desc: "Hide People icon from Start Menu", group: Start, name: HidePeopleBar, value: 1 } + + - { desc: "Prevent users from using Insider Builds and Build Previews", group: System, name: AllowBuildPreview, value: 0 } + - { desc: "Disable Microsoft from running experiments in OS", group: System, name: AllowExperimentation, value: 0 } + - { desc: 
"Reduce amount of telemetry that is being sent to Microsoft", group: System, name: AllowTelemetry, value: 0 } + - { desc: "Do not allow factory resets", group: System, name: AllowUserToResetPhone, value: 0 } + - { desc: "Prevent users from changing telemetry settings in Settings UI", group: System, name: ConfigureTelemetryOptInSettingsUx, value: 1 } + - { desc: "Disable OneDrive and its integration to Windows Explorer", group: System, name: DisableOneDriveFileSync, value: 1 } + + - { desc: "Disable XBox Accessory Management service", group: SystemServices, name: ConfigureXboxAccessoryManagementServiceStartupMode, value: 4 } + - { desc: "Disable XBox Live Auth Manager service", group: SystemServices, name: ConfigureXboxLiveAuthManagerServiceStartupMode, value: 4 } + - { desc: "Disable XBox Live Game Save service", group: SystemServices, name: ConfigureXboxLiveGameSaveServiceStartupMode, value: 4 } + - { desc: "Disable XBox Live Networking service", group: SystemServices, name: ConfigureXboxLiveNetworkingServiceStartupMode, value: 4 } + + - { desc: "Do not allow Microsoft to collect typing data", group: TextInput, name: AllowLinguisticDataCollection, value: 0 } + + - { desc: "Do not automatically connect to Wi-Fi hotspots", group: Wifi, name: AllowAutoConnectToWiFiSenseHotspots, value: 0 } + - { desc: "Disable Internet Connection Sharing", group: Wifi, name: AllowInternetSharing, value: 0 } + - { desc: "Disable Wi-Fi Direct", group: Wifi, name: AllowWiFiDirect, value: 0 } + + +- name: process GP settings + win_regedit: + path: 'HKLM:\SOFTWARE\Policies\Microsoft\{{ item.path }}' + name: "{{ item.name }}" + type: "{{ item.type | default('dword') }}" + data: "{{ item.value }}" + loop: + - { desc: "Disable Cortana in Windows search", path: 'Windows\Windows Search', name: AllowCortana, value: 0 } + - { desc: "Disable web search", path: 'Windows\Windows Search', name: DisableWebSearch, value: 1 } + - { desc: "Do not display web results in Search", path: 'Windows\Windows Search', name: ConnectedSearchUseWeb, value: 0 } + - { desc: "Turn off Find My Device", path: "FindMyDevice", name: AllowFindMyDevice, value: 0 } + - { desc: "Turn off Insider Preview builds", path: 'Windows\PreviewBuilds', name: AllowBuildPreview, value: 0 } + - { desc: "Turn off Windows Mail app", path: "Windows Mail", name: ManualLaunchAllowed, value: 0 } + - { desc: "Turn off OneDrive", path: 'Windows\OneDrive', name: DisableFileSyncNGSC, value: 1 } + - { desc: "Disable cloud content", path: 'Windows\CloudContent', name: DisableWindowsConsumerFeatures, value: 1 } diff --git a/tasks/add_backup.yml b/tasks/add_backup.yml new file mode 100644 index 0000000..a81c3cb --- /dev/null +++ b/tasks/add_backup.yml @@ -0,0 +1,5 @@ +- name: add backup dirs to collected backup dirs + set_fact: + collected_backup_dirs: "{{ (collected_backup_dirs | d([])) + + ([backup_items] if backup_items is string else backup_items) }}" + when: backup_items is defined and ((backup_items | type_debug == 'list') or backup_items is string) diff --git a/tasks/create_user.yml b/tasks/create_user.yml new file mode 100644 index 0000000..6d4905b --- /dev/null +++ b/tasks/create_user.yml @@ -0,0 +1,40 @@ +- name: validate input + fail: + msg: user parameter must be defined and must be a dictionary + when: user is not defined or user is not mapping + + +- name: ensure group exists + group: + name: "{{ user.group }}" + gid: "{{ user.gid | d(omit) }}" + system: "{{ user.is_system | d('yes') }}" + when: user.group is defined + notify: "{{ user.notify | d(omit) }}" + + +- 
name: ensure user exists + user: + name: "{{ user.name }}" + comment: "{{ user.comment | d(omit) }}" + group: "{{ user.group | d(omit) }}" + groups: "{{ user.extra_groups | d(omit) }}" + uid: "{{ user.uid | d(omit) }}" + home: "{{ user.dir | d(omit) }}" + shell: "{{ user.shell | d('/bin/false') }}" + system: "{{ user.is_system | d('yes') }}" + create_home: "{{ 'yes' if (user.dir is defined and (user.create_home | d(true) == true)) else 'no' }}" + when: user.name is defined + notify: "{{ user.notify | d(omit) }}" + + +- name: ensure user home dir exists + file: + path: "{{ user.dir }}" + state: directory + mode: "{{ user.dir_mode | d('0755') }}" + owner: "{{ user.name }}" + group: "{{ user.group | d(omit) }}" + when: user.dir is defined and user.name is defined and (user.create_home | d(true) == true) + notify: "{{ user.notify | d(omit) }}" + diff --git a/tasks/gen_ssh_key.yml b/tasks/gen_ssh_key.yml new file mode 100644 index 0000000..fbb26fc --- /dev/null +++ b/tasks/gen_ssh_key.yml @@ -0,0 +1,24 @@ +- block: + - name: ensure ansible key directory exists + file: + path: "{{ (ansible_dir, ansible_key_dir) | path_join }}" + state: directory + mode: 0400 + + + - name: ensure ssh key for this host exists + community.crypto.openssh_keypair: + path: "{{ (ansible_dir, ansible_key_dir, inventory_hostname) | path_join }}" + comment: "{{ inventory_hostname }} ssh key for ansible management" + mode: 0400 + regenerate: full_idempotence + type: ed25519 + register: container_key + + + - name: fail if public key is missing + fail: + msg: public key is missing + when: container_key.public_key is not defined + + delegate_to: localhost \ No newline at end of file diff --git a/tasks/get_datetime.yml b/tasks/get_datetime.yml new file mode 100644 index 0000000..19194ba --- /dev/null +++ b/tasks/get_datetime.yml @@ -0,0 +1,16 @@ +- name: collect facts if datetime is undefined + setup: + gather_subset: + - min + when: ansible_date_time is not defined + + +- name: set default datetime + set_fact: + current_date_time: "{{ ansible_date_time }}" + + +- name: format date + set_fact: + current_date_time: "{{ ansible_date_time.year ~ ansible_date_time.month ~ ansible_date_time.day }}" + when: (format is defined) and (format is string) and (format | upper == 'YYMMDD') diff --git a/tasks/get_lastversion.yml b/tasks/get_lastversion.yml new file mode 100644 index 0000000..c21fd9d --- /dev/null +++ b/tasks/get_lastversion.yml @@ -0,0 +1,155 @@ +- block: + - name: validate input + fail: + msg: package must be defined and a dictionary + when: package is not defined or package is not mapping + + + - name: define package dict + set_fact: + pkg: "{{ {'assets': false, 'sources': false} | combine(package) }}" + + + - name: fail if package name is not defined + fail: + msg: package name is not defined + when: pkg.name is not defined + + + - name: fail if both assets and sources are enabled + fail: + msg: both assets and sources are enabled + when: (pkg.assets | d(false) == true) and (pkg.sources | d(false) == true) + + + - block: + - name: install pip3 + include_tasks: tasks/install_packages.yml + vars: + package: + - alpine: py3-pip + debian: python3-pip + + - name: install lastversion from pip + pip: + name: lastversion + + - name: set lastversion install result for caching purposes + set_fact: + lastversion_installed: yes + + when: lastversion_installed | d(false) == false + + + - name: construct lastversion parameters + set_fact: + lv_params: "{%- if pkg.major_branch is defined and pkg.major_branch is string -%}--major {{ 
pkg.major_branch | quote }}\ {% endif -%}\ + {%- if pkg.location is defined and pkg.location is string -%}--at {{ pkg.location | quote }}\ {% endif -%}\ + {%- if pkg.prerelease | d(false) == true -%}--pre\ {% endif -%}\ + {%- if pkg.release_filter is defined and pkg.release_filter is string -%}--only {{ pkg.release_filter | quote }}\ {% endif -%}" + + + - name: invoke lastversion + shell: + cmd: "lastversion {{ lv_params }}{{ pkg.name | quote }}" + changed_when: false + environment: "{{ {'GITHUB_API_TOKEN': github_api_token} if (github_api_token is defined) else {} }}" + register: lv_result + + + - name: save last version + set_fact: + package_last_version: "{{ lv_result.stdout.strip() }}" + + + - block: + - name: add asset-related lastversion parameters + set_fact: + lv_params_new: "{{ lv_params }}\ + {%- if pkg.assets | d(false) == true -%}--assets\ {% endif -%}\ + {%- if pkg.asset_filter is defined and pkg.asset_filter is string -%}--filter {{ pkg.asset_filter | quote }}\ {% endif -%}" + + - name: invoke lastversion + shell: + cmd: "lastversion {{ lv_params_new }}{{ pkg.name | quote }}" + changed_when: false + environment: "{{ {'GITHUB_API_TOKEN': github_api_token} if (github_api_token is defined) else {} }}" + register: lv_result + + - name: save asset urls + set_fact: + package_url: "{{ lv_result.stdout.strip().split('\n') }}" + + when: pkg.assets | d(false) == true + + + - block: + - name: add source-related lastversion parameters + set_fact: + lv_params_new: "{{ lv_params }}\ + {%- if pkg.sources | d(false) == true -%}--source\ {% endif -%}" + + - name: invoke lastversion + shell: + cmd: "lastversion {{ lv_params_new }}{{ pkg.name | quote }}" + changed_when: false + environment: "{{ {'GITHUB_API_TOKEN': github_api_token} if (github_api_token is defined) else {} }}" + register: lv_result + + - name: save source urls + set_fact: + package_url: "{{ lv_result.stdout.strip().split('\n') }}" + + when: pkg.sources | d(false) == true + + + delegate_to: localhost + + +- block: + - name: save last version info to file + copy: + content: "{{ package_last_version }}" + dest: "{{ pkg.file }}" + mode: "{{ pkg.mode | d(omit) }}" + owner: "{{ pkg.user | d(omit) }}" + group: "{{ pkg.group | d(omit) }}" + register: lv_copy + notify: "{{ pkg.notify | d(omit) }}" + + - name: check if the file was changed + set_fact: + package_changed: "{{ lv_copy.changed }}" + + when: pkg.file is defined + + +- block: + - name: ensure there is only one url + fail: + msg: multiple asset urls or no asset urls found + when: (package_url | length) != 1 + + - name: install tar + include_tasks: tasks/install_packages.yml + vars: + package: + - tar + + - pause: + when: interactive | d(true) == true + + - name: download and extract assets + unarchive: + src: "{{ package_url[0] }}" + dest: "{{ pkg.extract }}/" + remote_src: yes + mode: "{{ pkg.mode | d(omit) }}" + owner: "{{ pkg.user | d(omit) }}" + group: "{{ pkg.group | d(omit) }}" + extra_opts: "{%- if pkg.strip_first_dir | d(false) == true -%}{{ [ '--strip-components=1' ] }}\ + {%- else -%}{{ [] }}\ + {%- endif -%}" + notify: "{{ pkg.notify | d(omit) }}" + + when: (lv_copy.changed or (pkg.force_download | d(false) == true)) and pkg.extract is defined diff --git a/tasks/includes/package.yml b/tasks/includes/package.yml new file mode 100644 index 0000000..1000256 --- /dev/null +++ b/tasks/includes/package.yml @@ -0,0 +1,36 @@ +- name: determine package name + set_fact: + this_package: "{%- if package_inner is string -%}{{ package_inner }}\ + {%- elif 
package_inner[ansible_distribution | lower] is defined -%}{{ package_inner[ansible_distribution | lower] }}\ + {%- elif package_inner[ansible_system | lower] is defined -%}{{ package_inner[ansible_system | lower] }}\ + {%- elif package_inner.default is defined -%}{{ package_inner.default }}\ + {%- elif package_inner.name is defined -%}{{ package_inner.name }}\ + {%- else -%}{{ None }}\ + {%- endif -%}" + + +- block: + - name: install package + package: + name: "{{ this_package }}" + when: (ansible_system != 'Win32NT') and ((repository is not defined) or (repository == None)) + notify: "{{ notify | d(omit) }}" + + + - name: install apk package from custom repository + community.general.apk: + name: "{{ this_package }}" + repository: "{{ (package_inner.repository | d(repository)) if package_inner is mapping else repository }}" + when: (ansible_system != 'Win32NT') and ((repository is defined) and (repository != None) or + (package_inner is mapping) and (package_inner.repository is defined) and (package_inner.repository != None)) + notify: "{{ notify | d(omit) }}" + + + - name: install windows package + win_chocolatey: + name: "{{ this_package }}" + state: latest + when: ansible_system == 'Win32NT' + notify: "{{ notify | d(omit) }}" + + when: (this_package | d(None) != None) and (this_package != "") \ No newline at end of file diff --git a/tasks/includes/role.yml b/tasks/includes/role.yml new file mode 100644 index 0000000..9fd03df --- /dev/null +++ b/tasks/includes/role.yml @@ -0,0 +1,11 @@ +- name: fail if role is not defined + fail: + msg: this_role variable is not defined or is not an object + when: this_role is not mapping + + +- name: include role + include_role: + name: "{{ this_role.role }}" + vars: + function: "{{ this_role.function | d(None) }}" diff --git a/tasks/includes/serial.yml b/tasks/includes/serial.yml new file mode 100644 index 0000000..6890ac2 --- /dev/null +++ b/tasks/includes/serial.yml @@ -0,0 +1,49 @@ +# no longer needed + +- block: + - name: slurp zone file + slurp: + src: "{{ nsd_data_dir }}/{{ ns_zone }}.zone" + register: zf + changed_when: false + + + - name: get SOA serial value + set_fact: + ns_old_serial: '{{ zf.content | b64decode | regex_search(''@\s+IN\s+SOA\s+\S+\s+\S+\s*\(\s*(\d+)'', ''\1'') | first | string }}' + + + - name: get current date + include_tasks: tasks/get_datetime.yml + vars: + format: YYMMDD + + + - name: replace outdated serial with current date + set_fact: + ns_new_serial: "{{ (current_date_time | string) ~ '01'}}" + when: ns_old_serial[:8] != (current_date_time | string) + + + - name: increase current serial + set_fact: + ns_new_serial: "{{ (ns_old_serial | int) + 1 }}" + when: (ns_old_serial[:8] == (current_date_time | string)) and ((ns_old_serial[8:10] | int) < 99) + + delegate_to: nse1 + + +- name: do not change current serial + set_fact: + ns_new_serial: "{{ ns_old_serial }}" + when: (ns_old_serial[:8] == (current_date_time | string)) and ((ns_old_serial[8:10] | int) >= 99) + delegate_to: nse1 + + +- name: insert new serial + replace: + path: "{{ nsd_data_dir }}/{{ ns_zone }}.zone" + regexp: '(@\s+IN\s+SOA\s+\S+\s+\S+\s*\(\s*){{ ns_old_serial }}' + replace: '\g<1>{{ ns_new_serial }}' + notify: reload nsd zones + delegate_to: nse1 diff --git a/tasks/includes/stage.yml b/tasks/includes/stage.yml new file mode 100644 index 0000000..a16f359 --- /dev/null +++ b/tasks/includes/stage.yml @@ -0,0 +1,16 @@ +- name: fail if stage is missing + fail: + msg: stage should be defined + when: this_stage is not defined + + +- name: define current 
stage + set_fact: + stage: "{{ this_stage | int }}" + + +- name: include roles for selected stage + include_tasks: tasks/includes/role.yml + loop: "{{ role_mapping | selectattr('stage', 'equalto', (this_stage | int)) | list }}" + loop_control: + loop_var: this_role diff --git a/tasks/install_packages.yml b/tasks/install_packages.yml new file mode 100644 index 0000000..63383e2 --- /dev/null +++ b/tasks/install_packages.yml @@ -0,0 +1,40 @@ +- name: validate input + fail: + msg: package must be a list or a string + when: (package is not defined) or (package is mapping) or + (package is not iterable) and (package is not string) + + +- block: + - name: determine distribution + setup: + gather_subset: + - min + + - name: fail if distribution is not yet defined + fail: + msg: distribution is not yet defined, using fallback + when: ansible_distribution is not defined + + rescue: + - name: set fallback distribution + set_fact: + ansible_distribution: 'Alpine' + ansible_system: "{{ 'Win32NT' if (is_windows | d(false) == true) else 'Linux' }}" + + when: ansible_distribution is not defined + + +- name: loop over packages + include_tasks: tasks/includes/package.yml + loop: "{%- if package is string -%}{{ [ package ] }}\ + {%- else -%}{{ package }}\ + {%- endif -%}" + loop_control: + loop_var: package_inner + + +- name: undefine temporary facts + set_fact: + this_package: "{{ None }}" + when: this_package | d(None) != None diff --git a/tasks/pct_command.yml b/tasks/pct_command.yml new file mode 100644 index 0000000..2da5f8b --- /dev/null +++ b/tasks/pct_command.yml @@ -0,0 +1,13 @@ +- name: call pct command + command: + cmd: "pct exec {{ container_id | quote }} -- {{ pct_command | mandatory }}" + register: result + changed_when: "(chg_substr is defined and chg_substr in result.stdout) or \ + (chg_rc is defined and chg_rc == result.rc) or \ + chg_always is defined" + + failed_when: "(fail_substr is defined and (fail_substr in result.stdout or fail_substr in result.stderr)) or \ + (fail_rc is defined and fail_rc == result.rc) or \ + result.rc != 0" + + delegate_to: "{{ selected_node | mandatory }}" \ No newline at end of file diff --git a/tasks/pre_tasks.yml b/tasks/pre_tasks.yml new file mode 100644 index 0000000..e60c7b7 --- /dev/null +++ b/tasks/pre_tasks.yml @@ -0,0 +1,63 @@ +- name: determine host info + set_fact: + host_name: "{{ actual_hostname | d(inventory_hostname) }}" + host_tld: "{%- if branch is defined -%}{{ branch }}.{%- endif -%}{{ tld if (use_external_tld | d(false) == true) else int_tld }}" + host_tls: "{{ use_tls | d(true) }}" + host_protocol: "{{ 'https' if (use_tls | d(true)) else 'http' }}" + host_metrics: "{{ services.prometheus is defined and (use_metrics | d(true) == true) }}" + host_mail: "{{ mail_server.mta_hostname is defined and (use_mail | d(true) == true) }}" + host_backups: "{{ services.backup is defined and (use_backups | d(true) == true) }}" + + +- name: determine host fqdn and uri + set_fact: + host_fqdn: "{{ host_name ~ '.' ~ host_tld }}" + host_url: "{{ host_protocol }}://{{ host_name ~ '.' 
~ host_tld }}" + + +- name: define ansible_host if it is missing + set_fact: + ansible_host: "{{ host_fqdn }}" + when: ansible_host is not defined + + +- name: select a cluster node + include_tasks: tasks/select_node.yml + when: "'containers' in group_names" + + +- name: set hardware information + set_fact: + hardware: "{{ default_container_hardware | combine(role_hardware[host_primary_role] | d({})) | + combine((container_hardware if 'containers' in group_names else host_hardware) | d({})) }}" + + +- name: clamp hardware cores to max node number + set_fact: + hardware: "{{ hardware | combine({'cores': ([hardware.cores, hostvars[selected_node]['max_cores'] | d(hardware.cores)] | min)}) }}" + when: "not ('containers' in group_names) and (selected_node is defined) and (hostvars[selected_node]['max_cores'] is defined)" + + +- block: + - name: validate database parameters + fail: + msg: some database parameters are not defined or invalid + when: (database.name is not string) or + (database.user is defined and database.user is not string) or + (database.pass is defined and database.pass is not string) or + (database.host is defined and database.host is not string) or + (database.self_hosted is defined and database.self_hosted is not boolean) + + - name: set database information + set_fact: + database_name: "{{ database.name | mandatory }}" + database_user: "{{ database.user | d(database.name) }}" + database_pass: "{{ database.pass }}" + database_host: "{{ '127.0.0.1' if (database.self_hosted | d(false) == true) or (services.db is not defined) else + (services.db.address | d(hostvars[services.db.hostname]['ansible_host'])) }}" + database_hostname: "{{ inventory_hostname if (database.self_hosted | d(false) == true) or (services.db is not defined) else + services.db.hostname }}" + database_self_hosted: "{{ (database.self_hosted | d(false) == true) or (services.db is not defined) }}" + when: database is mapping + + diff --git a/tasks/query_node.yml b/tasks/query_node.yml new file mode 100644 index 0000000..18442b3 --- /dev/null +++ b/tasks/query_node.yml @@ -0,0 +1,30 @@ +- block: + - name: call pvesh command + command: + cmd: "pvesh get /cluster/resources --type vm --output-format yaml" + register: res + failed_when: no + changed_when: no + delegate_to: "{{ node }}" + no_log: yes + + - block: + - name: set temp pvesh result + set_fact: + temp_node_result: "{{ res.stdout | from_yaml | selectattr('vmid', 'equalto', container_id) | list }}" + + - name: fail if node is empty or more than one container was found + fail: + when: ((temp_node_result | length) != 1) or (temp_node_result[0].node is not defined) + + - name: set selected node + set_fact: + selected_node: "{{ temp_node_result[0].node }}" + + - name: set node found result + set_fact: + is_node_found: yes + + when: (res.stdout is string) and (res.stdout | length > 0) + + when: is_node_found == false \ No newline at end of file diff --git a/tasks/select_node.yml b/tasks/select_node.yml new file mode 100644 index 0000000..f4e11c2 --- /dev/null +++ b/tasks/select_node.yml @@ -0,0 +1,85 @@ +# select a cluster node that can host a role + +- name: ensure there is at least one cluster node + fail: + msg: no cluster nodes are defined + when: (groups['nodes'] | d([]) | length) == 0 + + +- name: clear selected_node + set_fact: + selected_node: "" + + +- name: preference-based node determination + block: + - name: determine first cluster node which prefers to host role + set_fact: + selected_node: "{{ item }}" + when: + - hostvars[item] is defined + - 
hostvars[item].can_host is defined + - hostvars[item].can_host is iterable + - hostvars[item].can_host is not mapping + - hostvars[item].can_host is not string + - (primary_role | d(inventory_hostname)) in hostvars[item].can_host + - (hostvars[item].never_host is not defined) or + ((primary_role | d(inventory_hostname)) not in hostvars[item].never_host) + - not selected_node + loop: "{{ groups['nodes'] }}" + + when: (hosted_on is not defined) and (inventory_hostname is defined) + + +- name: pin-based node determination + block: + - name: fail if pinned node is not a string + fail: + msg: 'pinned node for "{{ inventory_hostname }}" must be a string' + when: hosted_on is not string + + - name: fail if pinned node does not exist + fail: + msg: '"{{ inventory_hostname }}" has a nonexistent pinned node "{{ hosted_on }}"' + when: hostvars[hosted_on] is not defined + + - name: select pinned cluster node + set_fact: + selected_node: "{{ hosted_on }}" + + when: (hosted_on is defined) and (inventory_hostname is defined) + + +- name: fall back to the first cluster node + set_fact: + selected_node: "{{ groups['nodes'][0] }}" + when: not selected_node + + +- name: determine actual node in a cluster configuration + block: + - set_fact: + is_node_found: no + + - name: determine which node this container is currently hosted on + include_tasks: tasks/query_node.yml + loop: "{{ groups['nodes'] }}" + loop_control: + loop_var: node + + when: groups['nodes'] | length >= 2 + rescue: + - name: report that cluster cannot be queried + debug: + msg: 'cluster cannot be queried or its nodes are inaccessible, keeping selected node: "{{ selected_node }}"' + + +- name: report which node was selected + debug: + msg: 'using cluster node: "{{ selected_node }}"' + + +- name: ensure the selected node exists and is reachable + wait_for_connection: + timeout: 10 + delegate_to: "{{ selected_node }}"
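
For reference, a minimal inventory sketch (illustrative only, not part of this commit) of the variables that tasks/select_node.yml and tasks/pre_tasks.yml read; the host names "pve1" and "wiki" and the role names below are placeholders:

# host_vars/pve1.yml - a hypothetical member of the "nodes" group
max_cores: 8            # upper bound applied when pre_tasks.yml clamps hardware cores
can_host:               # roles this node prefers to host (list of role names)
  - postgres
  - vault
never_host:             # roles this node must never host
  - smb

# host_vars/wiki.yml - a hypothetical container pinned to that node
hosted_on: pve1         # pins the container and bypasses preference-based selection
container_hardware:     # merged over default_container_hardware in pre_tasks.yml
  cores: 2
  memory: 512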