Compare commits

...

3 Commits

Author    SHA1        Message              Date
Dave S.   bdbca4a90e  feat: various fixes  2 years ago
Dave S.   ac4de942ba  fix: various fixes   2 years ago
Dave S.   5ca943312c  feat: caddy          2 years ago
49 changed files:
  1. all.yml (11 changed lines)
  2. ansible.cfg (2 changed lines)
  3. group_vars/all.yml (23 changed lines)
  4. handlers/main.yml (3 changed lines)
  5. roles/acme-dns/defaults/main.yml (9 changed lines)
  6. roles/acme-dns/tasks/info.yml (18 changed lines)
  7. roles/acme-dns/tasks/main.yml (43 changed lines)
  8. roles/acme-dns/templates/init.j2 (8 changed lines)
  9. roles/acme-dns/templates/rproxy_nginx.j2 (0 changed lines)
  10. roles/acme/templates/renewal.j2 (2 changed lines)
  11. roles/backup/tasks/main.yml (35 changed lines)
  12. roles/backup/tasks/setup.yml (31 changed lines)
  13. roles/caddy/defaults/main.yml (46 changed lines)
  14. roles/caddy/handlers/main.yml (4 changed lines)
  15. roles/caddy/tasks/build_caddy.yml (133 changed lines)
  16. roles/caddy/tasks/install_prebuilt_caddy.yml (41 changed lines)
  17. roles/caddy/tasks/main.yml (144 changed lines)
  18. roles/caddy/templates/caddy.j2 (1 changed line)
  19. roles/caddy/templates/init.j2 (23 changed lines)
  20. roles/caddy/vars/reverse_proxy.yml (21 changed lines)
  21. roles/caddy/vars/tls.yml (8 changed lines)
  22. roles/certs/tasks/acme_dns.yml (1 changed line)
  23. roles/common/defaults/main.yml (1 changed line)
  24. roles/container/defaults/main.yml (2 changed lines)
  25. roles/container/tasks/main.yml (2 changed lines)
  26. roles/container/tasks/preconf.yml (12 changed lines)
  27. roles/coredns/tasks/info.yml (6 changed lines)
  28. roles/coredns/tasks/install.yml (0 changed lines)
  29. roles/iptables/defaults/main.yml (23 changed lines)
  30. roles/iptables/tasks/add.yml (5 changed lines)
  31. roles/iptables/tasks/main.yml (18 changed lines)
  32. roles/iptables/templates/iptables.j2 (2 changed lines)
  33. roles/lego/defaults/main.yml (3 changed lines)
  34. roles/lego/tasks/main.yml (214 changed lines)
  35. roles/lego/templates/renewal.j2 (22 changed lines)
  36. roles/nginx/tasks/main.yml (1 changed line)
  37. roles/postgres/defaults/main.yml (19 changed lines)
  38. roles/postgres/tasks/add_database.yml (11 changed lines)
  39. roles/postgres/tasks/install.yml (15 changed lines)
  40. roles/postgres/tasks/integrate.yml (11 changed lines)
  41. roles/rproxy/tasks/add.yml (5 changed lines)
  42. roles/rproxy/tasks/main.yml (42 changed lines)
  43. roles/vault/defaults/main.yml (4 changed lines)
  44. roles/vault/tasks/info.yml (12 changed lines)
  45. roles/vault/tasks/main.yml (49 changed lines)
  46. roles/vault/templates/init.j2 (4 changed lines)
  47. roles/vault/templates/rproxy_nginx.j2 (0 changed lines)
  48. tasks/get_host_arch.yml (7 changed lines)
  49. tasks/pre_tasks.yml (13 changed lines)

all.yml
@ -5,6 +5,7 @@
tasks:
# TODO: multiple roles
- block:
- name: get primary role
set_fact:
host_primary_role: "{%- if primary_role is defined -%}{{ primary_role }}\
@ -12,7 +13,6 @@
{%- else -%}{{ inventory_hostname }}\
{%- endif -%}"
- name: check if role info file exists
stat:
path: "{{ (playbook_dir, 'roles', host_primary_role, 'tasks', 'info.yml') | path_join }}"
@ -20,7 +20,6 @@
register: result
no_log: yes
- name: get role info
include_role:
name: "{{ host_primary_role }}"
@ -29,13 +28,15 @@
no_log: yes
when: result.stat.exists
when: no_primary_role | d(false) == false
- name: build role dependency list and stage info
set_fact:
role_deps: "{{ (([{ 'stage': 6, 'role': host_primary_role }] if role_dependency is not defined else [role_dependency]) +
role_deps: "{{ (([] if no_primary_role | d(false) == true else ([{ 'stage': 6, 'role': host_primary_role }] if role_dependency is not defined else [role_dependency])) +
([role_dependency_common] if (role_dependency_no_common | d(false) == false) else []) +
([{ 'stage': 1, 'role': 'container' }] if 'containers' in group_names else []) +
([{ 'stage': 3, 'role': 'postgres', 'function': 'integrate' }] if role_dependency_use_db | d(false) == true else [])
([{ 'stage': 3, 'role': 'postgres', 'tasks_from': 'integrate.yml' }] if role_use_database | d(false) == true else [])
) | flatten(levels=1) | sort(attribute='stage') }}"
selected_stages: "{%- if stage is defined and ((stage | string) is search(',')) -%}{{ stage | string | split(',') | list | map('int') | list }}\
{%- elif (stage is not defined) or ((stage | int) == 0) -%}{{ [1,2,3,4,5,6,7,8,9] }}\
@ -46,7 +47,7 @@
- name: show deployment info
debug:
msg: "deploying role \"{{ host_primary_role }}\" on host \"{{ inventory_hostname }}\", {{
msg: "deploying role {{ host_primary_role | d('(no role)') }}\" on host \"{{ inventory_hostname }}\", {{
(('stages ' if (selected_stages | length > 1) else 'stage ') ~ (selected_stages | join(', ')))
if ([1,2,3,4,5,6,7,8,9] | symmetric_difference(selected_stages) | list | length > 0) else 'all stages' }}\n\n{{
'dependencies:\n' ~ (role_deps | map(attribute='stage') | list | zip(

ansible.cfg
@ -5,6 +5,8 @@ use_persistent_connections = true
forks = 6
internal_poll_interval = 0.01
jinja2_native = true
no_target_syslog = true
verbosity = 1
[ssh_connection]
pipelining = true

group_vars/all.yml
@ -1,18 +1,9 @@
ansible_user: root
ansible_dir: /etc/ansible
ansible_key_dir: keys
alpine_version: "3.17"
mac_prefix: 02:FF
default_container_hardware:
cores: 1
cpus: 1
cpuunits: 1024
memory: 128
swap: 128
disk: 0.4
known_external_ca:
- url: letsencrypt.org
wildcard: no
@ -39,5 +30,17 @@ role_dependency_common:
- {stage: 2, role: 'common'}
- {stage: 4, role: 'ns'}
- {stage: 5, role: 'mail-user'}
- {stage: 8, role: 'rproxy'}
- {stage: 8, role: 'iptables'}
- {stage: 9, role: 'backup', function: 'setup'}
- {stage: 9, role: 'backup'}
reverse_proxy_type: caddy
acme_default_config:
endpoint_prod: https://acme-v02.api.letsencrypt.org/directory
endpoint_staging: https://acme-staging-v02.api.letsencrypt.org/directory
staging: no
resolver: 1.1.1.1
renew_at_days: 45
preferred_chain: 'ISRG Root X1'
type: ec384

handlers/main.yml
@ -0,0 +1,3 @@
- name: restart systemd daemons
systemd:
daemon_reload: yes

roles/acme-dns/defaults/main.yml
@ -5,10 +5,9 @@ acme_dns_dir: /opt/acmedns
acme_dns_tld: "acme-dns.{{ acme_tld | d(tld) }}"
acme_dns_ns: "ns.acme-dns.{{ acme_tld | d(tld) }}"
acme_dns_admin: "{{ maintainer_email | d('admin@' ~ (acme_tld | d(tld))) }}"
acme_dns_external_ipv4: "{{ hostvars[selected_node]['external_ipv4'] }}"
acme_dns_api_port: 8080
acme_dns_default_config:
general:
listen: ":53"
@ -32,12 +31,12 @@ acme_dns_default_config:
port: "{{ acme_dns_api_port }}"
disable_registration: no
tls: none
use_header: no
notification_email: "{{ letsencrypt_email | d(maintainer_email) }}"
use_header: yes
header_name: X-Forwarded-For
corsorigins:
- "*"
logconfig:
loglevel: debug
logtype: stdout

roles/acme-dns/tasks/info.yml
@ -0,0 +1,18 @@
- name: set role information
set_fact:
role_dependency_index: 0
role_hardware:
cores: 2
memory: 128
swap: 64
disk: 0.6
role_use_reverse_proxy: yes
role_use_database: yes
role_firewall_config:
filter:
input:
- { protocol: tcp, dst_port: [53], action: accept }
- { protocol: udp, dst_port: [53], action: accept }

roles/acme-dns/tasks/main.yml
@ -1,6 +1,7 @@
- name: set acme_dns_cfg
set_fact:
acme_dns_cfg: "{{ acme_dns_default_config | d({}) | combine(acme_dns_config | d({}), recursive=true) }}"
acme_dns_cfg: "{{ acme_dns_default_config | d({}) |
combine(acme_dns_config | d({}), recursive=true) }}"
- name: install dependencies
@ -27,7 +28,7 @@
location: github
assets: yes
asset_filter: 'Linux_amd64.tar.gz$'
file: "{{ acme_dns_dir }}/last_version"
file: "{{ (acme_dns_dir, 'last_version') | path_join }}"
extract: "{{ acme_dns_dir }}"
user: "{{ acme_dns_user }}"
group: "{{ acme_dns_group }}"
@ -36,7 +37,7 @@
- name: delete unnecessary files
file:
path: "{{ acme_dns_dir }}/{{ item }}"
path: "{{ (acme_dns_dir, item) | path_join }}"
state: absent
loop:
- CHANGELOG.md
@ -47,7 +48,7 @@
- name: template acme-dns config
template:
src: config.j2
dest: "{{ acme_dns_dir }}/config.cfg"
dest: "{{ (acme_dns_dir, 'config.cfg') | path_join }}"
force: yes
mode: 0400
owner: "{{ acme_dns_user }}"
@ -63,45 +64,47 @@
force: yes
mode: "+x"
notify: restart acme-dns
when: ansible_distribution == 'Alpine'
- name: ensure acme-dns binary has executable bit set
file:
path: "{{ acme_dns_dir }}/acme-dns"
path: "{{ (acme_dns_dir, 'acme-dns') | path_join }}"
mode: "+x"
- name: add cap_net_bind_service to acme-dns executable
community.general.capabilities:
path: "{{ acme_dns_dir }}/acme-dns"
path: "{{ (acme_dns_dir, 'acme-dns') | path_join }}"
capability: cap_net_bind_service+ep
changed_when: no
- name: set acme server address
set_fact:
acme_server: "http://127.0.0.1:{{ acme_dns_api_port }}"
- name: flush handlers
meta: flush_handlers
- name: install and configure nginx
- name: add reverse proxy config
include_role:
name: nginx
name: rproxy
tasks_from: add.yml
vars:
nginx:
servers:
- conf: nginx_server
certs: "{{ host_tls }}"
- name: flush handlers
meta: flush_handlers
rproxy_config:
port: "{{ acme_dns_api_port }}"
acme:
server: "http://127.0.0.1:{{ acme_dns_api_port }}"
nginx_rproxy: rproxy_nginx.j2
caddy_rproxy:
- handler: reverse_proxy
upstreams:
- dial: "127.0.0.1:{{ acme_dns_api_port }}"
- name: add directories to backup plan
include_role:
name: backup
tasks_from: add.yml
vars:
function: add
backup_items:
- "{{ acme_dns_dir }}"

roles/acme-dns/templates/init.j2
@ -1,9 +1,9 @@
#!/sbin/openrc-run
name="$SVCNAME"
command="{{ acme_dns_dir }}/$SVCNAME"
command="{{ (acme_dns_dir, 'acme-dns') | path_join }}"
directory="{{ acme_dns_dir }}"
command_user="{{ acme_dns_user }}:{{ acme_dns_group }}"
command_user="{{ acme_dns_user ~ ':' ~ acme_dns_group }}"
pidfile="/var/run/$SVCNAME.pid"
command_background=true
start_stop_daemon_args="--stdout-logger logger --stderr-logger logger"
@ -11,8 +11,10 @@ start_stop_daemon_args="--stdout-logger logger --stderr-logger logger"
depend() {
need net
use dns
before nginx caddy
after mariadb postgresql
}
start_pre() {
setcap 'cap_net_bind_service=+ep' {{ acme_dns_dir }}/$SVCNAME
setcap 'cap_net_bind_service=+ep' {{ (acme_dns_dir, 'acme-dns') | path_join | quote }}
}

roles/acme/templates/renewal.j2
@ -9,7 +9,7 @@
{% if (acme_cert is string) and (acme_cert | length > 0) and (acme_use_symlinks | d(true) == false) -%}
cp -fpT {{ (acme_directory ~ '/live/' ~ acme_cert_name ~ '/fullchain.pem') | quote }} {{ acme_cert | quote }}
cp -fpT {{ (lego_cert_dir ~ '/live/' ~ acme_cert_name ~ '/fullchain.pem') | quote }} {{ acme_cert | quote }}
{% if (acme_owner is not string) and (acme_group is string) -%}
chgrp -f {{ acme_group }} {{ acme_cert | quote }}
{% elif acme_owner is defined -%}

roles/backup/tasks/main.yml
@ -1,8 +1,31 @@
- name: add to backup plan
include_tasks: add.yml
when: function is defined and function == 'add'
- name: notify that backups are not supported
debug:
msg: backup host is missing, will not set up backups
when: services.backup is not mapping
- name: setup backups
include_tasks: setup.yml
when: function is defined and function == 'setup'
- name: install restic with custom configuration
block:
- include_role:
name: restic
vars:
backup: "{{ backup_cfg }}"
when: services.backup is mapping and backup_cfg is mapping
- name: install restic with default configuration
block:
- include_role:
name: restic
vars:
backup:
dirs: "{{ collected_backup_dirs }}"
password: "{{ backup_password }}"
tags: automated
filter:
- "*.log"
- "node_modules"
- ".npm"
when: services.backup is mapping and backup_cfg is not defined and backup_password is defined

roles/backup/tasks/setup.yml
@ -1,31 +0,0 @@
- name: notify that backups are not supported
debug:
msg: backup host is missing, will not set up backups
when: services.backup is not mapping
- name: install restic with custom configuration
block:
- include_role:
name: restic
vars:
backup: "{{ backup_cfg }}"
when: services.backup is mapping and backup_cfg is mapping
- name: install restic with default configuration
block:
- include_role:
name: restic
vars:
backup:
dirs: "{{ collected_backup_dirs }}"
password: "{{ backup_password }}"
tags: automated
filter:
- "*.log"
- "node_modules"
- ".npm"
when: services.backup is mapping and backup_cfg is not defined and backup_password is defined

roles/caddy/defaults/main.yml
@ -0,0 +1,46 @@
caddy_user: caddy
caddy_group: caddy
caddy_conf_dir: /etc/caddy
caddy_asset_dir: /opt/caddy-assets
caddy_bin_dir: /usr/sbin
caddy_cert_dir: /etc/caddy/certs
caddy_xcaddy_dir: /opt/xcaddy
caddy_conf_file: "{{ (caddy_conf_dir, 'caddy.json') | path_join }}"
caddy_ecc384_cert: "{{ (caddy_cert_dir, 'ecc384.crt') | path_join }}"
caddy_ecc384_key: "{{ (caddy_cert_dir, 'ecc384.key') | path_join }}"
caddy_rsa2048_cert: "{{ (caddy_cert_dir, 'rsa2048.crt') | path_join }}"
caddy_rsa2048_key: "{{ (caddy_cert_dir, 'rsa2048.key') | path_join }}"
caddy_default_plugins: []
caddy_domains:
- "{{ host_fqdn }}"
caddy_default_config:
admin:
disabled: yes
logging:
sink:
writer:
output: stdout
logs:
default:
writer:
output: stdout
encoder:
format: console
level: INFO
storage:
module: file_system
root: "{{ caddy_asset_dir }}"
apps:
tls:
session_tickets:
rotation_interval: 4h
max_keys: 8
cache:
capacity: 512
http:
grace_period: 20s

roles/caddy/handlers/main.yml
@ -0,0 +1,4 @@
- name: restart caddy
service:
name: caddy
state: restarted

roles/caddy/tasks/build_caddy.yml
@ -0,0 +1,133 @@
- name: install xcaddy from debian/ubuntu repository
block:
- name: install dependencies
package:
name:
- debian-keyring
- debian-archive-keyring
- apt-transport-https
- name: get xcaddy signing key location
set_fact:
caddy_xcaddy_signing_key_file: "{{ (gpg_keyrings_dir, 'xcaddy.asc') | path_join }}"
- name: add xcaddy signing key
get_url:
url: "{{ caddy_xcaddy_gpg_key_url }}"
dest: "{{ caddy_xcaddy_signing_key_file }}"
mode: a+r
- name: add apt repo
apt_repository:
repo: "deb [signed-by={{ caddy_xcaddy_signing_key_file | quote }}] {{ caddy_xcaddy_repo_url }} any-version main"
register: result
- name: update repository index
apt:
force_apt_get: yes
update_cache: yes
changed_when: no
when: result.changed
- name: install xcaddy
package:
name:
- xcaddy
- golang
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
- name: install xcaddy from github
block:
- name: determine host architecture
include_tasks: tasks/get_host_arch.yml
- name: create xcaddy dir
file:
path: "{{ caddy_xcaddy_dir }}"
state: directory
- name: get latest xcaddy version
include_tasks: tasks/get_lastversion.yml
vars:
package:
name: caddyserver/xcaddy
location: github
file: "{{ (caddy_conf_dir, 'last_xcaddy_version') | path_join }}"
assets: yes
asset_filter: "{{ 'linux_' ~ host_architecture ~ '.tar.gz$' }}"
extract: "{{ caddy_xcaddy_dir }}"
- name: ensure xcaddy binary has executable flag set
file:
path: "{{ (caddy_xcaddy_dir, 'xcaddy') | path_join }}"
mode: "+x"
- name: copy xcaddy to bin dir
copy:
src: "{{ (caddy_xcaddy_dir, 'xcaddy') | path_join }}"
dest: "{{ (caddy_bin_dir, 'xcaddy') | path_join }}"
remote_src: yes
mode: "+x"
- name: install golang
package:
name: go
when: ansible_distribution == 'Alpine'
- name: get latest caddy version
include_tasks: tasks/get_lastversion.yml
vars:
package:
name: caddyserver/caddy
location: github
file: "{{ (caddy_conf_dir, 'last_caddy_version') | path_join }}"
- name: generate build command
set_fact:
caddy_build_command: >-
{{
'xcaddy build ' ~ (
[] | zip_longest((caddy_default_plugins | d([])) +
(caddy_custom_plugins | d([])) | select() | map('quote'), fillvalue='--with ')
| map('join') | list | join(' ')
) ~ ' --output ' ~ ((caddy_bin_dir, 'caddy') | path_join | quote) }}
- name: save build command to a file
copy:
content: "{{ caddy_build_command }}"
dest: "{{ (caddy_conf_dir, 'build_info') | path_join }}"
mode: 0400
register: result
- name: build caddy binary if a new version is found or build command was changed
shell:
cmd: "{{ caddy_build_command }}"
chdir: "{{ caddy_bin_dir }}"
register: result
when: package_changed or result.changed
notify: restart caddy
- name: check if caddy version and last version are identical
shell:
cmd: "caddy version | cut -d ' ' -f1 | cut -b2- | diff -wq {{ (caddy_conf_dir, 'last_caddy_version') | path_join | quote }} -"
register: result
failed_when: result.rc != 0
changed_when: no

roles/caddy/tasks/install_prebuilt_caddy.yml
@ -0,0 +1,41 @@
- name: install caddy from debian/ubuntu repository
block:
- name: install dependencies
package:
name:
- debian-keyring
- debian-archive-keyring
- apt-transport-https
- name: get caddy signing key location
set_fact:
caddy_signing_key_file: "{{ (gpg_keyrings_dir, 'caddy.asc') | path_join }}"
- name: add caddy signing key
get_url:
url: "{{ caddy_gpg_key_url }}"
dest: "{{ caddy_signing_key_file }}"
mode: a+r
- name: add apt repo
apt_repository:
repo: "deb [signed-by={{ caddy_signing_key_file | quote }}] {{ caddy_repo_url }} any-version main"
register: result
- name: update repository index
apt:
force_apt_get: yes
update_cache: yes
changed_when: no
when: result.changed
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
- name: install caddy
package:
name: caddy

roles/caddy/tasks/main.yml
@ -0,0 +1,144 @@
- name: determine if custom caddy build should be used
set_fact:
caddy_custom_build: "{{ (((caddy_default_plugins | d([])) + (caddy_custom_plugins | d([]))) | length > 0) or (caddy_use_xcaddy | d(false) == true) }}"
- name: import vars for unmanaged tls
include_vars:
file: tls.yml
when: host_tls
- name: import reverse proxy vars
include_vars:
file: reverse_proxy.yml
when: caddy_reverse_proxy_handlers is defined
- name: set caddy_cfg
set_fact:
caddy_cfg: "{{ caddy_default_config | d({}) |
combine(caddy_tls_config | d({}), recursive=true) |
combine(caddy_reverse_proxy_config | d({}), recursive=true, list_merge='replace') |
combine(caddy_config | d({}), recursive=true) }}"
- name: create user and group
include_tasks: tasks/create_user.yml
vars:
user:
name: "{{ caddy_user }}"
group: "{{ caddy_group }}"
dir: "{{ caddy_conf_dir }}"
create_home: no
- name: create caddy directories
file:
path: "{{ item }}"
state: directory
owner: "{{ caddy_user }}"
group: "{{ caddy_group }}"
loop:
- "{{ caddy_conf_dir }}"
- "{{ caddy_asset_dir }}"
- "{{ caddy_cert_dir }}"
- name: create caddy bin dir
file:
path: "{{ caddy_bin_dir }}"
state: directory
- name: build caddy
include_tasks: build_caddy.yml
when: caddy_custom_build
- name: install prebuilt caddy
include_tasks: install_prebuilt_caddy.yml
when: not caddy_custom_build
- name: template systemd file
template:
src: systemd.j2
dest: /etc/systemd/system/caddy.service
force: yes
lstrip_blocks: yes
notify:
- reload systemd daemons
- restart caddy
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
- name: template init script
template:
src: init.j2
dest: /etc/init.d/caddy
force: yes
mode: 0755
notify: restart caddy
when: ansible_distribution == 'Alpine'
- name: change permissions on asset dir contents
file:
path: "{{ caddy_asset_dir }}"
recurse: yes
owner: "{{ caddy_user }}"
group: "{{ caddy_group }}"
notify: restart caddy
- block:
- name: deploy certificates through lego
include_role:
name: lego
vars:
acme:
domains: "{{ caddy_domains }}"
cert: "{{ caddy_ecc384_cert }}"
key: "{{ caddy_ecc384_key }}"
owner: "{{ caddy_user }}"
group: "{{ caddy_group }}"
run_after_renew: service caddy restart
notify: restart caddy
acme2: "{{ caddy_acme_config | d({}) }}"
when: host_tls
- name: template caddy config
template:
src: caddy.j2
dest: "{{ caddy_conf_file }}"
force: yes
owner: "{{ caddy_user }}"
group: "{{ caddy_group }}"
mode: 0400
validate: "{{ (caddy_bin_dir, 'caddy') | path_join }} validate --config %s"
notify: restart caddy
- name: flush handlers
meta: flush_handlers
- name: add directories to backup plan
include_role:
name: backup
tasks_from: add.yml
vars:
backup_items:
- "{{ caddy_asset_dir }}"
- "{{ caddy_conf_dir }}"
- "{{ caddy_cert_dir }}"
- name: enable and start caddy
service:
name: caddy
enabled: yes
state: started

roles/caddy/templates/caddy.j2
@ -0,0 +1 @@
{{ caddy_cfg | to_nice_json(indent=2) }}

roles/caddy/templates/init.j2
@ -0,0 +1,23 @@
#!/sbin/openrc-run
: ${caddy_opts:="--config {{ caddy_conf_file | quote }}"}
name="$SVCNAME"
directory="{{ caddy_conf_dir }}"
command="{{ (caddy_bin_dir, 'caddy') | path_join }}"
command_args="run --environ $caddy_opts"
command_user="{{ caddy_user ~ ':' ~ caddy_group }}"
pidfile="/var/run/$SVCNAME.pid"
command_background=true
extra_started_commands="reload"
depend() {
need net localmount
after firewall
}
reload() {
ebegin "Reloading $SVCNAME"
su ${command_user%:*} -s /bin/sh -c "$command reload $caddy_opts"
eend $?
}

roles/caddy/vars/reverse_proxy.yml
@ -0,0 +1,21 @@
caddy_reverse_proxy_default_handler:
- handler: static_response
status_code: 404
caddy_reverse_proxy_config:
apps:
http:
servers:
rproxy:
listen: "{{ ['tcp4/:443', 'tcp6/:443'] if host_tls else ['tcp4/:80', 'tcp6/:80'] }}"
listener_wrappers: "{{ [{'wrapper': 'http_redirect'}, {'wrapper': 'tls'}] if host_tls else [] }}"
automatic_https:
disable: yes
tls_connection_policies:
- match:
sni:
- "{{ host_fqdn }}"
default_sni: "{{ host_fqdn }}"
routes:
- handle: "{{ (caddy_reverse_proxy_handlers | d([])) + caddy_reverse_proxy_default_handler }}"
terminal: true

roles/caddy/vars/tls.yml
@ -0,0 +1,8 @@
caddy_tls_config:
apps:
tls:
certificates:
load_files:
- certificate: "{{ caddy_ecc384_cert }}"
key: "{{ caddy_ecc384_key }}"
tags: "{{ caddy_domains }}"

roles/certs/tasks/acme_dns.yml
@ -22,3 +22,4 @@
acme_tld: "{{ combined.tld | d(None) }}"
acme_fqdn: "{{ combined.fqdn | d(None) }}"
acme_hosts: "{{ combined.hosts | d(None) }}"
acme_server: "{{ combined.acme_server | d(None) }}"

roles/common/defaults/main.yml
@ -1 +1,2 @@
dropbear_dir: /etc/dropbear
alpine_version: "3.17"

roles/container/defaults/main.yml
@ -4,4 +4,4 @@ container_pool: production
container_distro: alpine
container_template:
alpine: alpine-3.17-default_20221129_amd64.tar.xz
debian: debian-11-standard_11.3-1_amd64.tar.zst
debian: debian-11-standard_11.6-1_amd64.tar.zst

roles/container/tasks/main.yml
@ -4,7 +4,7 @@
pm_api_user: "{{ hostvars[selected_node]['api_user'] | d('root@pam') }}"
pm_api_password: "{{ hostvars[selected_node]['api_password'] |
d(hostvars[selected_node]['host_password'] | d(hostvars[selected_node]['ansible_password'])) }}"
pm_lxc_storage: "{{ hostvars[selected_node]['lxc_storage'] | d('local-zfs') }}"
pm_lxc_storage: "{{ container_storage | d(hostvars[selected_node]['lxc_storage'] | d('local-zfs')) }}"
no_log: yes

roles/container/tasks/preconf.yml
@ -13,20 +13,11 @@
- pct_command: rc-update add dropbear
chg_substr: added to runlevel
- name: install dropbear-scp if this is not an ansible controller
include_tasks: tasks/pct_command.yml
vars:
pct_command: apk add dropbear-scp
chg_substr: Installing
when: (inventory_hostname != 'ansible') and ((primary_role is not defined) or (primary_role != 'ansible'))
and alpine_version is version('3.15', '<=')
- name: install openssh-sftp-server due to openssh 9 scp deprecation
- name: install openssh-sftp-server
include_tasks: tasks/pct_command.yml
vars:
pct_command: apk add openssh-sftp-server
chg_substr: Installing
when: alpine_version is version('3.16', '>=')
- name: start dropbear
include_tasks: tasks/pct_command.yml
@ -57,7 +48,6 @@
vars:
pct_command: "sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config"
- name: start sshd
include_tasks: tasks/pct_command.yml
vars:

roles/coredns/tasks/info.yml
@ -11,3 +11,9 @@
memory: 128
swap: 64
disk: 0.3
role_firewall_config:
filter:
input:
- { protocol: tcp, dst_port: [53, 443, 853], action: accept }
- { protocol: udp, dst_port: [53, 443, 853], action: accept }

roles/iptables/defaults/main.yml
@ -1,5 +1,5 @@
iptables_dir: /etc/iptables
iptables_file: "{{ iptables_dir }}/rules-save"
iptables_file: "{{ (iptables_dir, 'rules-save' if ansible_distribution == 'Alpine' else 'rules.v4') | path_join }}"
iptables_mappings:
state: { module: 'state', param: 'state', upper: yes, join: ',' }
@ -20,3 +20,24 @@ iptables_mappings:
set_mss: { param: 'set-mss' }
to_source: { param: 'to-source' }
firewall_default_config:
filter:
default_policy:
input: drop
forward: drop
output: accept
input:
- { state: ['established', 'related'], action: accept }
- { state: invalid, action: drop }
- { protocol: icmp, icmp_type: 8, action: accept }
- { in_intf: lo, action: accept }
- { not_in_intf: lo, src_addr: '127.0.0.0/8', action: drop }
firewall_ssh_config:
filter:
input:
- "{{ { 'protocol': 'tcp', 'dst_port': 22, 'src_addr': admin_net | d(int_net | d('0.0.0.0/0')), 'action': 'accept' } }}"
- "{{ { 'protocol': 'tcp', 'dst_port': 22, 'src_addr': hostvars['ansible']['ansible_host'], 'action': 'accept' } if hostvars['ansible'] is defined else None }}"

roles/iptables/tasks/add.yml
@ -0,0 +1,5 @@
- name: collect firewall config
set_fact:
firewall_collected_configs: "{{ firewall_collected_configs | d([]) |
combine(firewall_config, recursive=true, list_merge='append') }}"
when: firewall_config is mapping

roles/iptables/tasks/main.yml
@ -1,7 +1,11 @@
- block:
- name: set firewall_cfg
set_fact:
firewall_cfg: "{{ firewall_default_config | d({}) | combine(firewall | d({}), recursive=true) }}"
firewall_cfg: "{{ firewall_default_config | d({}) |
combine(firewall_ssh_config if (firewall_use_ssh | d(true) == true) else {}, recursive=true, list_merge='append') |
combine(role_firewall_config | d({}), recursive=true, list_merge='append') |
combine(firewall_collected_configs | d({}), recursive=true, list_merge='append') |
combine(firewall | d({}), recursive=true, list_merge='append') }}"
- name: install iptables
@ -18,6 +22,7 @@
path: /etc/conf.d/iptables
regexp: "^IPTABLES_SAVE="
line: "IPTABLES_SAVE=\"{{ iptables_file }}\""
when: ansible_distribution == 'Alpine'
- name: template iptables schema
@ -43,5 +48,14 @@
name: iptables
enabled: yes
state: started
when: ansible_distribution == 'Alpine'
when: firewall is mapping
- name: start and enable netfilter-persistent
service:
name: netfilter-persistent
enabled: yes
state: started
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
when: firewall is mapping or role_firewall_config is mapping or (host_firewall | d(false) == true)

roles/iptables/templates/iptables.j2
@ -49,7 +49,9 @@
{% if section.key != 'default_policy' -%}
{% if section.value | type_debug == 'list' -%}
{% for rule in section.value -%}
{% if rule is mapping and rule != None -%}
{{ iptables_rule(section.key, rule) }}
{% endif -%}
{% endfor -%}
{% elif section.value is mapping -%}
{{ iptables_rule(section.key, section.value) }}

roles/lego/defaults/main.yml
@ -0,0 +1,3 @@
lego_dir: /opt/lego
lego_cert_dir: /etc/lego
lego_accounts_file: "{{ (lego_cert_dir, 'accounts.conf') | path_join }}"

roles/lego/tasks/main.yml
@ -0,0 +1,214 @@
- name: fail if acme is not defined
fail:
msg: acme must be a mapping
when: acme is not mapping
- name: set acme_cfg
set_fact:
acme_cfg: "{{ acme_default_config | d({}) |
combine(acme_config | d({}), recursive=true) |
combine(acme2 | d({}), recursive=true) |
combine(acme, recursive=true) }}"
- name: determine host architecture
include_tasks: tasks/get_host_arch.yml
- name: create lego dirs
file:
path: "{{ item }}"
state: directory
mode: 0700
loop:
- "{{ lego_dir }}"
- "{{ lego_cert_dir }}"
- name: get and extract latest lego version
include_tasks: tasks/get_lastversion.yml
vars:
package:
name: go-acme/lego
location: github
assets: yes
asset_filter: "{{ 'linux_' ~ host_architecture ~ '.tar.gz$' }}"
file: "{{ (lego_dir, 'last_version') | path_join }}"
extract: "{{ lego_dir }}"
- block:
- name: remove unnecessary files
file:
path: "{{ (lego_dir, item) | path_join }}"
state: absent
loop:
- LICENSE
- CHANGELOG.md
rescue:
- meta: noop
- name: set job name
set_fact:
lego_job_name: "{{ acme_cfg.domains[0] ~ '-' ~ acme_cfg.type }}"
- name: set initial lego facts
set_fact:
lego_must_reissue: yes
lego_renew_script: "{{ (lego_cert_dir, 'cron-' ~ lego_job_name ~ '.sh') | path_join }}"
lego_lastrun_file: "{{ (lego_cert_dir, 'lastrun-' ~ lego_job_name) | path_join }}"
- name: create custom renewal script file
template:
src: renewal.j2
dest: "{{ lego_renew_script }}"
force: yes
mode: 0500
lstrip_blocks: yes
- name: set lego parameters
set_fact:
lego_params: "{{
[
([] | zip_longest(acme_cfg.domains | d([]) | select() | map('quote'), fillvalue='--domains ') | map('join') | list),
'--server ' ~ ((acme_cfg.endpoint_staging if acme_cfg.staging else acme_cfg.endpoint_prod) | quote),
'--accept-tos',
'--filename ' ~ (lego_job_name | quote),
'--email ' ~ (acme_cfg.email | d(maintainer_email) | quote),
'--key-type ' ~ (acme_cfg.type | quote),
'--path ' ~ (lego_cert_dir | quote),
'--dns acme-dns',
'--dns.resolvers ' ~ (acme_cfg.resolver | d('1.1.1.1') | quote),
'--dns.disable-cp'
] | flatten(levels=1) | select() | list | join(' ') }}"
lego_renewal_params: "{{
[
(('--days ' ~ (acme_cfg.renew_at_days | quote)) if acme_cfg.renew_at_days is defined else ''),
('--reuse-key' if acme_cfg.reuse_key | d(false) == true else ''),
('--no-random-sleep' if acme_cfg.no_random_sleep | d(true) == true else ''),
('--renew-hook ' ~ (lego_renew_script | quote))
] | flatten(levels=1) | select() | list | join(' ') }}"
lego_preferred_chain: "{{ '--preferred-chain ' ~ (acme_cfg.preferred_chain | quote) if acme_cfg.preferred_chain is defined else '' }}"
- name: set lego command facts
set_fact:
lego_full_command: "{{ (lego_dir, 'lego') | path_join }} {{ lego_params }} run {{ lego_preferred_chain }}"
lego_renew_command: "{{ (lego_dir, 'lego') | path_join }} {{ lego_params }} renew {{ lego_preferred_chain }} {{ lego_renewal_params }}"
- name: check if lastrun file exists
stat:
path: "{{ lego_lastrun_file }}"
get_checksum: no
get_mime: no
register: result
- block:
- name: get lastrun file contents
slurp:
path: "{{ lego_lastrun_file }}"
register: file_content
no_log: yes
- name: determine if cert should be reissued
set_fact:
lego_must_reissue: "{{ (file_content.content | b64decode) != lego_full_command }}"
when: result.stat.exists
- block:
- name: issue cert with dns mode
shell:
cmd: "{{ lego_full_command }}"
chdir: "{{ lego_dir }}"
environment:
ACME_DNS_API_BASE: "{{ acme_cfg.server }}"
ACME_DNS_STORAGE_PATH: "{{ lego_accounts_file }}"
register: result
when: lego_must_reissue
rescue:
- pause:
when: interactive | d(true) == true
- name: retry issuing cert with dns mode
shell:
cmd: "{{ lego_full_command }}"
chdir: "{{ lego_dir }}"
environment:
ACME_DNS_API_BASE: "{{ acme_cfg.server }}"
ACME_DNS_STORAGE_PATH: "{{ lego_accounts_file }}"
register: result
- block:
- name: save data to lastrun file
copy:
content: "{{ lego_full_command }}"
dest: "{{ lego_lastrun_file }}"
remote_src: yes
- name: defer service restart
debug:
msg: deferring service restart
changed_when: yes
notify: "{{ acme_cfg.notify }}"
when: acme_cfg.notify is defined
- name: copy certificates to their intended locations
copy:
src: "{{ (lego_cert_dir, 'certificates', lego_job_name ~ '.' ~ item.src_ext) | path_join }}"
dest: "{{ item.dest }}"
remote_src: yes
mode: 0600
owner: "{{ acme_cfg.owner | d(omit) }}"
group: "{{ acme_cfg.group | d(omit) }}"
when: item.dest != None
loop:
- { src_ext: 'crt', dest: "{{ acme_cfg.cert | d(None) }}" }
- { src_ext: 'key', dest: "{{ acme_cfg.key | d(None) }}" }
notify: "{{ acme_cfg.notify | d(omit) }}"
when: lego_must_reissue
- name: configure systemd service and timer
block:
- name: template systemd files
template:
src: "{{ item.src }}.j2"
dest: "{{ ('/etc/systemd/system', item.dst) | path_join }}"
force: yes
lstrip_blocks: yes
loop:
- { src: 'lego_systemd', dst: 'lego.service' }
- { src: 'lego_timer', dst: 'lego.timer' }
notify: reload systemd daemons
- name: enable lego timer
systemd:
name: lego.timer
state: started
enabled: yes
when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu'
- name: configure crontab entry
cron:
name: "certificate renewal ({{ lego_job_name ~ ' on ' ~ acme_cfg.server }})"
job: "ACME_DNS_API_BASE={{ acme_cfg.server | quote }} ; \
ACME_DNS_STORAGE_PATH={{ lego_accounts_file | quote }} ; \
cd {{ lego_dir | quote }} ; \
{{ lego_renew_command }}"
hour: "{{ 4 | random(start=1, seed=(host_name ~ lego_job_name)) }}"
minute: "{{ 59 | random(seed=(host_name ~ lego_job_name)) }}"
when: ansible_distribution == 'Alpine'

roles/lego/templates/renewal.j2
@ -0,0 +1,22 @@
{%- set cert_base = (lego_cert_dir, 'certificates', lego_job_name ~ '.') | path_join -%}
#!/bin/sh
{{ (acme_cfg.run_before_renew ~ ' &>/dev/null') if acme_cfg.run_before_renew is defined else '' }}
{%- if acme_cfg.cert is defined %}
cp -fpT {{ (cert_base ~ 'crt') | quote }} {{ acme_cfg.cert | quote }}
{% if acme_cfg.owner is defined -%}
chown -f {{ acme_cfg.owner ~ ((':' ~ acme_cfg.group) if acme_cfg.group is defined else '') }} {{ acme_cfg.cert | quote }}
{% endif -%}
{% endif -%}
{% if acme_cfg.key is defined -%}
cp -fpT {{ (cert_base ~ 'key') | quote }} {{ acme_cfg.key | quote }}
{% if acme_cfg.owner is defined -%}
chown -f {{ acme_cfg.owner ~ ((':' ~ acme_cfg.group) if acme_cfg.group is defined else '') }} {{ acme_cfg.key | quote }}
{% endif -%}
{% endif -%}
{{ (acme_cfg.run_after_renew ~ ' &>/dev/null &') if acme_cfg.run_after_renew is defined else '' }}

roles/nginx/tasks/main.yml
@ -148,6 +148,7 @@
notify: restart nginx
stapling: "{{ nginx_cfg.must_staple | d(nginx_cfg.enable_stapling) | d(false) }}"
hosts: "{{ nginx_cfg.domains | d(None) }}"
acme_server: "{{ nginx_cfg.acme_server | d(None) }}"
certs:
- id: "{{ host_name ~ '-nginx-ecc' }}"
cert: "{{ nginx_cfg.conf_dir }}/tls/{{ nginx_cfg.cert_ecc_name }}.crt"

roles/postgres/defaults/main.yml
@ -1,10 +1,9 @@
postgresql_user: postgres
postgresql_group: postgres
postgresql_data_dir: /db
postgresql_data_dir: /opt/postgresql
postgresql_conf_dir: /etc/postgresql
postgresql_tls_dir: "{{ postgresql_conf_dir }}/tls"
postgresql_dhparam_file: dhparam.pem
postgresql_tls_dir: "{{ (postgresql_conf_dir, 'tls') | path_join }}"
postgresql_db: []
@ -50,9 +49,9 @@ postgresql_default_config:
log_timezone: "{{ timezone }}"
timezone: "{{ timezone }}"
shared_buffers: "{{ ((hardware.memory | d(512) | int) * (1024/2)) | int }}kB"
work_mem: "{{ ((hardware.memory | d(512) | int) * (1024/35)) | round(1, 'ceil') | int }}kB"
max_wal_size: "{{ ((hardware.disk | d(2) | float) * (1024 / 2)) | int }}MB"
shared_buffers: "{{ ((host_hardware.memory | d(512) | int) * (1024/2)) | int }}kB"
work_mem: "{{ ((host_hardware.memory | d(512) | int) * (1024/35)) | round(1, 'ceil') | int }}kB"
max_wal_size: "{{ ((host_hardware.disk | d(2) | float) * (1024 / 2)) | int }}MB"
postgresql_tls_config:
@ -61,7 +60,7 @@ postgresql_tls_config:
ssl_prefer_server_ciphers: yes
ssl_min_protocol_version: TLSv1.2
ssl_ecdh_curve: secp384r1
ssl_ca_file: "{{ postgresql_tls_dir }}/root.crt"
ssl_cert_file: "{{ postgresql_tls_dir }}/ecc384.crt"
ssl_key_file: "{{ postgresql_tls_dir }}/ecc384.key"
ssl_dh_params_file: "{{ postgresql_tls_dir ~ '/' ~ postgresql_dhparam_file }}"
ssl_ca_file: "{{ (postgresql_tls_dir, 'root.crt') | path_join }}"
ssl_cert_file: "{{ (postgresql_tls_dir, 'ecc384.crt') | path_join }}"
ssl_key_file: "{{ (postgresql_tls_dir, 'ecc384.key') | path_join }}"
ssl_dh_params_file: "{{ (postgresql_tls_dir, 'dhparam.pem') | path_join }}"

roles/postgres/tasks/add_database.yml
@ -54,9 +54,18 @@
role: "{{ database.user }}"
- name: grant privileges to public schema
community.postgresql.postgresql_privs:
database: "{{ database.name }}"
privs: USAGE,CREATE
type: schema
objs: public
role: "{{ database.user }}"
- name: add line to postgres hba
community.postgresql.postgresql_pg_hba:
dest: "{{ postgresql_conf_dir }}/pg_hba.conf"
dest: "{{ (postgresql_conf_dir, 'pg_hba.conf') | path_join }}"
contype: "{{ 'host' if (database.ssl | d(false) == false) else 'hostssl' }}"
databases: "{{ database.name }}"
users: "{{ database.user }}"

roles/postgres/tasks/install.yml
@ -1,6 +1,7 @@
- name: set pg_cfg
set_fact:
pg_cfg: "{{ postgresql_default_config | d({}) | combine(postgresql_config | d({}), recursive=true) }}"
pg_cfg: "{{ postgresql_default_config | d({}) |
combine(postgresql_config | d({}), recursive=true) }}"
- name: install dependencies
@ -29,6 +30,18 @@
group: "{{ postgresql_group }}"
- block:
- name: create data directory
file:
path: "{{ postgresql_data_dir }}"
state: directory
mode: 0700
owner: "{{ postgresql_user }}"
group: "{{ postgresql_group }}"
rescue:
- meta: noop
- name: include custom config in default postgres config
lineinfile:
path: "{{ postgresql_conf_dir }}/postgresql.conf"

roles/postgres/tasks/integrate.yml
@ -1,19 +1,16 @@
- name: install postgres for self-hosted deployment
include_role:
name: postgres
vars:
function: install
include_tasks: install.yml
when: database_self_hosted | d(false) == true
- name: add database
include_role:
name: postgres
include_tasks:
file: add_database.yml
apply:
delegate_to: "{{ inventory_hostname if (database_self_hosted | d(false) == true) else services.db.hostname }}"
vars:
function: add_database
database:
name: "{{ database_name }}"
user: "{{ database_user }}"
pass: "{{ database_pass }}"
self_hosted: "{{ database_self_hosted | d(false) }}"

roles/rproxy/tasks/add.yml
@ -0,0 +1,5 @@
- name: collect rproxy config
set_fact:
rproxy_collected_configs: "{{ (rproxy_collected_configs | d([])) +
([rproxy_config] if rproxy_config is mapping else rproxy_config) }}"
when: rproxy_config is defined and ((rproxy_config | type_debug == 'list') or rproxy_config is mapping)

roles/rproxy/tasks/main.yml
@ -0,0 +1,42 @@
- block:
- name: fail if more than one reverse proxy config was collected
fail:
msg: more than one reverse proxy config was collected, this is not supported yet
when: rproxy_collected_configs | length > 1
- name: install nginx
include_role:
name: nginx
vars:
nginx:
servers:
- conf: rproxy_collected_configs[0].nginx_rproxy
certs: "{{ host_tls }}"
acme_server: "{{ (rproxy_collected_configs[0].acme | d({}))['server'] | d(None) }}"
when: reverse_proxy_type == 'nginx'
- name: install caddy
include_role:
name: caddy
vars:
caddy_config: "{{ rproxy_collected_configs[0].caddy | d({}) }}"
caddy_reverse_proxy_handlers: "{{ rproxy_collected_configs[0].caddy_rproxy | d([]) }}"
caddy_acme_config: "{{ rproxy_collected_configs[0].acme | d({}) }}"
when: reverse_proxy_type == 'caddy'
- name: add firewall entries
include_role:
name: iptables
tasks_from: add.yml
vars:
firewall_config:
filter:
input:
- { protocol: tcp, dst_port: "{{ [80, 443] if host_tls else [80] }}", action: accept }
- { protocol: udp, dst_port: "{{ [80, 443] if host_tls else [80] }}", action: accept }
when: rproxy_collected_configs is defined and rproxy_collected_configs | length > 0
and role_use_reverse_proxy | d(true) == true

roles/vault/defaults/main.yml
@ -14,8 +14,8 @@ vault_default_config:
rocket_port: "{{ vault_port }}"
websocket_port: "{{ vault_websocket_port }}"
org_attachment_limit: "{{ ((hardware.disk | d(10) | float) * 1024 * 1024 / 30) | int | abs }}"
user_attachment_limit: "{{ ((hardware.disk | d(10) | float) * 1024 * 1024 / 90) | int | abs }}"
org_attachment_limit: "{{ ((host_hardware.disk | d(10) | float) * 1024 * 1024 / 30) | int | abs }}"
user_attachment_limit: "{{ ((host_hardware.disk | d(10) | float) * 1024 * 1024 / 90) | int | abs }}"
database_max_conns: 4
websocket_enabled: yes

roles/vault/tasks/info.yml
@ -0,0 +1,12 @@
- name: set role information
set_fact:
role_dependency_index: 2
role_hardware:
cores: 4
memory: 128
swap: 64
disk: 1.2
role_use_reverse_proxy: yes
role_use_database: yes

roles/vault/tasks/main.yml
@ -7,7 +7,9 @@
- name: set vault_cfg
set_fact:
vault_cfg: "{{ vault_default_config | d({}) | combine(vault_mail_config | d({}), recursive=true) | combine(vault_config | d({}), recursive=true) }}"
vault_cfg: "{{ vault_default_config | d({}) |
combine(vault_mail_config | d({}), recursive=true) |
combine(vault_config | d({}), recursive=true) }}"
- name: install curl
@ -30,7 +32,7 @@
- name: create data directory
file:
path: "{{ vault_dir }}/data"
path: "{{ (vault_dir, 'data') | path_join }}"
state: directory
mode: 0750
owner: "{{ vault_user }}"
@ -53,7 +55,7 @@
- name: run docker-image-extract
command:
cmd: "{{ vault_extract_dir }}/docker-image-extract vaultwarden/server:alpine"
cmd: "{{ (vault_extract_dir, 'docker-image-extract') | path_join }} vaultwarden/server:alpine"
chdir: "{{ vault_extract_dir }}"
register: result
changed_when: no
@ -62,7 +64,7 @@
- name: check if output directory exists
stat:
path: "{{ vault_extract_dir }}/output"
path: "{{ (vault_extract_dir, 'output') | path_join }}"
register: result
@ -74,8 +76,8 @@
- name: move vaultwarden to vault dir
copy:
src: "{{ vault_extract_dir ~ '/output/vaultwarden' }}"
dest: "{{ vault_dir ~ '/vaultwarden' }}"
src: "{{ (vault_extract_dir, 'output', 'vaultwarden') | path_join }}"
dest: "{{ (vault_dir, 'vaultwarden') | path_join }}"
force: yes
remote_src: yes
owner: "{{ vault_user }}"
@ -85,14 +87,14 @@
- name: remove output directory
file:
path: "{{ vault_extract_dir }}/output"
path: "{{ (vault_extract_dir, 'output') | path_join }}"
state: absent
changed_when: no
- name: ensure vaultwarden has executable bit set
file:
path: "{{ vault_dir }}/vaultwarden"
path: "{{ (vault_dir, 'vaultwarden') | path_join }}"
mode: "+x"
@ -104,7 +106,7 @@
location: github
assets: yes
asset_filter: '.tar.gz$'
file: "{{ vault_dir }}/last_version"
file: "{{ (vault_dir, 'last_version') | path_join }}"
extract: "{{ vault_dir }}"
user: "{{ vault_user }}"
group: "{{ vault_group }}"
@ -114,7 +116,7 @@
- name: template .env file
template:
src: env.j2
dest: "{{ vault_dir }}/.env"
dest: "{{ (vault_dir, '.env') | path_join }}"
force: yes
mode: 0400
owner: "{{ vault_user }}"
@ -130,6 +132,7 @@
force: yes
mode: "+x"
notify: restart vaultwarden
when: ansible_distribution == 'Alpine'
- name: ensure correct ownership in vault dir
@ -143,25 +146,29 @@
notify: restart vaultwarden
- name: install and configure nginx
include_role:
name: nginx
vars:
nginx:
servers:
- conf: nginx_server
certs: "{{ host_tls }}"
- name: flush handlers
meta: flush_handlers
- name: add reverse proxy config
include_role:
name: rproxy
tasks_from: add.yml
vars:
rproxy_config:
port: "{{ vault_port }}"
nginx_rproxy: rproxy_nginx.j2
caddy_rproxy:
- handler: reverse_proxy
upstreams:
- dial: "127.0.0.1:{{ vault_port }}"
- name: add directories to backup plan
include_role:
name: backup
tasks_from: add.yml
vars:
function: add
backup_items:
- "{{ vault_dir }}"

roles/vault/templates/init.j2
@ -2,7 +2,7 @@
name="$SVCNAME"
directory="{{ vault_dir }}"
command="{{ vault_dir }}/vaultwarden"
command="{{ (vault_dir, 'vaultwarden') | path_join }}"
command_user="{{ vault_user ~ ':' ~ vault_group }}"
pidfile="/var/run/$SVCNAME.pid"
{% if vault_supervised | d(false) == true -%}
@ -14,6 +14,6 @@ command_background=true
depend() {
need net
use dns
before nginx
before nginx caddy
after postgresql mariadb
}

tasks/get_host_arch.yml
@ -0,0 +1,7 @@
- name: determine host architecture
set_fact:
host_architecture: "{{ [ansible_architecture] | map('extract', {
'aarch64': 'arm64',
'x86_64': 'amd64',
'i386': '386'
}) | first }}"

tasks/pre_tasks.yml
@ -44,9 +44,16 @@
- name: set hardware information
set_fact:
host_hardware: "{{ (default_container_hardware | combine(role_hardware | d({})) |
combine(host_hardware | d({}))) if host_is_container
else (host_hardware | d({})) }}"
host_hardware: "{{ (
{
'cores': 4,
'cpus': 1,
'cpuunits': 1024,
'memory': 128,
'swap': 128,
'disk': 0.4
} | combine(role_hardware | d({})) |
combine(host_hardware | d({}))) if host_is_container else (host_hardware | d({})) }}"
- name: clamp hardware cores to max node number
