diff --git a/.gitignore b/.gitignore index c96b8df..3a8c627 100644 --- a/.gitignore +++ b/.gitignore @@ -60,4 +60,3 @@ tags # End of https://www.toptal.com/developers/gitignore/api/vim,linux,vagrant,ansible development.yml -single-test.yml diff --git a/README.md b/README.md index 3f2016a..2eddd2f 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ vagrant plugin install vagrant-hosts vagrant-hostsupdater ``` Additionally, since the test setup mirrors the production setup in that it makes use of subdomains for the individual hosted applications, -the server needs to be reachable under a domain name, +the server needs to be reachable under a domain name, not just an IP address. For now this is most simply accomplished through editing the hosts file, e.g.: @@ -23,20 +23,21 @@ For now this is most simply accomplished through editing the hosts file, e.g.: ``` This will allow you to reach the main domain under `http(s)://ansible.test` and sets up two subdomains that can be reached. -Be aware that the hosts file does not support subdomain wildcards. -You will have to specify each hostname individually or use a tool such as `dnsmasq`. +Be aware that the hosts file does not support subdomain wildcards. +You will have to specify each hostname individually or use a tool such as `dnsmasq`. Read more [here](https://serverfault.com/questions/118378/in-my-etc-hosts-file-on-linux-osx-how-do-i-do-a-wildcard-subdomain). -Then you are ready to run the complete infrastructure setup locally, +Then you are ready to run the complete infrastructure setup locally, simply by executing `ansible-playbook site.yml`. You can of course pick and choose what should be executed with host limits, tags, group variables, and so on, but this should provide an easy way to see if a) the playbook is working as intended and b) what it does is useful. + ## Deployment Most variables to be changed should be set either through `group_variables` or `host_variables`. For my deployment I have a `production` group under `group_variables` which houses both a `vars.yml` containing basic variables -(like `server_domain`, `caddy_email`, etc.) +(like `server_domain`, `caddy_email`, etc.) and a `vault.yml` which houses everything that should ideally not be lying around in plain-text (individual container and database passwords for the various roles etc). 
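The README above describes keeping plain settings in `vars.yml` and secrets in a separate `vault.yml` for the production group. As a minimal sketch (not prescribed by the repository), assuming the vault lives at `group_vars/production/vault.yml` and using the new `inv-prod.yml` inventory added below, a run could look like this; the `--limit`/`--tags` values are only examples of the "pick and choose" options mentioned above:

```sh
# Illustrative only: keep the production secrets encrypted, then supply the
# vault password whenever the playbook runs against the prod inventory.
ansible-vault encrypt group_vars/production/vault.yml
ansible-playbook site.yml -i inv-prod.yml --ask-vault-pass

# Narrow a run down with host limits and tags, e.g. only the swarm tasks:
ansible-playbook site.yml -i inv-prod.yml --ask-vault-pass --limit prod --tags docker-swarm
```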
diff --git a/group_vars/testing.yml b/group_vars/testing.yml index 6a95df5..13676f5 100644 --- a/group_vars/testing.yml +++ b/group_vars/testing.yml @@ -1,21 +1,21 @@ --- + docker_swarm_advertise_addr: eth1 caddy_use_debug: yes caddy_tls_use_staging: yes -blog_use_https: no -caddy_use_https: no -forgejo_use_https: no +blog_use_https: no +caddy_use_https: no +gitea_use_https: no landingpage_use_https: no -miniflux_use_https: no -monica_use_https: no -nextcloud_use_https: no -ntfy_use_https: no -searx_use_https: no -shaarli_use_https: no -traggo_use_https: no -wallabag_use_https: no -whoami_use_https: no +miniflux_use_https: no +monica_use_https: no +nextcloud_use_https: no +searx_use_https: no +shaarli_use_https: no +traggo_use_https: no +wallabag_use_https: no +whoami_use_https: no server_domain: ansible.test diff --git a/inv-prod.yml b/inv-prod.yml new file mode 100644 index 0000000..7df0353 --- /dev/null +++ b/inv-prod.yml @@ -0,0 +1,8 @@ +prod: + hosts: + ssdnodes: + +docker_swarm_manager_node: + hosts: + ssdnodes: + diff --git a/roles/blog/README.md b/roles/blog/README.md new file mode 100644 index 0000000..d649b50 --- /dev/null +++ b/roles/blog/README.md @@ -0,0 +1,37 @@ +# landingpage + +The public face of my server. +Not much to see here honestly, +just a few simple lines of html explaining what this server is about and how to contact me. + +I don't see anybody else benefiting massively from this role but me, +but if you want the same web presence go for it I suppose 😉 + +## Defaults + +``` +landingpage_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" +``` + +The on-target directory where the proxy configuration file should be stashed. + +``` +landingpage_use_https: true +``` + +Whether the service should be reachable through http (port 80) or through https (port 443) and provision an https certificate. Usually you will want this to stay `true`. + +``` +landingpage_version: latest +``` + +The docker image version to be used in stack creation. + +``` +subdomain_alias: www +``` + +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `www.yourdomain.com` - +if this option is not set it will be served on `landingpage.yourdomain.com` instead. 
+ diff --git a/roles/blog/defaults/main.yml b/roles/blog/defaults/main.yml new file mode 100644 index 0000000..f0057a2 --- /dev/null +++ b/roles/blog/defaults/main.yml @@ -0,0 +1,11 @@ +--- + +# never got around to removing the master tag from the images +blog_version: master + +blog_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" + +blog_use_https: true + +# the subdomain link blog will be reachable under +# subdomain_alias: blog diff --git a/roles/linkding/handlers/main.yml b/roles/blog/handlers/main.yml similarity index 58% rename from roles/linkding/handlers/main.yml rename to roles/blog/handlers/main.yml index 081f2df..4ea5e4a 100644 --- a/roles/linkding/handlers/main.yml +++ b/roles/blog/handlers/main.yml @@ -1,18 +1,18 @@ ## Register reverse proxy - name: Ensure upstream directory exists ansible.builtin.file: - path: "{{ linkding_upstream_file_dir }}" + path: "{{ blog_upstream_file_dir }}" state: directory - mode: "0755" - become: true - listen: "update linkding upstream" + mode: '0755' + become: yes + listen: "update blog upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 - dest: "{{ linkding_upstream_file_dir }}/upstream.json" - become: true - listen: "update linkding upstream" + dest: "{{ blog_upstream_file_dir }}/upstream.json" + become: yes + listen: "update blog upstream" # figure out if upstream id exists - name: check {{ stack_name }} upstream @@ -22,8 +22,8 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true - listen: "update linkding upstream" + become: yes + listen: "update blog upstream" # upstream already exists, patch it - name: remove old {{ stack_name }} upstream @@ -31,22 +31,23 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined - listen: "update linkding upstream" + listen: "update blog upstream" # upstream has to be created - name: add {{ stack_name }} upstream community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ linkding_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (linkding_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true - listen: "update linkding upstream" + curl -X POST -H "Content-Type: application/json" -d @{{ blog_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (blog_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes + listen: "update blog upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: - path: "{{ linkding_upstream_file_dir }}" + path: "{{ blog_upstream_file_dir }}" state: absent - become: true - listen: "update linkding upstream" + become: yes + listen: "update blog upstream" + diff --git a/roles/blog/meta/main.yml b/roles/blog/meta/main.yml new file mode 100644 index 0000000..fbb1340 --- /dev/null +++ b/roles/blog/meta/main.yml @@ -0,0 +1,14 @@ +--- + +galaxy_info: + author: Marty Oehme + description: Installs my personal public facing landing page as a docker stack service + license: GPL-3.0-only + min_ansible_version: 2.9 + galaxy_tags: [] + + +dependencies: + - docker + - docker-swarm + - caddy diff --git a/roles/linkding/tasks/main.yml b/roles/blog/tasks/main.yml similarity index 74% rename from 
roles/linkding/tasks/main.yml rename to roles/blog/tasks/main.yml index e514b26..4a0e1e5 100644 --- a/roles/linkding/tasks/main.yml +++ b/roles/blog/tasks/main.yml @@ -1,5 +1,5 @@ --- -## install linkding container +## install blog container - name: Check upstream status community.docker.docker_container_exec: container: "{{ caddy_container_id }}" @@ -7,17 +7,18 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true - notify: "update linkding upstream" + become: yes + notify: "update blog upstream" -- name: Deploy linkding to swarm +- name: Deploy blog to swarm community.general.docker_stack: name: "{{ stack_name }}" state: present prune: yes compose: - "{{ stack_compose }}" - become: true + become: yes tags: - docker-swarm - notify: "update linkding upstream" + notify: "update blog upstream" + diff --git a/roles/blog/templates/docker-stack.yml.j2 b/roles/blog/templates/docker-stack.yml.j2 new file mode 100644 index 0000000..1d83e61 --- /dev/null +++ b/roles/blog/templates/docker-stack.yml.j2 @@ -0,0 +1,20 @@ +version: '3.4' + +services: + app: + image: "{{ stack_image }}:{{ blog_version }}" + healthcheck: + test: ["CMD", "wget", "--spider", "-q", "localhost"] + interval: 1m + timeout: 10s + retries: 3 + start_period: 1m + entrypoint: sh -c "/docker-entrypoint.sh nginx -g 'daemon off;'" + networks: + - "{{ docker_swarm_public_network_name }}" + +networks: + "{{ docker_swarm_public_network_name }}": + external: true + + diff --git a/roles/ntfy/templates/upstream.json.j2 b/roles/blog/templates/upstream.json.j2 similarity index 94% rename from roles/ntfy/templates/upstream.json.j2 rename to roles/blog/templates/upstream.json.j2 index a8af36a..6c6c59d 100644 --- a/roles/ntfy/templates/upstream.json.j2 +++ b/roles/blog/templates/upstream.json.j2 @@ -9,6 +9,8 @@ {% else %} "{{ stack_name }}.{{ server_domain }}" {% endif %} + , + "{{ server_domain }}" ] } ], diff --git a/roles/ntfy/vars/main.yml b/roles/blog/vars/main.yml similarity index 54% rename from roles/ntfy/vars/main.yml rename to roles/blog/vars/main.yml index d4bdc5c..565e61f 100644 --- a/roles/ntfy/vars/main.yml +++ b/roles/blog/vars/main.yml @@ -1,6 +1,7 @@ --- -stack_name: ntfy -stack_image: "binwiederhier/ntfy" +stack_name: blog + +stack_image: "registry.gitlab.com/cloud-serve/blog" stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/roles/caddy/README.md b/roles/caddy/README.md index eda03d2..88871db 100644 --- a/roles/caddy/README.md +++ b/roles/caddy/README.md @@ -1,7 +1,7 @@ -# Caddy +# Caddy Caddy is the reverse proxy for all other services running on the infrastructure. -It was chosen for its relative ease of use, +It was chosen for its relative ease of use, interactible API and https-by-default setup. ## Variables @@ -48,27 +48,28 @@ caddy_version: alpine Sets the docker image version to be used. + ## Internal variables ```yaml caddy_stack: - name: caddy - compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" + name: caddy + compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" ``` -Defines the actual docker stack which will later run on the target. -The name can be changed and will be used as a proxy target (`caddy.mydomain.com` or `192.168.1.1/caddy`) --- +Defines the actual docker stack which will later run on the target. 
+The name can be changed and will be used as a proxy target (`caddy.mydomain.com` or `192.168.1.1/caddy`) --- though to be clear there is no intention currently to expose the caddy to the web at the moment.\ -The compose option defines which template to use for the `docker-stack.yml` file. You can either change options for the stack in the template file, +The compose option defines which template to use for the `docker-stack.yml` file. You can either change options for the stack in the template file, or directly here like the following: ```yaml -compose: - - "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" - - version: "3" - services: - another-container: - image: nginx:latest + compose: + - "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" + - version: '3' + services: + another-container: + image: nginx:latest # ... ``` diff --git a/roles/caddy/defaults/main.yml b/roles/caddy/defaults/main.yml index dbc9087..378f819 100644 --- a/roles/caddy/defaults/main.yml +++ b/roles/caddy/defaults/main.yml @@ -1,5 +1,6 @@ --- -caddy_version: 2.8.4-alpine # tag exact version to avoid suprising container renewals + +caddy_version: alpine caddy_caddyfile_dir: "{{ docker_stack_files_dir }}/caddy" caddy_use_debug: no @@ -8,4 +9,3 @@ caddy_use_https: yes caddy_tls_use_staging: no # caddy_email: your@email.here -# caddy_zerossl_api_key: your-zerossl-key-here-its-free diff --git a/roles/caddy/meta/main.yml b/roles/caddy/meta/main.yml index 5a00c2a..5863772 100644 --- a/roles/caddy/meta/main.yml +++ b/roles/caddy/meta/main.yml @@ -1,3 +1,5 @@ --- + dependencies: + - docker - docker-swarm diff --git a/roles/caddy/tasks/main.yml b/roles/caddy/tasks/main.yml index 61f1abe..884e5c0 100644 --- a/roles/caddy/tasks/main.yml +++ b/roles/caddy/tasks/main.yml @@ -5,9 +5,9 @@ ansible.builtin.file: path: "{{ caddy_caddyfile_dir }}" state: directory - mode: "0755" + mode: '0755' become: true - tags: + tags: - fs - name: Ensure Caddyfile exists @@ -27,9 +27,47 @@ compose: - "{{ caddy_stack.compose }}" when: caddy_stack is defined - become: true + become: yes tags: - docker-swarm + +- name: Get caddy container info + ansible.builtin.command: + cmd: docker ps -q -f name={{ caddy_stack.name }} + become: yes + # bringing up the container takes some time, we have to wait + until: caddy_container_info['rc'] == 0 and caddy_container_info['stdout'] | length >= 1 + retries: 5 + delay: 10 + changed_when: False + register: caddy_container_info + +- name: Register caddy container id + ansible.builtin.set_fact: caddy_container_id={{ caddy_container_info['stdout'] }} + notify: + - debug caddy container + +# FIXME this should be taken care of in Dockerfile not here +- name: Ensure caddy curl available + community.docker.docker_container_exec: + container: "{{ caddy_container_id }}" + command: > + apk add curl + become: yes + register: result + changed_when: "'Installing' in result.stdout" + +- name: Ensure caddy api is responsive + community.docker.docker_container_exec: + container: "{{ caddy_container_id }}" + command: > + curl localhost:2019/config/ + become: yes + until: result.rc == 0 + when: caddy_use_api == True + changed_when: False + register: result + # TODO FIXME UP # - name: Allow access to services # firewalld: diff --git a/roles/caddy/templates/config.json.j2 b/roles/caddy/templates/config.json.j2 index 6a13848..b104a25 100644 --- a/roles/caddy/templates/config.json.j2 +++ b/roles/caddy/templates/config.json.j2 @@ -51,19 +51,17 @@ {% if caddy_tls_use_staging is sameas true %} "ca": 
"https://acme-staging-v02.api.letsencrypt.org/directory", {% endif %} - {%- if caddy_email is not undefined and not none %} + {%- if caddy_email is not undefined and not none %} "email": "{{ caddy_email }}", {% endif %} "module": "acme" - {%- if caddy_zerossl_api_key is not undefined and not none %} }, { - "api_key": "{{ caddy_zerossl_api_key }}", + {%- if caddy_email is not undefined and not none %} + "email": "{{ caddy_email }}", + {% endif %} "module": "zerossl" } - {% else %} - } - {% endif %} ] } ] diff --git a/roles/caddy/templates/docker-stack.yml.j2 b/roles/caddy/templates/docker-stack.yml.j2 index 21a4c5a..9c1ecf1 100644 --- a/roles/caddy/templates/docker-stack.yml.j2 +++ b/roles/caddy/templates/docker-stack.yml.j2 @@ -5,7 +5,7 @@ services: image: caddy:{{ caddy_version }} command: caddy run --config /etc/caddy/config.json healthcheck: - test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:2019/metrics"] + test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://localhost:2019/metrics"] interval: 1m timeout: 10s retries: 3 diff --git a/roles/caddy/vars/main.yml b/roles/caddy/vars/main.yml index 7684a29..27530c3 100644 --- a/roles/caddy/vars/main.yml +++ b/roles/caddy/vars/main.yml @@ -1,4 +1,5 @@ --- + caddy_stack: name: caddy compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/roles/caddy_id/README.md b/roles/caddy_id/README.md deleted file mode 100644 index eda03d2..0000000 --- a/roles/caddy_id/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# Caddy - -Caddy is the reverse proxy for all other services running on the infrastructure. -It was chosen for its relative ease of use, -interactible API and https-by-default setup. - -## Variables - -``` -caddy_caddyfile_dir: "{{ docker_stack_files_dir }}/caddy" -``` - -Sets up the on-target directory where important caddy files should be stored. - -``` -caddy_email: -``` - -Which e-mail should be used to provision https certificates with. I believe theoretically caddy will work and provision you with certificates even without providing an e-mail, but I would strongly urge providing one. - -``` -caddy_tls_use_staging: no -``` - -If turned on will use the staging servers of the acme certificate service, which is useful for testing and playing around with https (due to higher API limits and less severe restrictions). - -``` -caddy_use_api: yes -``` - -If turned off, will turn off the admin api for caddy. Should only be used if no other services are intended to be provisioned on the target, since most other service stacks rely on the API to set up their proxy targets. - -``` -caddy_use_debug: no -``` - -If true, will turn on caddy's debug logging. - -``` -caddy_use_https: yes -``` - -If turned off will turn of all auto-provisioning of https certificates by caddy. - -``` -caddy_version: alpine -``` - -Sets the docker image version to be used. - -## Internal variables - -```yaml -caddy_stack: - name: caddy - compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" -``` - -Defines the actual docker stack which will later run on the target. -The name can be changed and will be used as a proxy target (`caddy.mydomain.com` or `192.168.1.1/caddy`) --- -though to be clear there is no intention currently to expose the caddy to the web at the moment.\ -The compose option defines which template to use for the `docker-stack.yml` file. 
You can either change options for the stack in the template file, -or directly here like the following: - -```yaml -compose: - - "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" - - version: "3" - services: - another-container: - image: nginx:latest -# ... -``` - -```yaml -caddy_http_server_name: http -``` - -```yaml -caddy_https_server_name: https -``` - -The internal representation of the http and https servers respectively. diff --git a/roles/caddy_id/meta/main.yml b/roles/caddy_id/meta/main.yml deleted file mode 100644 index 5a00c2a..0000000 --- a/roles/caddy_id/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - docker-swarm diff --git a/roles/caddy_id/tasks/main.yml b/roles/caddy_id/tasks/main.yml deleted file mode 100644 index adbfc5c..0000000 --- a/roles/caddy_id/tasks/main.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -# get the caddy container id for all other containers - -- name: Get caddy container info - ansible.builtin.command: - cmd: docker ps -q -f name={{ caddy_stack.name }} - become: true - # bringing up the container takes some time, we have to wait - until: caddy_container_info['rc'] | default('') == 0 and caddy_container_info['stdout'] | length >= 1 - retries: 5 - delay: 10 - changed_when: False - register: caddy_container_info - -- name: Register caddy container id - ansible.builtin.set_fact: caddy_container_id={{ caddy_container_info['stdout'] }} - notify: - - debug caddy container - -# FIXME this should be taken care of in Dockerfile not here -- name: Ensure caddy curl available - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - apk add curl - become: true - register: result - changed_when: "'Installing' in result.stdout" - -- name: Ensure caddy api is responsive - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl localhost:2019/config/ - become: true - until: result.rc | default('') == 0 - when: caddy_use_api == True - changed_when: False - register: result diff --git a/roles/caddy_id/templates/config.json.j2 b/roles/caddy_id/templates/config.json.j2 deleted file mode 100644 index b104a25..0000000 --- a/roles/caddy_id/templates/config.json.j2 +++ /dev/null @@ -1,72 +0,0 @@ -{ -{% if caddy_use_api is sameas false %} - "admin": { - "disabled": true - }, -{% endif %} -{% if caddy_use_debug is sameas true %} - "logging": { - "logs": { - "default": { - "level": "DEBUG" - } - } - }, -{% endif %} - "apps": { - "http": { - "servers": { - "{{ caddy_http_server_name }}": { - "listen": [ - ":80" - ], - "routes": [] -{% if caddy_use_https is sameas false %}, - "automatic_https": { - "disable": true - } -{% endif %} - }, - "{{ caddy_https_server_name }}": { - "listen": [ - ":443" - ], - "routes": [] -{% if caddy_use_https is sameas false %}, - "automatic_https": { - "disable": true - } -{% endif %} - } - } - } -{% if caddy_use_https is sameas true %}, - "tls": { - "automation": { - "policies": [ - { - "subjects": [], - "issuers": [ - { - {% if caddy_tls_use_staging is sameas true %} - "ca": "https://acme-staging-v02.api.letsencrypt.org/directory", - {% endif %} - {%- if caddy_email is not undefined and not none %} - "email": "{{ caddy_email }}", - {% endif %} - "module": "acme" - }, - { - {%- if caddy_email is not undefined and not none %} - "email": "{{ caddy_email }}", - {% endif %} - "module": "zerossl" - } - ] - } - ] - } - } -{% endif %} - } -} diff --git a/roles/caddy_id/templates/docker-stack.yml.j2 b/roles/caddy_id/templates/docker-stack.yml.j2 deleted 
file mode 100644 index 21a4c5a..0000000 --- a/roles/caddy_id/templates/docker-stack.yml.j2 +++ /dev/null @@ -1,30 +0,0 @@ -version: "3.7" - -services: - app: - image: caddy:{{ caddy_version }} - command: caddy run --config /etc/caddy/config.json - healthcheck: - test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:2019/metrics"] - interval: 1m - timeout: 10s - retries: 3 - start_period: 1m - ports: - - "80:80" - - "443:443" - volumes: - - "{{ caddy_caddyfile_dir }}:/etc/caddy" - - "{{ docker_stack_files_dir }}:/stacks:ro" - - data:/data - - config:/config - networks: - - "{{ docker_swarm_public_network_name }}" - -volumes: - data: - config: - -networks: - "{{ docker_swarm_public_network_name }}": - external: true diff --git a/roles/caddy_id/vars/main.yml b/roles/caddy_id/vars/main.yml deleted file mode 100644 index 7e60722..0000000 --- a/roles/caddy_id/vars/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -caddy_stack: - name: caddy - -caddy_use_api: yes # if no turns off api interface; it is *required* for other swarm roles to be routed diff --git a/roles/diun/README.md b/roles/diun/README.md deleted file mode 100644 index 5f821f2..0000000 --- a/roles/diun/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# diun - -Monitor the deployed swarm containers for updates. -Will notify you when it found any update for any container. -Can (currently) notify you either through mail or on matrix. diff --git a/roles/diun/defaults/main.yml b/roles/diun/defaults/main.yml deleted file mode 100644 index 2eb93de..0000000 --- a/roles/diun/defaults/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -diun_version: 4 - -diun_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" - -diun_use_https: true - -# the subdomain link diun will be reachable under -subdomain_alias: diun - -diun_tz: Europe/Berlin -diun_log_level: info -diun_watch_swarm_by_default: true - -diun_notif_mail_host: localhost -diun_notif_mail_port: 25 -# diun_notif_mail_username: required for mail -# diun_notif_mail_password: required for mail -# diun_notif_mail_from: required for mail -# diun_notif_mail_to: required for mail - -diun_notif_matrix_url: "https://matrix.org" -#diun_notif_matrix_user: required for matrix -#diun_notif_matrix_password: required for matrix -#diun_notif_matrix_roomid: required for matrix - diff --git a/roles/diun/meta/main.yml b/roles/diun/meta/main.yml deleted file mode 100644 index 2c1b831..0000000 --- a/roles/diun/meta/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -galaxy_info: - author: Marty Oehme - description: Notify on any docker swarm container updates - license: GPL-3.0-only - min_ansible_version: "2.9" - galaxy_tags: [] - -dependencies: - - docker-swarm diff --git a/roles/diun/tasks/main.yml b/roles/diun/tasks/main.yml deleted file mode 100644 index 10456f4..0000000 --- a/roles/diun/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -## install diun container -- name: Deploy diun to swarm - community.general.docker_stack: - name: "{{ stack_name }}" - state: present - prune: yes - compose: - - "{{ stack_compose }}" - become: true - tags: - - docker-swarm diff --git a/roles/diun/templates/docker-stack.yml.j2 b/roles/diun/templates/docker-stack.yml.j2 deleted file mode 100644 index 71a07cb..0000000 --- a/roles/diun/templates/docker-stack.yml.j2 +++ /dev/null @@ -1,51 +0,0 @@ -version: '3.4' - -services: - app: - image: crazymax/diun:latest - # healthcheck: - # test: ["CMD", "wget", "--spider", "-q", "127.0.0.1"] - # interval: 1m - # timeout: 10s - # retries: 3 - # start_period: 1m - command: 
serve - volumes: - - "data:/data" - - "/var/run/docker.sock:/var/run/docker.sock" - environment: - - "TZ={{ diun_tz }}" - - "LOG_LEVEL={{ diun_log_level }}" - - "LOG_JSON=false" - - "DIUN_WATCH_WORKERS=20" - - "DIUN_WATCH_SCHEDULE=0 */6 * * *" - - "DIUN_WATCH_JITTER=30s" - - "DIUN_PROVIDERS_SWARM=true" - - "DIUN_PROVIDERS_SWARM_WATCHBYDEFAULT={{ diun_watch_swarm_by_default }}" -{% if diun_notif_matrix_user is not undefined and not None and diun_notif_matrix_password is not undefined and not None and diun_notif_matrix_roomid is not undefined and not None %} - - "DIUN_NOTIF_MATRIX_HOMESERVERURL={{ diun_notif_matrix_url }}" - - "DIUN_NOTIF_MATRIX_USER={{ diun_notif_matrix_user }}" - - "DIUN_NOTIF_MATRIX_PASSWORD={{ diun_notif_matrix_password }}" - - "DIUN_NOTIF_MATRIX_ROOMID={{ diun_notif_matrix_roomid }}" -{% endif %} -{% if diun_notif_mail_username is not undefined and not None and diun_notif_mail_password is not undefined and not None and diun_notif_mail_from is not undefined and not None and diun_notif_mail_to is not undefined and not None %} - - "DIUN_NOTIF_MAIL_HOST={{ diun_notif_mail_host }}" - - "DIUN_NOTIF_MAIL_PORT={{ diun_notif_mail_port }}" - - "DIUN_NOTIF_MAIL_USERNAME={{ diun_notif_mail_username }}" - - "DIUN_NOTIF_MAIL_PASSWORD={{ diun_notif_mail_password }}" - - "DIUN_NOTIF_MAIL_FROM={{ diun_notif_mail_from }}" - - "DIUN_NOTIF_MAIL_TO={{ diun_notif_mail_to }}" -{% endif %} -# deploy: -# mode: replicated -# replicas: 1 -# placement: -# constraints: -# - node.role == manager - -volumes: - data: - -networks: - "{{ docker_swarm_public_network_name }}": - external: true diff --git a/roles/diun/vars/main.yml b/roles/diun/vars/main.yml deleted file mode 100644 index 91148a4..0000000 --- a/roles/diun/vars/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -stack_name: diun - -stack_image: "crazymax/diun" - -stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/roles/docker-clean/tasks/main.yml b/roles/docker-clean/tasks/main.yml deleted file mode 100644 index 53a7bad..0000000 --- a/roles/docker-clean/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Get running docker stacks - community.docker.docker_stack_info: - register: running_stacks - become: true - -- name: Remove stacks without matching role - community.docker.docker_stack: - name: "{{ item.Name }}" - state: "absent" - loop: "{{ running_stacks.results | rejectattr('Name', 'in', role_names) }}" - become: true diff --git a/roles/docker-swarm/defaults/main.yml b/roles/docker-swarm/defaults/main.yml index 4e9d4af..9399a91 100644 --- a/roles/docker-swarm/defaults/main.yml +++ b/roles/docker-swarm/defaults/main.yml @@ -1,3 +1,5 @@ --- + docker_stack_files_dir: /stacks docker_swarm_public_network_name: public + diff --git a/roles/docker-swarm/meta/main.yml b/roles/docker-swarm/meta/main.yml deleted file mode 100644 index 128f19c..0000000 --- a/roles/docker-swarm/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - docker diff --git a/roles/docker-swarm/tasks/main.yml b/roles/docker-swarm/tasks/main.yml index e44183e..01cf75b 100644 --- a/roles/docker-swarm/tasks/main.yml +++ b/roles/docker-swarm/tasks/main.yml @@ -28,7 +28,7 @@ ansible.builtin.file: path: "{{ docker_stack_files_dir }}" state: directory - mode: "0755" + mode: '0755' become: true - tags: + tags: - fs diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 3e4a3b7..2525e6a 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -4,4 +4,4 @@ state: started 
enabled: yes daemon_reload: yes - become: true + become: yes diff --git a/roles/docker/tasks/Ubuntu.yml b/roles/docker/tasks/Ubuntu.yml index 0ac4236..53ea490 100644 --- a/roles/docker/tasks/Ubuntu.yml +++ b/roles/docker/tasks/Ubuntu.yml @@ -1,7 +1,7 @@ - name: Ensure requirements installed ansible.builtin.package: name: "{{ requisites }}" - state: latest + state: present update_cache: yes tags: - apt @@ -11,14 +11,11 @@ - name: Ensure docker GPG apt key exists apt_key: - url: "https://download.docker.com/linux/ubuntu/gpg" + url: https://download.docker.com/linux/ubuntu/gpg state: present tags: - apt - repository - # FIXME: Needs a 'until:' defined for the retries to actually work - retries: 3 - delay: 5 become: true - name: Ensure docker repository exists @@ -30,18 +27,7 @@ - repository become: true -- name: docker-ce is installed - ansible.builtin.package: - name: "{{ packages }}" - state: present - tags: - - apt - - download - - packages - become: true - notify: Handle docker daemon - -- name: Latest docker-ce is installed +- name: Ensure latest docker-ce installed ansible.builtin.package: name: "{{ packages }}" state: latest @@ -49,14 +35,12 @@ - apt - download - packages - - docker - - never become: true notify: Handle docker daemon - name: Ensure docker requisites for python installed pip: - name: + name: - docker - jsondiff - pyyaml diff --git a/roles/forgejo/README.md b/roles/forgejo/README.md deleted file mode 100644 index f99dea6..0000000 --- a/roles/forgejo/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# forgejo - -A relatively light-weight git server hosting. - -## Defaults - -``` -forgejo_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" -``` - -The on-target directory where the proxy configuration file should be stashed. - -``` -forgejo_use_https: true -``` - -Whether the service should be reachable through http (port 80) or through https (port 443) and provision an https certificate. Usually you will want this to stay `true`. - -``` -forgejo_version: latest -``` - -The docker image version to be used in stack creation. - -``` -subdomain_alias: git -``` - -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `git.yourdomain.com` - -if this option is not set it will be served on `forgejo.yourdomain.com` instead. - -For now forgejo will still need to be initially set up after installation. 
-This could be automated with the help of these commands: - -```sh -docker run --name forgejo -p 8080:3000 -e FORGEJO__security__INSTALL_LOCK=true -d codeberg.org/forgejo/forgejo:7 -$ docker exec forgejo migrate -$ docker exec forgejo forgejo admin user create --admin --username root --password admin1234 --email admin@example.com -``` diff --git a/roles/forgejo/defaults/main.yml b/roles/forgejo/defaults/main.yml deleted file mode 100644 index 8dad546..0000000 --- a/roles/forgejo/defaults/main.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -forgejo_version: 11 - -forgejo_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" - -forgejo_use_https: true - -# the subdomain link forgejo will be reachable under -subdomain_alias: git -subdomain_ci_alias: ci - -forgejo_db_database: forgejo -forgejo_db_username: forgejo -forgejo_db_password: forgejo - -forgejo_app_admin_username: Myforgejousername # can not be set to admin in Forgejo -forgejo_app_admin_password: Myforgejopassword -forgejo_app_admin_email: myadmin@mydomain.mytld - -# forgejo_smtp_addr: domain.com -# forgejo_smtp_port: 465 -# forgejo_smtp_username: my@username.com -# forgejo_smtp_password: -# forgejo_smtp_protocol: smtps # can be one of starttls | smtps - -forgejo_use_lfs: false -forgejo_lfs_max_filesize: 0 -forgejo_lfs_http_auth_expiry: 24h -# forgejo_lfs_jwt_secret: - -forgejo_use_ci: false -# forgejo_ci_github_client: -# forgejo_ci_github_secret: -# forgejo_ci_gitlab_client: -# forgejo_ci_gitlab_secret: -# forgejo_ci_forgejo_client: -# forgejo_ci_forgejo_secret: -# forgejo_ci_gitea_url: -# forgejo_ci_gitea_client: -# forgejo_ci_gitea_secret: - -forgejo_use_s3: false -forgejo_s3_use_ssl: true -forgejo_s3_bucket_lookup: auto # auto|dns|path -forgejo_s3_checksum: default # default|md5 -# forgejo_s3_endpoint: -# forgejo_s3_region: -# forgejo_s3_key: -# forgejo_s3_secret: -# forgejo_s3_bucket: diff --git a/roles/forgejo/handlers/main.yml b/roles/forgejo/handlers/main.yml deleted file mode 100644 index d3e8b18..0000000 --- a/roles/forgejo/handlers/main.yml +++ /dev/null @@ -1,100 +0,0 @@ -- name: Add admin user - community.docker.docker_container_exec: - container: "{{ forgejo_app_container_name['stdout'] }}" - command: > - forgejo admin user create --admin --username {{ forgejo_app_admin_username }} --password {{ forgejo_app_admin_password }} --email {{ forgejo_app_admin_email }} - user: git - become: true - listen: "no admin user" - -## Register reverse proxy -- name: Upstream directory exists - ansible.builtin.file: - path: "{{ forgejo_upstream_file_dir }}" - state: directory - mode: "0755" - become: true - listen: "update forgejo upstream" - -- name: Update upstream template - ansible.builtin.template: - src: upstream.json.j2 - dest: "{{ forgejo_upstream_file_dir }}/upstream.json" - mode: "0600" - become: true - listen: "update forgejo upstream" - -- name: Update ci upstream template - ansible.builtin.template: - src: upstream_ci.json.j2 - dest: "{{ forgejo_upstream_file_dir }}/upstream_ci.json" - mode: "0600" - become: true - listen: "update forgejo upstream" - -# figure out if upstream id exists -- name: check {{ stack_name }} upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl localhost:2019/id/{{ stack_name }}_upstream/ - changed_when: False - register: result - become: true - listen: "update forgejo upstream" - -# upstream already exists, patch it -- name: remove old {{ stack_name }} upstream - community.docker.docker_container_exec: - container: "{{ 
caddy_container_id }}" - command: > - curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true - when: (result.stdout | from_json)['error'] is not defined - listen: "update forgejo upstream" - -# upstream has to be created -- name: add {{ stack_name }} upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl -X POST -H "Content-Type: application/json" -d @{{ forgejo_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (forgejo_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true - listen: "update forgejo upstream" - -# figure out if upstream id exists -- name: check {{ stack_name }}_ci upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl localhost:2019/id/{{ stack_name }}_ci_upstream/ - changed_when: False - register: result - become: true - listen: "update forgejo upstream" - -# upstream for ci already exists, patch it -- name: remove old {{ stack_name }}_ci upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl -X DELETE localhost:2019/id/{{ stack_name }}_ci_upstream/ - become: true - when: (result.stdout | from_json)['error'] is not defined - listen: "update forgejo upstream" - -# upstream for ci has to be created -- name: add {{ stack_name }}_ci upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl -X POST -H "Content-Type: application/json" -d @{{ forgejo_upstream_file_dir }}/upstream_ci.json localhost:2019/config/apps/http/servers/{{ (forgejo_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true - listen: "update forgejo upstream" - -- name: Ensure upstream directory is gone again - ansible.builtin.file: - path: "{{ forgejo_upstream_file_dir }}" - state: absent - become: true - listen: "update forgejo upstream" diff --git a/roles/forgejo/tasks/Ubuntu.yml b/roles/forgejo/tasks/Ubuntu.yml deleted file mode 100644 index dd5b043..0000000 --- a/roles/forgejo/tasks/Ubuntu.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -## install requisites -- name: Ensure openssl installed - ansible.builtin.package: - name: "openssl" - state: present - become: true - tags: - - apt - - download - - packages diff --git a/roles/forgejo/tasks/main.yml b/roles/forgejo/tasks/main.yml deleted file mode 100644 index 33e8abc..0000000 --- a/roles/forgejo/tasks/main.yml +++ /dev/null @@ -1,132 +0,0 @@ ---- -## Prepare woodpecker ci -- name: "Select tasks for {{ ansible_distribution }} {{ ansible_distribution_major_version }}" - include_tasks: "{{ distribution }}" - with_first_found: - - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml" - - "{{ ansible_distribution }}.yml" - - "{{ ansible_os_family }}.yml" - loop_control: - loop_var: distribution - when: forgejo_use_ci == True - -# TODO only generate when no existing (check with docker inspect?) 
-- name: Generate agent key - ansible.builtin.shell: openssl rand -hex 32 - register: forgejo_woodpecker_agent_secret - when: forgejo_use_ci == True - -- name: Set agent key - ansible.builtin.set_fact: - forgejo_woodpecker_agent_secret: "{{ forgejo_woodpecker_agent_secret.stdout }}" - when: forgejo_woodpecker_agent_secret.stdout is not undefined and not None - -## Prepare forgejo -- name: Ensure git user exists with ssh key - ansible.builtin.user: - name: "{{ forgejo_git_username }}" - generate_ssh_key: yes - ssh_key_type: rsa - ssh_key_bits: 4096 - ssh_key_comment: "Forgejo Host Key" - become: true - register: git_user - -- name: Ensure git passthrough command directory exists - ansible.builtin.file: - path: "/app/forgejo/" - state: directory - mode: "0770" - owner: "{{ git_user['uid'] }}" - group: "{{ git_user['group'] }}" - become: true - -- name: Passthrough git command is in right location - ansible.builtin.copy: - src: forgejo - dest: "/app/forgejo/forgejo" - owner: "{{ git_user['uid'] }}" - group: "{{ git_user['group'] }}" - mode: "0750" - become: true - -- name: Host machine forgejo command points to passthrough command - ansible.builtin.file: - state: link - src: "/app/forgejo/forgejo" - dest: "/usr/local/bin/forgejo" - become: true - -- name: Fetch keyfile - fetch: - src: "{{ git_user['home'] }}/.ssh/id_rsa.pub" - dest: "buffer/{{ansible_hostname}}-id_rsa.pub" - flat: yes - become: true - -- name: Ensure git user has its own key authorized for access - ansible.posix.authorized_key: - user: "{{ git_user['name'] }}" - state: present - key: "{{ lookup('file', 'buffer/{{ ansible_hostname }}-id_rsa.pub') }}" - become: true - -- name: Clean up buffer dir - ansible.builtin.file: - path: buffer - state: absent - delegate_to: localhost - -## install forgejo container -- name: Check upstream status - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl localhost:2019/id/{{ stack_name }}_upstream/ - register: result - changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true - notify: "update forgejo upstream" - -- name: Deploy forgejo to swarm - community.general.docker_stack: - name: "{{ stack_name }}" - state: present - prune: yes - compose: - - "{{ stack_compose }}" - become: true - tags: - - docker-swarm - register: forgejo_deployment - notify: "update forgejo upstream" - -- name: Wait a minute for forgejo to become healthy - wait_for: - timeout: 55 - delegate_to: localhost - when: forgejo_deployment is changed - -- name: Get app container info - ansible.builtin.command: - cmd: docker ps -q -f name={{ stack_name }}_app - become: true - until: forgejo_app_container_name['rc'] | default('') == 0 and forgejo_app_container_name['stdout'] | length >= 1 - retries: 10 - delay: 10 - changed_when: False - register: forgejo_app_container_name - -- name: Look for existing admin user - community.docker.docker_container_exec: - container: "{{ forgejo_app_container_name['stdout'] }}" - user: git - command: > - forgejo admin user list --admin - until: forgejo_admin_list is defined and forgejo_admin_list['rc'] | default('') == 0 - retries: 15 - delay: 20 - become: true - register: forgejo_admin_list - changed_when: forgejo_admin_list['stdout_lines'] | length <= 1 and 'Username' in forgejo_admin_list['stdout'] - notify: "no admin user" diff --git a/roles/forgejo/templates/docker-stack.yml.j2 b/roles/forgejo/templates/docker-stack.yml.j2 deleted file mode 100644 index 5eb9756..0000000 --- 
a/roles/forgejo/templates/docker-stack.yml.j2 +++ /dev/null @@ -1,146 +0,0 @@ -version: '3.4' - -services: - app: - image: "{{ stack_image }}:{{ forgejo_version }}" - healthcheck: - test: ["CMD", "wget", "--spider", "-q", "127.0.0.1:3000"] - interval: 1m - timeout: 10s - retries: 3 - start_period: 1m - volumes: - - data:/data - - /home/git/.ssh:/data/git/.ssh - - /etc/timezone:/etc/timezone:ro - - /etc/localtime:/etc/localtime:ro - environment: - - USER_UID={{ git_user['uid'] }} - - USER_GID={{ git_user['group'] }} - - FORGEJO__database__DB_TYPE=postgres - - FORGEJO__database__HOST=db:5432 - - "FORGEJO__database__NAME={{ forgejo_db_database }}" - - "FORGEJO__database__USER={{ forgejo_db_username }}" - - "FORGEJO__database__PASSWD={{ forgejo_db_password }}" - - "FORGEJO__server__ROOT_URL={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}" - - "FORGEJO__server__SSH_DOMAIN={{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}" - - FORGEJO__server__LANDING_PAGE=explore - - FORGEJO__service__DISABLE_REGISTRATION=true -{% if forgejo_app_admin_username is not undefined and not None and forgejo_app_admin_password is not undefined and not None %} - - FORGEJO__security__INSTALL_LOCK=true -{% endif %} -{% if forgejo_smtp_addr is not undefined and not None and forgejo_smtp_port is not undefined and not None and forgejo_smtp_username is not undefined and not None and forgejo_smtp_password is not undefined and not None %} - - FORGEJO__mailer__ENABLED=true - - FORGEJO__service__ENABLE_NOTIFY_MAIL=true - - FORGEJO__mailer__FROM=forgejo@{{ server_domain }} - - FORGEJO__mailer__TYPE=smtp - - FORGEJO__mailer__SMTP_ADDR={{ forgejo_smtp_addr }} - - FORGEJO__mailer__SMTP_PORT={{ forgejo_smtp_port }} -{% if forgejo_smtp_protocol is not undefined and not none %} - - FORGEJO__mailer__PROTOCOL={{ forgejo_smtp_protocol }} -{% endif %} - - FORGEJO__mailer__USER={{ forgejo_smtp_username }} - - FORGEJO__mailer__PASSWD={{ forgejo_smtp_password }} -{% endif %} -{% if forgejo_use_lfs %} - - FORGEJO__server__LFS_START_SERVER=true -{% if forgejo_lfs_jwt_secret is not undefined and not none %} - - FORGEJO__server__LFS_JWT_SECRET={{ forgejo_lfs_jwt_secret }} -{% endif %} - - FORGEJO__server__LFS_HTTP_AUTH_EXPIRY={{ forgejo_lfs_http_auth_expiry }} - - FORGEJO__server__LFS_MAX_FILE_SIZE={{ forgejo_lfs_max_filesize }} -{% endif %} -{% if forgejo_use_s3 %} - - FORGEJO__storage__STORAGE_TYPE="minio" - - FORGEJO__storage__MINIO_USE_SSL={{ forgejo_s3_use_ssl }} - - FORGEJO__storage__MINIO_BUCKET_LOOKUP={{ forgejo_s3_bucket_lookup }} - - FORGEJO__storage__MINIO_ENDPOINT={{ forgejo_s3_endpoint }} - - FORGEJO__storage__MINIO_ACCESS_KEY_ID={{ forgejo_s3_key }} - - FORGEJO__storage__MINIO_SECRET_ACCESS_KEY={{ forgejo_s3_secret }} - - FORGEJO__storage__MINIO_BUCKET={{ forgejo_s3_bucket }} - - FORGEJO__storage__MINIO_LOCATION={{ forgejo_s3_region }} - - FORGEJO__storage__MINIO_CHECKSUM_ALGORITHM={{ forgejo_s3_checksum }} -{% endif %} - networks: - - "{{ docker_swarm_public_network_name }}" - - backend - ports: - - "127.0.0.1:2222:22" - - db: - image: postgres:13 - healthcheck: - test: ["CMD", "pg_isready", "-q", "-U", "{{ forgejo_db_username }}"] - interval: 1m - timeout: 10s - retries: 3 - start_period: 1m - volumes: - - db:/var/lib/postgresql/data - networks: - - backend - environment: - - POSTGRES_USER={{ forgejo_db_username }} - - POSTGRES_PASSWORD={{ 
forgejo_db_password }} - - POSTGRES_DB={{ forgejo_db_database }} - -{% if forgejo_use_ci %} - wp-server: - image: woodpeckerci/woodpecker-server:v3 - networks: - - "{{ docker_swarm_public_network_name }}" - - backend - volumes: - - woodpecker:/var/lib/woodpecker/ - environment: - - WOODPECKER_OPEN=true - - "WOODPECKER_HOST={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_ci_alias is not undefined and not none) | ternary(subdomain_ci_alias, stack_name + '_ci') }}.{{server_domain}}" - - WOODPECKER_AGENT_SECRET={{ forgejo_woodpecker_agent_secret }} -{% if forgejo_ci_github_client is not undefined and not None and forgejo_ci_github_secret is not undefined and not None %} - - WOODPECKER_GITHUB=true - - WOODPECKER_GITHUB_CLIENT={{ forgejo_ci_github_client }} - - WOODPECKER_GITHUB_SECRET={{ forgejo_ci_github_secret }} -{% endif %} -{% if forgejo_ci_gitlab_client is not undefined and not None and forgejo_ci_gitlab_secret is not undefined and not None %} - - WOODPECKER_GITLAB=true - - WOODPECKER_GITLAB_CLIENT={{ forgejo_ci_gitlab_client }} - - WOODPECKER_GITLAB_SECRET={{ forgejo_ci_gitlab_secret }} -{% endif %} -{% if forgejo_ci_forgejo_client is not undefined and not None and forgejo_ci_forgejo_secret is not undefined and not None %} - - WOODPECKER_FORGEJO=true - - "WOODPECKER_FORGEJO_URL={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}" - - WOODPECKER_FORGEJO_CLIENT={{ forgejo_ci_forgejo_client }} - - WOODPECKER_FORGEJO_SECRET={{ forgejo_ci_forgejo_secret }} -{% endif %} -{% if forgejo_ci_gitea_url is not undefined and not None and forgejo_ci_gitea_client is not undefined and not None and forgejo_ci_gitea_secret is not undefined and not None %} - - WOODPECKER_GITEA=true - - "WOODPECKER_GITEA_URL={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}" - - WOODPECKER_GITEA_CLIENT={{ forgejo_ci_gitea_client }} - - WOODPECKER_GITEA_SECRET={{ forgejo_ci_gitea_secret }} -{% endif %} - - wp-agent: - image: woodpeckerci/woodpecker-agent:v3 - networks: - - backend - command: agent - volumes: - - woodpecker-agent-config:/etc/woodpecker - - /var/run/docker.sock:/var/run/docker.sock - environment: - - WOODPECKER_SERVER=wp-server:9000 - - WOODPECKER_AGENT_SECRET={{ forgejo_woodpecker_agent_secret }} -{% endif %} - -volumes: - data: - db: - woodpecker: - woodpecker-agent-config: - -networks: - "{{ docker_swarm_public_network_name }}": - external: true - backend: - - diff --git a/roles/forgejo/templates/upstream_ci.json.j2 b/roles/forgejo/templates/upstream_ci.json.j2 deleted file mode 100644 index 9c5ee64..0000000 --- a/roles/forgejo/templates/upstream_ci.json.j2 +++ /dev/null @@ -1,39 +0,0 @@ -{ - "@id": "{{ stack_name }}_ci_upstream", -{% if server_domain is not undefined and not none %} - "match": [ - { - "host": [ -{% if subdomain_ci_alias is not undefined and not none %} - "{{ subdomain_ci_alias }}.{{ server_domain }}" -{% else %} - "{{ stack_name }}_ci.{{ server_domain }}" -{% endif %} - ] - } - ], -{% else %} - "match": [ - { - "path": [ -{% if subdomain_ci_alias is not undefined and not none %} - "/{{ subdomain_ci_alias }}*" -{% else %} - "/{{ stack_name }}_ci*" -{% endif %} - ] - } - ], -{% endif %} - "handle": [ - { - "handler": "reverse_proxy", - "upstreams": [ - { - "dial": "{{ stack_name }}_wp-server:8000" - } - ] - } - ] -} - 
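The upstream templates above (and the gitea/blog ones elsewhere in this diff) are registered through caddy's admin API on port 2019; the role handlers do this with `docker_container_exec` from inside the caddy container, which the caddy role equips with curl and which mounts `{{ docker_stack_files_dir }}` read-only at `/stacks`. For debugging, the same interaction can be reproduced by hand; a sketch, with `blog` standing in for any stack name and the default `https` server name assumed:

```sh
# Illustrative only: poke caddy's admin API the way the role handlers do.
# Run these inside the caddy container (docker exec <caddy-container> ...).
curl localhost:2019/id/blog_upstream/             # does the route already exist?
curl -X DELETE localhost:2019/id/blog_upstream/   # drop a stale route before re-adding it
curl -X POST -H "Content-Type: application/json" \
     -d @/stacks/blog/upstream.json \
     localhost:2019/config/apps/http/servers/https/routes/0/   # register the rendered template
```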
diff --git a/roles/forgejo/vars/main.yml b/roles/forgejo/vars/main.yml deleted file mode 100644 index f28238d..0000000 --- a/roles/forgejo/vars/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -stack_name: forgejo - -stack_image: "codeberg.org/forgejo/forgejo" - -stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" - -forgejo_git_username: git diff --git a/roles/gitea/README.md b/roles/gitea/README.md new file mode 100644 index 0000000..60a0b99 --- /dev/null +++ b/roles/gitea/README.md @@ -0,0 +1,41 @@ +# gitea + +A relatively light-weight git server hosting. + +## Defaults + +``` +gitea_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" +``` + +The on-target directory where the proxy configuration file should be stashed. + +``` +gitea_use_https: true +``` + +Whether the service should be reachable through http (port 80) or through https (port 443) and provision an https certificate. Usually you will want this to stay `true`. + +``` +gitea_version: latest +``` + +The docker image version to be used in stack creation. + +``` +subdomain_alias: git +``` + +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `git.yourdomain.com` - +if this option is not set it will be served on `gitea.yourdomain.com` instead. + +For now gitea will still need to be initially set up after installation. +This could be automated with the help of these commands: + +```sh +docker run --name gitea -p 8080:3000 -e GITEA__security__INSTALL_LOCK=true -d gitea/gitea:1.14.2 + +$ docker exec gitea migrate +$ docker exec gitea gitea admin user create --admin --username root --password admin1234 --email admin@example.com +``` diff --git a/roles/gitea/defaults/main.yml b/roles/gitea/defaults/main.yml new file mode 100644 index 0000000..136085a --- /dev/null +++ b/roles/gitea/defaults/main.yml @@ -0,0 +1,24 @@ +--- + +# never got around to removing the master tag from the images +gitea_version: latest + +gitea_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" + +gitea_use_https: true + +# the subdomain link gitea will be reachable under +subdomain_alias: git + +gitea_db_database: gitea +gitea_db_username: gitea +gitea_db_password: gitea + +gitea_app_admin_username: Mygiteausername # can not be set to admin in Gitea +gitea_app_admin_password: Mygiteapassword +gitea_app_admin_email: myadmin@mydomain.mytld + +# gitea_smtp_host: domain.com:port +# gitea_smtp_username: my@username.com +# gitea_smtp_password: +# gitea_smtp_force_tls: false # forces tls if it is on a non-traditional tls port. 
Overwrites starttls so should generally be off diff --git a/roles/forgejo/files/forgejo b/roles/gitea/files/gitea similarity index 100% rename from roles/forgejo/files/forgejo rename to roles/gitea/files/gitea diff --git a/roles/gitea/handlers/main.yml b/roles/gitea/handlers/main.yml new file mode 100644 index 0000000..68ebab7 --- /dev/null +++ b/roles/gitea/handlers/main.yml @@ -0,0 +1,62 @@ +- name: Add admin user + community.docker.docker_container_exec: + container: "{{ gitea_app_container_name['stdout'] }}" + command: > + gitea admin user create --admin --username {{ gitea_app_admin_username }} --password {{ gitea_app_admin_password }} --email {{ gitea_app_admin_email }} + become: yes + listen: "no admin user" + +## Register reverse proxy +- name: Ensure upstream directory exists + ansible.builtin.file: + path: "{{ gitea_upstream_file_dir }}" + state: directory + mode: '0755' + become: yes + listen: "update gitea upstream" + +- name: Update upstream template + ansible.builtin.template: + src: upstream.json.j2 + dest: "{{ gitea_upstream_file_dir }}/upstream.json" + mode: '0600' + become: yes + listen: "update gitea upstream" + +# figure out if upstream id exists +- name: check {{ stack_name }} upstream + community.docker.docker_container_exec: + container: "{{ caddy_container_id }}" + command: > + curl localhost:2019/id/{{ stack_name }}_upstream/ + changed_when: False + register: result + become: yes + listen: "update gitea upstream" + +# upstream already exists, patch it +- name: remove old {{ stack_name }} upstream + community.docker.docker_container_exec: + container: "{{ caddy_container_id }}" + command: > + curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ + become: yes + when: (result.stdout | from_json)['error'] is not defined + listen: "update gitea upstream" + +# upstream has to be created +- name: add {{ stack_name }} upstream + community.docker.docker_container_exec: + container: "{{ caddy_container_id }}" + command: > + curl -X POST -H "Content-Type: application/json" -d @{{ gitea_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (gitea_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes + listen: "update gitea upstream" + +- name: Ensure upstream directory is gone again + ansible.builtin.file: + path: "{{ gitea_upstream_file_dir }}" + state: absent + become: yes + listen: "update gitea upstream" + diff --git a/roles/forgejo/meta/main.yml b/roles/gitea/meta/main.yml similarity index 72% rename from roles/forgejo/meta/main.yml rename to roles/gitea/meta/main.yml index f6b84d0..da07f4a 100644 --- a/roles/forgejo/meta/main.yml +++ b/roles/gitea/meta/main.yml @@ -1,15 +1,16 @@ --- + galaxy_info: author: Marty Oehme description: Light-weight git hosting license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] platforms: - name: GenericLinux - versions: - - all - + versions: all + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/gitea/tasks/main.yml b/roles/gitea/tasks/main.yml new file mode 100644 index 0000000..3529038 --- /dev/null +++ b/roles/gitea/tasks/main.yml @@ -0,0 +1,95 @@ +--- +- name: Ensure git user exists with ssh key + ansible.builtin.user: + name: "{{ gitea_git_username }}" + generate_ssh_key: yes + ssh_key_type: rsa + ssh_key_bits: 4096 + ssh_key_comment: "Gitea Host Key" + become: yes + register: git_user + +- name: Ensure git passthrough command directory exists + ansible.builtin.file: + path: 
"/app/gitea/" + state: directory + mode: '0770' + owner: "{{ git_user['uid'] }}" + group: "{{ git_user['group'] }}" + become: yes + +- name: Save git passthrough command in right location + ansible.builtin.copy: + src: gitea + dest: "/app/gitea/gitea" + owner: "{{ git_user['uid'] }}" + group: "{{ git_user['group'] }}" + mode: '0750' + become: yes + +- name: Fetch keyfile + fetch: + src: "{{ git_user['home'] }}/.ssh/id_rsa.pub" + dest: "buffer/{{ansible_hostname}}-id_rsa.pub" + flat: yes + become: yes + +- name: Ensure git user has its own key authorized for access + ansible.posix.authorized_key: + user: "{{ git_user['name'] }}" + state: present + key: "{{ lookup('file', 'buffer/{{ ansible_hostname }}-id_rsa.pub') }}" + become: yes + +- name: Clean up buffer dir + ansible.builtin.file: + path: buffer + state: absent + delegate_to: localhost + +## install gitea container +- name: Check upstream status + community.docker.docker_container_exec: + container: "{{ caddy_container_id }}" + command: > + curl localhost:2019/id/{{ stack_name }}_upstream/ + register: result + changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) + become: yes + notify: "update gitea upstream" + +- name: Deploy gitea to swarm + community.general.docker_stack: + name: "{{ stack_name }}" + state: present + prune: yes + compose: + - "{{ stack_compose }}" + become: yes + tags: + - docker-swarm + notify: "update gitea upstream" + +- name: Get app container info + ansible.builtin.command: + cmd: docker ps -q -f name={{ stack_name }}_app + become: yes + until: gitea_app_container_name['rc'] == 0 and gitea_app_container_name['stdout'] | length >= 1 + retries: 5 + delay: 10 + changed_when: False + register: gitea_app_container_name + +- name: Look for existing admin user + community.docker.docker_container_exec: + container: "{{ gitea_app_container_name['stdout'] }}" + command: > + gitea admin user list --admin + become: yes + until: "'connection refused' not in gitea_admin_list and 'Failed to run app' not in gitea_admin_list" + retries: 5 + delay: 10 + changed_when: gitea_admin_list['stdout_lines'] | length <= 1 + failed_when: gitea_admin_list['rc'] == 1 and gitea_admin_list['attempts'] >= 5 + register: gitea_admin_list + notify: "no admin user" diff --git a/roles/gitea/templates/docker-stack.yml.j2 b/roles/gitea/templates/docker-stack.yml.j2 new file mode 100644 index 0000000..aa1cb81 --- /dev/null +++ b/roles/gitea/templates/docker-stack.yml.j2 @@ -0,0 +1,68 @@ +version: '3.4' + +services: + app: + image: "{{ stack_image }}:{{ gitea_version }}" + healthcheck: + test: ["CMD", "wget", "--spider", "-q", "localhost:3000"] + interval: 1m + timeout: 10s + retries: 3 + start_period: 1m + volumes: + - data:/data + - /home/git/.ssh:/data/git/.ssh + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + environment: + - USER_UID={{ git_user['uid'] }} + - USER_GID={{ git_user['group'] }} + - GITEA__database__DB_TYPE=postgres + - GITEA__database__HOST=db:5432 + - GITEA__database__NAME={{ gitea_db_database }} + - GITEA__database__USER={{ gitea_db_username }} + - GITEA__database__PASSWD={{ gitea_db_password }} + - "GITEA__server__ROOT_URL={{ (gitea_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}" + - "GITEA__server__SSH_DOMAIN={{ server_domain }}" + - GITEA__server__LANDINGPAGE=explore + - GITEA__service__DISABLE_REGISTRATION=true +{% if gitea_app_admin_username is 
not undefined and not None and gitea_app_admin_password is not undefined and not None %} + - GITEA__security__INSTALL_LOCK=true +{% endif %} +{% if gitea_smtp_host is not undefined and not None and gitea_smtp_username is not undefined and not None and gitea_smtp_password is not undefined and not None %} + - GITEA__mailer__ENABLED=true + - GITEA__service__ENABLE_NOTIFY_MAIL=true + - GITEA__mailer__FROM=gitea@{{ server_domain }} + - GITEA__mailer__TYPE=smtp + - GITEA__mailer__HOST={{ gitea_smtp_host }} + - GITEA__mailer__IS_TLS_ENABLED={{ (gitea_smtp_force_tls is not undefined and not None) | ternary(gitea_smtp_force_tls,'false') }} + - GITEA__mailer__USER={{ gitea_smtp_username }} + - GITEA__mailer__PASSWD={{ gitea_smtp_password }} +{% endif %} + networks: + - "{{ docker_swarm_public_network_name }}" + - backend + ports: + - "127.0.0.1:2222:22" + + db: + image: postgres:13 + volumes: + - db:/var/lib/postgresql/data + networks: + - backend + environment: + - POSTGRES_USER={{ gitea_db_username }} + - POSTGRES_PASSWORD={{ gitea_db_password }} + - POSTGRES_DB={{ gitea_db_database }} + +volumes: + data: + db: + +networks: + "{{ docker_swarm_public_network_name }}": + external: true + backend: + + diff --git a/roles/forgejo/templates/upstream.json.j2 b/roles/gitea/templates/upstream.json.j2 similarity index 100% rename from roles/forgejo/templates/upstream.json.j2 rename to roles/gitea/templates/upstream.json.j2 diff --git a/roles/restic/vars/main.yml b/roles/gitea/vars/main.yml similarity index 54% rename from roles/restic/vars/main.yml rename to roles/gitea/vars/main.yml index 8b3dcf5..8fd0ae4 100644 --- a/roles/restic/vars/main.yml +++ b/roles/gitea/vars/main.yml @@ -1,8 +1,9 @@ --- -stack_name: restic -stack_image: "mazzolino/restic" +stack_name: gitea + +stack_image: "gitea/gitea" stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" -backup_enable: true +gitea_git_username: git diff --git a/roles/landingpage/README.md b/roles/landingpage/README.md index d0d3487..d649b50 100644 --- a/roles/landingpage/README.md +++ b/roles/landingpage/README.md @@ -1,10 +1,10 @@ # landingpage -The public face of my server. +The public face of my server. Not much to see here honestly, just a few simple lines of html explaining what this server is about and how to contact me. -I don't see anybody else benefiting massively from this role but me, +I don't see anybody else benefiting massively from this role but me, but if you want the same web presence go for it I suppose 😉 ## Defaults @@ -31,6 +31,7 @@ The docker image version to be used in stack creation. subdomain_alias: www ``` -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `www.yourdomain.com` - +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `www.yourdomain.com` - if this option is not set it will be served on `landingpage.yourdomain.com` instead. 
+ diff --git a/roles/landingpage/defaults/main.yml b/roles/landingpage/defaults/main.yml index cdfb3fd..2c47345 100644 --- a/roles/landingpage/defaults/main.yml +++ b/roles/landingpage/defaults/main.yml @@ -1,11 +1,11 @@ --- -landingpage_version: latest + +# never got around to removing the master tag from the images +landingpage_version: master landingpage_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" landingpage_use_https: true -landingpage_autoupdate: true - # the subdomain link landingpage will be reachable under subdomain_alias: www diff --git a/roles/landingpage/handlers/main.yml b/roles/landingpage/handlers/main.yml index e82422e..869b074 100644 --- a/roles/landingpage/handlers/main.yml +++ b/roles/landingpage/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ landingpage_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update landingpage upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ landingpage_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update landingpage upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update landingpage upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update landingpage upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ landingpage_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (landingpage_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ landingpage_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (landingpage_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update landingpage upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ landingpage_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update landingpage upstream" + diff --git a/roles/landingpage/meta/main.yml b/roles/landingpage/meta/main.yml index 75e0801..fbb1340 100644 --- a/roles/landingpage/meta/main.yml +++ b/roles/landingpage/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs my personal public facing landing page as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/landingpage/tasks/main.yml b/roles/landingpage/tasks/main.yml index c0fad7c..02a9d2a 100644 --- a/roles/landingpage/tasks/main.yml +++ b/roles/landingpage/tasks/main.yml @@ -7,7 +7,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update landingpage upstream" - name: Deploy landingpage to swarm @@ -17,7 +17,8 @@ prune: yes compose: - "{{ 
stack_compose }}" - become: true + become: yes tags: - docker-swarm notify: "update landingpage upstream" + diff --git a/roles/landingpage/templates/docker-stack.yml.j2 b/roles/landingpage/templates/docker-stack.yml.j2 index fee59db..b2525ab 100644 --- a/roles/landingpage/templates/docker-stack.yml.j2 +++ b/roles/landingpage/templates/docker-stack.yml.j2 @@ -4,7 +4,7 @@ services: app: image: "{{ stack_image }}:{{ landingpage_version }}" healthcheck: - test: ["CMD", "wget", "--spider", "-q", "127.0.0.1"] + test: ["CMD", "wget", "--spider", "-q", "localhost"] interval: 1m timeout: 10s retries: 3 @@ -12,11 +12,6 @@ services: entrypoint: sh -c "/docker-entrypoint.sh nginx -g 'daemon off;'" networks: - "{{ docker_swarm_public_network_name }}" -{% if landingpage_autoupdate is defined and landingpage_autoupdate %} - deploy: - labels: - - shepherd.autoupdate=true -{% endif %} networks: "{{ docker_swarm_public_network_name }}": diff --git a/roles/landingpage/vars/main.yml b/roles/landingpage/vars/main.yml index e8e20e6..e3616a9 100644 --- a/roles/landingpage/vars/main.yml +++ b/roles/landingpage/vars/main.yml @@ -1,6 +1,7 @@ --- + stack_name: landingpage -stack_image: "ghcr.io/marty-oehme/page" +stack_image: "registry.gitlab.com/cloud-serve/landing" stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/roles/linkding/defaults/main.yml b/roles/linkding/defaults/main.yml deleted file mode 100644 index c736c2c..0000000 --- a/roles/linkding/defaults/main.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -linkding_version: latest-plus # plus contains self-archiving possibilities with singlefile - -linkding_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" - -linkding_use_https: true - -linkding_autoupdate: true - -# the subdomain link linkding will be reachable under -subdomain_alias: links - -# initial superuser creation -linkding_username: linkdinger -linkding_password: linkdingerpass123 - -# should we back up the data? 
-linkding_backup_enable: true -linkding_backup_cron: 0 45 3 * * * diff --git a/roles/linkding/meta/main.yml b/roles/linkding/meta/main.yml deleted file mode 100644 index 1c14785..0000000 --- a/roles/linkding/meta/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -galaxy_info: - author: Marty Oehme - description: Installs linkding as a docker stack service - license: GPL-3.0-only - min_ansible_version: "2.9" - galaxy_tags: [] - -dependencies: - - docker-swarm - - caddy_id diff --git a/roles/linkding/templates/docker-stack.yml.j2 b/roles/linkding/templates/docker-stack.yml.j2 deleted file mode 100644 index dad26fc..0000000 --- a/roles/linkding/templates/docker-stack.yml.j2 +++ /dev/null @@ -1,46 +0,0 @@ -services: - app: - image: "{{ stack_image }}:{{ linkding_version }}" - healthcheck: - test: ["CMD", "curl", "--fail", "http://127.0.0.1:9090/health"] - interval: 1m - timeout: 10s - retries: 3 - start_period: 1m - networks: - - "{{ docker_swarm_public_network_name }}" - volumes: - - data:/etc/linkding/data - environment: - - "LD_SUPERUSER_NAME={{ linkding_username }}" - - "LD_SUPERUSER_PASSWORD={{ linkding_password }}" -{% if linkding_autoupdate is defined and linkding_autoupdate %} - deploy: - labels: - - shepherd.autoupdate=true -{% endif %} - -{% if backup_enable is not undefined and not false and linkding_backup_enable is not undefined and not false %} - backup: - image: mazzolino/restic - environment: - - "TZ={{ restic_timezone }}" - # go-cron starts w seconds - - "BACKUP_CRON={{ linkding_backup_cron }}" - - "RESTIC_REPOSITORY={{ restic_repo }}" - - "AWS_ACCESS_KEY_ID={{ restic_s3_key }}" - - "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}" - - "RESTIC_PASSWORD={{ restic_pass }}" - - "RESTIC_BACKUP_TAGS=linkding" - - "RESTIC_BACKUP_SOURCES=/volumes" - volumes: - - data:/volumes/linkding_data:ro -{% endif %} - -volumes: - data: - -networks: - "{{ docker_swarm_public_network_name }}": - external: true - diff --git a/roles/linkding/templates/upstream.json.j2 b/roles/linkding/templates/upstream.json.j2 deleted file mode 100644 index c4ee71c..0000000 --- a/roles/linkding/templates/upstream.json.j2 +++ /dev/null @@ -1,38 +0,0 @@ -{ - "@id": "{{ stack_name }}_upstream", -{% if server_domain is not undefined and not none %} - "match": [ - { - "host": [ -{% if subdomain_alias is not undefined and not none %} - "{{ subdomain_alias }}.{{ server_domain }}" -{% else %} - "{{ stack_name }}.{{ server_domain }}" -{% endif %} - ] - } - ], -{% else %} - "match": [ - { - "path": [ -{% if subdomain_alias is not undefined and not none %} - "/{{ subdomain_alias }}*" -{% else %} - "/{{ stack_name }}*" -{% endif %} - ] - } - ], -{% endif %} - "handle": [ - { - "handler": "reverse_proxy", - "upstreams": [ - { - "dial": "{{ stack_name }}_app:9090" - } - ] - } - ] -} diff --git a/roles/linkding/vars/main.yml b/roles/linkding/vars/main.yml deleted file mode 100644 index 2588d44..0000000 --- a/roles/linkding/vars/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -stack_name: linkding - -stack_image: "ghcr.io/sissbruecker/linkding" - -stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/roles/miniflux/README.md b/roles/miniflux/README.md index 282e3c6..f1ce4c1 100644 --- a/roles/miniflux/README.md +++ b/roles/miniflux/README.md @@ -27,6 +27,6 @@ The docker image version to be used in stack creation. subdomain_alias: rss ``` -If the deployed container should be served over a uri that is not the stack name. 
-By default, it will be set to `rss.yourdomain.com` - +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `rss.yourdomain.com` - if this option is not set it will be served on `miniflux.yourdomain.com` instead. diff --git a/roles/miniflux/defaults/main.yml b/roles/miniflux/defaults/main.yml index d93c12f..b57d96f 100644 --- a/roles/miniflux/defaults/main.yml +++ b/roles/miniflux/defaults/main.yml @@ -1,4 +1,5 @@ --- + miniflux_version: latest miniflux_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" @@ -8,8 +9,6 @@ miniflux_use_https: true # the subdomain link miniflux will be reachable under subdomain_alias: rss -miniflux_autoupdate: true - # Should ideally be overwritten in encrypted group/host vars miniflux_admin_username: myadmin miniflux_admin_password: mypassword diff --git a/roles/miniflux/handlers/main.yml b/roles/miniflux/handlers/main.yml index d26b2a6..864d7c6 100644 --- a/roles/miniflux/handlers/main.yml +++ b/roles/miniflux/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ miniflux_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update miniflux upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ miniflux_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update miniflux upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update miniflux upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update miniflux upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ miniflux_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (miniflux_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ miniflux_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (miniflux_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update miniflux upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ miniflux_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update miniflux upstream" + diff --git a/roles/miniflux/meta/main.yml b/roles/miniflux/meta/main.yml index f9aeaf0..50da3df 100644 --- a/roles/miniflux/meta/main.yml +++ b/roles/miniflux/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs miniflux as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/miniflux/tasks/main.yml b/roles/miniflux/tasks/main.yml index 46cd068..e4dd0ab 100644 --- a/roles/miniflux/tasks/main.yml +++ b/roles/miniflux/tasks/main.yml @@ -7,7 +7,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != 
(lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update miniflux upstream" - name: Deploy miniflux to swarm @@ -17,7 +17,8 @@ prune: yes compose: - "{{ stack_compose }}" - become: true + become: yes tags: - docker-swarm notify: "update miniflux upstream" + diff --git a/roles/miniflux/templates/docker-stack.yml.j2 b/roles/miniflux/templates/docker-stack.yml.j2 index 15af1ed..7af9ed7 100644 --- a/roles/miniflux/templates/docker-stack.yml.j2 +++ b/roles/miniflux/templates/docker-stack.yml.j2 @@ -24,11 +24,6 @@ services: {% else %} - "BASE_URL={{ (miniflux_use_https == True) | ternary('https', 'http') }}://localhost/{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}" {% endif %} -{% if miniflux_autoupdate is defined and miniflux_autoupdate %} - deploy: - labels: - - shepherd.autoupdate=true -{% endif %} db: image: postgres:11 diff --git a/roles/miniflux/vars/main.yml b/roles/miniflux/vars/main.yml index 495ffee..05bf0b2 100644 --- a/roles/miniflux/vars/main.yml +++ b/roles/miniflux/vars/main.yml @@ -1,4 +1,5 @@ --- + stack_name: miniflux stack_image: "miniflux/miniflux" diff --git a/roles/monica/README.md b/roles/monica/README.md index f953fe3..c95ec92 100644 --- a/roles/monica/README.md +++ b/roles/monica/README.md @@ -27,8 +27,8 @@ The docker image version to be used in stack creation. subdomain_alias: prm ``` -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `prm.yourdomain.com` (personal relationship manager) - +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `prm.yourdomain.com` (personal relationship manager) - if this option is not set it will be served on `monica.yourdomain.com` instead. ``` @@ -38,14 +38,14 @@ monica_db_password: mymonicadbpassword ``` Set the default username and password combination on first container start. -If loading from an existing volume this does nothing, otherwise it sets the +If loading from an existing volume this does nothing, otherwise it sets the first user so you can instantly log in. ``` monica_app_disable_signups: true ``` -Sets the behavior on the login screen --- +Sets the behavior on the login screen --- if set to true (default) will not let anyone but the first user sign up, who automatically becomes an administrative user. If set to false will allow multiple users to sign up on the instance. @@ -57,13 +57,13 @@ monica_app_weather_api_key: If `monica_app_geolocation_api_key` is set, Monica will translate addresses input into the app to geographical latitude/ longitude data. -It requires an api key from https://locationiq.com/, which are free for +It requires an api key from https://locationiq.com/, which are free for 10.000 daily requests. -Similarly, if `monica_app_weather_api_key` is set, monica will (afaik) show -weather data for the location of individual contacts. +Similarly, if `monica_app_weather_api_key` is set, monica will (afaik) show +weather data for the location of individual contacts. It requires an API key from https://darksky.net/dev/register, where -1.000 daily requests are free. +1.000 daily requests are free. Be aware, however, that since darksky's sale to Apple, no new API signups are possible. To use this feature, `monica_app_geolocation_api_key` must also be filled out. @@ -71,8 +71,8 @@ To use this feature, `monica_app_geolocation_api_key` must also be filled out. 
monica_mail_host: smtp.eu.mailgun.org monica_mail_port: 465 monica_mail_encryption: tls -monica_mail_username: -monica_mail_password: +monica_mail_username: +monica_mail_password: monica_mail_from: monica@yourserver.com monica_mail_from_name: Monica monica_mail_new_user_notification_address: "{{ caddy_email }}" @@ -81,5 +81,5 @@ monica_mail_new_user_notification_address: "{{ caddy_email }}" Sets up the necessary details for Monica to send out registration and reminder e-mails. Requires an smtp server set up, most easily doable through things like mailgun or sendgrid. Variables should be relatively self-explanatory, -with `monica_mail_new_user_notification_address` being the address the notifications should be sent _to_, +with `monica_mail_new_user_notification_address` being the address the notifications should be sent *to*, so in all probability some sort of administration address. diff --git a/roles/monica/defaults/main.yml b/roles/monica/defaults/main.yml index 303401e..f4d1d5e 100644 --- a/roles/monica/defaults/main.yml +++ b/roles/monica/defaults/main.yml @@ -1,4 +1,5 @@ --- + monica_version: latest monica_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" @@ -18,8 +19,8 @@ monica_db_password: mymonicadbpassword #monica_app_weather_api_key: #monica_mail_host: smtp.eu.mailgun.org -#monica_mail_username: -#monica_mail_password: +#monica_mail_username: +#monica_mail_password: monica_mail_port: 465 monica_mail_encryption: tls #monica_mail_from: monica@yourserver.com diff --git a/roles/monica/handlers/main.yml b/roles/monica/handlers/main.yml index c7d2644..00c7001 100644 --- a/roles/monica/handlers/main.yml +++ b/roles/monica/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ monica_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update monica upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ monica_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update monica upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update monica upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update monica upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ monica_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (monica_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ monica_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (monica_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update monica upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ monica_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update monica upstream" + diff --git a/roles/monica/meta/main.yml b/roles/monica/meta/main.yml index b456668..3858e67 100644 --- a/roles/monica/meta/main.yml +++ 
b/roles/monica/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs monica as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/monica/tasks/Ubuntu.yml b/roles/monica/tasks/Ubuntu.yml index dd5b043..a67147d 100644 --- a/roles/monica/tasks/Ubuntu.yml +++ b/roles/monica/tasks/Ubuntu.yml @@ -4,8 +4,9 @@ ansible.builtin.package: name: "openssl" state: present - become: true + become: yes tags: - apt - download - packages + diff --git a/roles/monica/tasks/main.yml b/roles/monica/tasks/main.yml index 30d9aab..0c0ce63 100644 --- a/roles/monica/tasks/main.yml +++ b/roles/monica/tasks/main.yml @@ -12,7 +12,8 @@ ansible.builtin.shell: echo -n 'base64:'; openssl rand -base64 32 register: monica_app_key -- set_fact: monica_app_key={{ monica_app_key.stdout }} +- set_fact: + monica_app_key={{ monica_app_key.stdout }} ## install container - name: Check upstream status @@ -22,7 +23,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update monica upstream" - name: Deploy to swarm @@ -32,7 +33,8 @@ prune: yes compose: - "{{ stack_compose }}" - become: true + become: yes tags: - docker-swarm notify: "update monica upstream" + diff --git a/roles/monica/vars/main.yml b/roles/monica/vars/main.yml index 4635128..a4495b4 100644 --- a/roles/monica/vars/main.yml +++ b/roles/monica/vars/main.yml @@ -1,4 +1,5 @@ --- + stack_name: monica stack_image: "monica" diff --git a/roles/nextcloud/README.md b/roles/nextcloud/README.md index 146fed9..047548b 100644 --- a/roles/nextcloud/README.md +++ b/roles/nextcloud/README.md @@ -4,14 +4,13 @@ A full office suite and groupware proposition, though its main draw for most is the file synchronization abilities. AKA Dropbox replacement. -This software can grow enormous and enormously complicated, +This software can grow enormous and enormously complicated, this Ansible setup role concentrates on 3 things: +* a stable and secure base setup from the official docker container +* automatic setup of an email pipeline so users can reset passwords and be updated of changes +* the ability to use S3 object storage as the primary way of storing users' files -- a stable and secure base setup from the official docker container -- automatic setup of an email pipeline so users can reset passwords and be updated of changes -- the ability to use S3 object storage as the primary way of storing users' files - -The rest should be taken care of either automatically, +The rest should be taken care of either automatically, or supplied after the fact (if using different plugins or similar). ## Defaults @@ -33,7 +32,7 @@ nextcloud_version: fpm nextcloud_db_version: 12 ``` -The docker image version to be used in stack creation. +The docker image version to be used in stack creation. The role sets up the `php-fpm` version of the official Nextcloud image. That means, Caddy is used in front as the server which presents all pages and access to files, the Nextcloud image itself only serves as the PHP data store. @@ -42,17 +41,17 @@ If changing the version to one relying on Nextcloud's in-built Apache server, take care to change where the upstream proxy is pointing to since the Caddy server in front loses its meaning. 
The second variable points to the docker image that should be used for the PostgreSQL database, -with 12 pre-filled as default. +with 12 pre-filled as default. You can put this to latest, but should take care to migrate the database correctly when an update rolls around, -or it _will_ destroy your data at some point. +or it *will* destroy your data at some point. Generally, it seems easier to pin this to a specific version and then only update manually. ```yml subdomain_alias: files ``` -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `files.yourdomain.com` - +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `files.yourdomain.com` - if this option is not set it will be served on `nextcloud.yourdomain.com` instead. If you change or delete this, you should also change what `nextcloud_trusted_domains` points to. @@ -67,7 +66,7 @@ nextcloud_db_password: secretnextcloud ``` Sets the default username and password for application and database. -All of these variables are necessary to circumvent the manual installation process +All of these variables are necessary to circumvent the manual installation process you would usually be faced with on first creating a Nextcloud instance. Ideally change all of these for your personal setup, but it is especially important to change the app admin login data since they are what is public facing. @@ -78,7 +77,7 @@ nextcloud_trusted_domains: "{{ subdomain_alias }}.{{ server_domain }}" The domains that are allowed to access your Nextcloud instance. Should point to any domains that you want it accessible on, -can be a space-separated list of them. +can be a space-separated list of them. Take care to include the sub-domain if your are accessing it through one of them. [Further explanation](https://blog.martyoeh.me/posts/2021-11-18-nextcloud-trusted-domains/). @@ -131,6 +130,7 @@ If your details are correct, Nextcloud should automatically set up S3 as its pri Be careful if you switch an existing data volume of the Nextcloud image to S3 as you will lose all access to existing files. -The files _should_ not be deleted at this point, +The files *should* not be deleted at this point, only access will be lost, but you are playing with fire at this point. 
+ diff --git a/roles/nextcloud/defaults/main.yml b/roles/nextcloud/defaults/main.yml index 4881068..10858f8 100644 --- a/roles/nextcloud/defaults/main.yml +++ b/roles/nextcloud/defaults/main.yml @@ -1,8 +1,9 @@ --- + # set preferred application version -nextcloud_version: 30-fpm-alpine +nextcloud_version: fpm-alpine # set preferred postgres version -nextcloud_db_version: 16-alpine +nextcloud_db_version: 12-alpine nextcloud_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" @@ -18,13 +19,6 @@ nextcloud_redis_password: myredispass nextcloud_db_username: nextcloud nextcloud_db_password: secretnextcloud -# run restic backups -nextcloud_backup_enable: true -nextcloud_backup_cron: 0 30 3 * * * - -nextcloud_php_memory_limit: 5G # maximum ram php may use -nextcloud_php_upload_limit: 15G # maximum size of (web) uploaded files - # if you wish to access your nextcloud instance from the reverse proxy nextcloud_trusted_domains: "{{ subdomain_alias }}.{{ server_domain }}" @@ -37,6 +31,7 @@ nextcloud_smtp_authtype: LOGIN # nextcloud_smtp_password: nextcloud_smtp_from_address: noreply nextcloud_smtp_from_domain: "{{ server_domain }}" + # the following block is required *fully* for primary object storage # nextcloud_s3_host: s3.eu-central-1.wasabisys.com # nextcloud_s3_bucket: nextcloud @@ -46,3 +41,4 @@ nextcloud_smtp_from_domain: "{{ server_domain }}" # nextcloud_s3_ssl: true # nextcloud_s3_region: eu-central-1 # nextcloud_s3_usepath_style: true + diff --git a/roles/nextcloud/files/Caddyfile b/roles/nextcloud/files/Caddyfile index a56ed59..8a86c1c 100644 --- a/roles/nextcloud/files/Caddyfile +++ b/roles/nextcloud/files/Caddyfile @@ -1,35 +1,15 @@ -{ - servers { - trusted_proxies static 10.0.0.0/8 - } -} - :80 { - encode zstd gzip root * /var/www/html + file_server + php_fastcgi app:9000 header { # enable HSTS - Strict-Transport-Security max-age=31536000;includeSubDomains;preload; - Permissions-Policy interest-cohort=() - X-Content-Type-Options nosniff - X-Frame-Options SAMEORIGIN - Referrer-Policy no-referrer - X-XSS-Protection "1; mode=block" - X-Permitted-Cross-Domain-Policies none - X-Robots-Tag "noindex, nofollow" + Strict-Transport-Security max-age=31536000; } - # client support (e.g. os x calendar / contacts) redir /.well-known/carddav /remote.php/dav 301 redir /.well-known/caldav /remote.php/dav 301 - redir /.well-known/webfinger /index.php/.well-known/webfinger 301 - redir /.well-known/nodeinfo /index.php/.well-known/nodeinfo 301 - - # Uncomment this block if you use the high speed files backend: https://github.com/nextcloud/notify_push - #handle_path /push/* { - # reverse_proxy unix//run/notify_push/notify_push.sock # I love Unix sockets, but you can do :7867 also - #} # .htaccess / data / config / ... 
shouldn't be accessible from outside @forbidden { @@ -45,36 +25,8 @@ path /occ path /console.php } - handle @forbidden { - respond 404 - } - handle { - root * /var/www/html - php_fastcgi app:9000 { - # Tells nextcloud to remove /index.php from URLs in links - env front_controller_active true - env modHeadersAvailable true # Avoid sending the security headers twice - } - } + respond @forbidden 404 - # From .htaccess, set cache for versioned static files (cache-busting) - @immutable { - path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite - query v=* - } - header @immutable Cache-Control "max-age=15778463, immutable" - - # From .htaccess, set cache for normal static files - @static { - path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite - not query v=* - } - header @static Cache-Control "max-age=15778463" - - # From .htaccess, cache fonts for 1 week - @woff2 path *.woff2 - header @woff2 Cache-Control "max-age=604800" - - file_server } + diff --git a/roles/nextcloud/handlers/main.yml b/roles/nextcloud/handlers/main.yml index 82d4a16..7cc7626 100644 --- a/roles/nextcloud/handlers/main.yml +++ b/roles/nextcloud/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ nextcloud_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update nextcloud upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ nextcloud_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update nextcloud upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update nextcloud upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update nextcloud upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ nextcloud_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (nextcloud_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ nextcloud_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (nextcloud_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update nextcloud upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ nextcloud_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update nextcloud upstream" + diff --git a/roles/nextcloud/meta/main.yml b/roles/nextcloud/meta/main.yml index 8eb68a7..b503ed3 100644 --- a/roles/nextcloud/meta/main.yml +++ b/roles/nextcloud/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs nextcloud as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/nextcloud/tasks/main.yml b/roles/nextcloud/tasks/main.yml index 05e9c38..17781ae 100644 --- a/roles/nextcloud/tasks/main.yml +++ b/roles/nextcloud/tasks/main.yml 
@@ -7,21 +7,23 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update nextcloud upstream" - name: Ensure target directory exists ansible.builtin.file: path: "{{ nextcloud_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes + notify: "update nextcloud upstream" - name: Move webserver Caddyfile to target dir ansible.builtin.copy: src: "Caddyfile" dest: "{{ nextcloud_upstream_file_dir }}/Caddyfile" - become: true + become: yes + notify: "update nextcloud upstream" - name: Deploy to swarm community.general.docker_stack: @@ -30,6 +32,8 @@ prune: yes compose: - "{{ stack_compose }}" - become: true + become: yes tags: - docker-swarm + notify: "update nextcloud upstream" + diff --git a/roles/nextcloud/templates/docker-stack.yml.j2 b/roles/nextcloud/templates/docker-stack.yml.j2 index ff5f6ce..d97632a 100644 --- a/roles/nextcloud/templates/docker-stack.yml.j2 +++ b/roles/nextcloud/templates/docker-stack.yml.j2 @@ -7,7 +7,7 @@ services: - backend - "{{ docker_swarm_public_network_name }}" healthcheck: - test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:2019/metrics"] + test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://localhost:2019/metrics"] interval: 1m timeout: 10s retries: 3 @@ -31,7 +31,7 @@ services: start_period: 5m # needed for db to be up, # see https://help.nextcloud.com/t/failed-to-install-nextcloud-with-docker-compose/83681/15 - # entrypoint: sh -c "while !(nc -z db 5432); do sleep 30; done; /entrypoint.sh php-fpm" + entrypoint: sh -c "while !(nc -z db 5432); do sleep 30; done; /entrypoint.sh php-fpm" environment: - NEXTCLOUD_ADMIN_USER={{ nextcloud_app_admin_username }} - NEXTCLOUD_ADMIN_PASSWORD={{ nextcloud_app_admin_password }} @@ -41,8 +41,6 @@ services: - POSTGRES_DB={{ nextcloud_db_username }} - POSTGRES_USER={{ nextcloud_db_username }} - POSTGRES_PASSWORD={{ nextcloud_db_password }} - - PHP_MEMORY_LIMIT={{ nextcloud_php_memory_limit }} - - PHP_UPLOAD_LIMIT={{ nextcloud_php_upload_limit }} {% if nextcloud_trusted_domains is not undefined and not none %} - NEXTCLOUD_TRUSTED_DOMAINS={{ nextcloud_trusted_domains }} {% endif %} @@ -142,42 +140,6 @@ services: networks: - backend - # from https://okxo.de/speed-up-nextcloud-preview-generation-with-imaginary/ - # and https://github.com/nextcloud/all-in-one/tree/main/Containers/imaginary - imaginary: - image: nextcloud/aio-imaginary:latest - environment: - - PORT=9000 - healthcheck: - test: ["CMD", "/healthcheck.sh"] - interval: 1m - timeout: 10s - retries: 3 - start_period: 1m - command: -return-size -max-allowed-resolution 222.2 -concurrency 50 -enable-url-source -log-level debug - cap_add: - - CAP_SYS_NICE - networks: - - backend - -{% if backup_enable is not undefined and not false and nextcloud_backup_enable is not undefined and not false %} - backup: - image: mazzolino/restic - environment: - - "TZ={{ restic_timezone }}" - # go-cron starts w seconds - - "BACKUP_CRON={{ nextcloud_backup_cron }}" - - "RESTIC_REPOSITORY={{ restic_repo }}" - - "AWS_ACCESS_KEY_ID={{ restic_s3_key }}" - - "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}" - - "RESTIC_PASSWORD={{ restic_pass }}" - - "RESTIC_BACKUP_TAGS=nextcloud" - - "RESTIC_BACKUP_SOURCES=/volumes" - volumes: - - db:/volumes/nextcloud_db:ro - - data:/volumes/nextcloud_data:ro -{% endif %} - # metrics: # image: telegraf # hostname: 
"${HOSTNAME:-vmi352583.contaboserver.net}" diff --git a/roles/nextcloud/vars/main.yml b/roles/nextcloud/vars/main.yml index 65a4821..a1a21cd 100644 --- a/roles/nextcloud/vars/main.yml +++ b/roles/nextcloud/vars/main.yml @@ -1,4 +1,5 @@ --- + stack_name: nextcloud stack_image: "nextcloud" diff --git a/roles/ntfy/README.md b/roles/ntfy/README.md deleted file mode 100644 index cd7dde5..0000000 --- a/roles/ntfy/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# ntfy - -A self-hosted notifications service. - -Can take messages sent to the server through simple POST requests on specific topics and -blasts them out to any subscribed receiver on Android, Web, Commandline, or even in other applications. - -Thus can function as a simple cross-platform push message service that fits very well into unix workflows. - -## Defaults - -``` -ntfy_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" -``` - -The on-target directory where the proxy configuration file should be stashed. - -``` -ntfy_use_https: true -``` - -Whether the service should be reachable through http (port 80) or through https (port 443) and provision an https certificate. -Usually you will want this to stay `true`, -especially on the public facing web. - -``` -ntfy_version: latest -``` - -The docker image version to be used in stack creation. - -``` -subdomain_alias: push -``` - -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `push.yourdomain.com` - -if this option is not set it will be served on `ntfy.yourdomain.com` instead. - -The individual `ntfy` options to be changed are very well described on -[the ntfy documentation](https://ntfy.sh/docs/config/). -Together with the default variables for this role it should be easy to find a good pair of settings. 
diff --git a/roles/ntfy/defaults/main.yml b/roles/ntfy/defaults/main.yml deleted file mode 100644 index 70e3275..0000000 --- a/roles/ntfy/defaults/main.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -ntfy_version: latest - -ntfy_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" - -ntfy_use_https: true - -subdomain_alias: push - -ntfy_global_topic_limit: 15000 -ntfy_visitor_subscription_limit: 30 -ntfy_visitor_request_limit_burst: 60 -ntfy_visitor_request_limit_replenish: "10s" -ntfy_cache_duration: "12h" -ntfy_attachment_total_size_limit: "5G" -ntfy_attachment_file_size_limit: "15M" -ntfy_attachment_expiry_duration: "5h" -ntfy_visitor_attachment_total_size_limit: "500M" -ntfy_visitor_attachment_daily_bandwidth_limit: "1G" diff --git a/roles/ntfy/handlers/main.yml b/roles/ntfy/handlers/main.yml deleted file mode 100644 index c26f731..0000000 --- a/roles/ntfy/handlers/main.yml +++ /dev/null @@ -1,45 +0,0 @@ -## Register reverse proxy -- name: Ensure upstream directory exists - ansible.builtin.file: - path: "{{ ntfy_upstream_file_dir }}" - state: directory - mode: "0755" - become: true - listen: "update ntfy upstream" - -- name: Update upstream template - ansible.builtin.template: - src: upstream.json.j2 - dest: "{{ ntfy_upstream_file_dir }}/upstream.json" - become: true - listen: "update ntfy upstream" - -# figure out if upstream id exists -- name: check {{ stack_name }} upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl localhost:2019/id/{{ stack_name }}_upstream/ - changed_when: False - register: result - become: true - listen: "update ntfy upstream" - -# upstream already exists, patch it -- name: remove old {{ stack_name }} upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true - when: (result.stdout | from_json)['error'] is not defined - listen: "update ntfy upstream" - -# upstream has to be created -- name: add {{ stack_name }} upstream - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl -X POST -H "Content-Type: application/json" -d @{{ ntfy_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (ntfy_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true - listen: "update ntfy upstream" diff --git a/roles/ntfy/meta/main.yml b/roles/ntfy/meta/main.yml deleted file mode 100644 index 14b8f5d..0000000 --- a/roles/ntfy/meta/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -galaxy_info: - author: Marty Oehme - description: Installs a self-hosted push notification service through docker-swarm. 
- license: GPL-3.0-only - min_ansible_version: "2.9" - galaxy_tags: [] - -dependencies: - - docker-swarm - - caddy_id diff --git a/roles/ntfy/tasks/main.yml b/roles/ntfy/tasks/main.yml deleted file mode 100644 index df8ce94..0000000 --- a/roles/ntfy/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- name: Ensure target directory exists - ansible.builtin.file: - path: "{{ ntfy_upstream_file_dir }}" - state: directory - mode: "0755" - become: true - -- name: Move ntfy configuration file to target dir - ansible.builtin.template: - src: "server.yml.j2" - dest: "{{ ntfy_upstream_file_dir }}/server.yml" - become: true - notify: "update ntfy upstream" - -## install ntfy container -- name: Check upstream status - community.docker.docker_container_exec: - container: "{{ caddy_container_id }}" - command: > - curl localhost:2019/id/{{ stack_name }}_upstream/ - register: result - changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true - notify: "update ntfy upstream" - -- name: Deploy ntfy to swarm - community.general.docker_stack: - name: "{{ stack_name }}" - state: present - prune: yes - compose: - - "{{ stack_compose }}" - become: true - tags: - - docker-swarm - notify: "update ntfy upstream" diff --git a/roles/ntfy/templates/docker-stack.yml.j2 b/roles/ntfy/templates/docker-stack.yml.j2 deleted file mode 100644 index dce3b29..0000000 --- a/roles/ntfy/templates/docker-stack.yml.j2 +++ /dev/null @@ -1,27 +0,0 @@ -version: '3.4' - -services: - app: - image: "{{ stack_image }}:{{ ntfy_version }}" - healthcheck: - test: ["CMD", "wget", "--spider", "-q", "127.0.0.1"] - interval: 1m - timeout: 10s - retries: 3 - start_period: 1m - volumes: - - "{{ ntfy_upstream_file_dir }}/server.yml:/etc/ntfy/server.yml" - - cache:/var/cache/ntfy - networks: - - "{{ docker_swarm_public_network_name }}" - command: - - serve - -volumes: - cache: - -networks: - "{{ docker_swarm_public_network_name }}": - external: true - - diff --git a/roles/ntfy/templates/server.yml.j2 b/roles/ntfy/templates/server.yml.j2 deleted file mode 100644 index 93c8bb8..0000000 --- a/roles/ntfy/templates/server.yml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -base-url: "https://{{ server_domain }}" -global_topic_limit: {{ ntfy_global_topic_limit }} -visitor_subscription_limit: {{ ntfy_visitor_subscription_limit }} -visitor_request_limit_burst: {{ ntfy_visitor_request_limit_burst }} -visitor_request_limit_replenish: "{{ ntfy_visitor_request_limit_replenish }}" -cache-file: "/var/cache/ntfy/cache.db" -cache_duration: "{{ ntfy_cache_duration }}" -attachment-cache-dir: "/var/cache/ntfy/attachments" -attachment_total_size_limit: "{{ ntfy_attachment_total_size_limit }}" -attachment_file_size_limit: "{{ ntfy_attachment_file_size_limit }}" -attachment_expiry_duration: "{{ ntfy_attachment_expiry_duration }}" -visitor_attachment_total_size_limit: "{{ ntfy_visitor_attachment_total_size_limit }}" -visitor_attachment_daily_bandwidth_limit: "{{ ntfy_visitor_attachment_daily_bandwidth_limit }}" -behind-proxy: true # uses 'X-Forwarded-For' Headers for individual visitors -# TODO i believe Caddy does not set the correct X-Forwarded-For header, see whoami container to check diff --git a/roles/restic/README.md b/roles/restic/README.md deleted file mode 100644 index 8849990..0000000 --- a/roles/restic/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# restic - -Backup maintenance stack. - -Takes care of regularly pruning the backup repository and checking its integrity. -Currently only supports S3 as a backend. 
- -## Defaults - -```yaml -restic_timezone: US/Chicago -``` - -The timezone to be used for the cronjob. - -```yaml -restic_version: latest -``` - -The docker image version to be used in stack creation. - -```yaml -restic_repo: s3.eu-central-1.wasabisys.com/myrepo -restic_pass: -``` - -The repository url and the restic repository password. -See the restic documentation for more information. - -```yaml -restic_s3_key: -restic_s3_secret: -``` - -The restic S3 credentials, i.e. the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. - -```yaml -restic_prune_cron: 0 0 4 * * * -restic_forget_args: --prune --keep-last 14 --keep-daily 2 --keep-weekly 2 -``` - -The default prune and forget cronjob schedule and arguments: Prune the repository every day at 4:00 AM and keep the last 14 snapshots, 2 daily snapshots and 2 weekly snapshots. - -```yaml -restic_check_cron: 0 15 5 * * * -restic_check_args: --read-data-subset=5% -``` - -The default check cronjob schedule and arguments: Check the repository integrity every day at 5:15 AM and in addition to structural checks, read 5 randomly chosen % for a data integrity check. diff --git a/roles/restic/defaults/main.yml b/roles/restic/defaults/main.yml deleted file mode 100644 index 48fdbc7..0000000 --- a/roles/restic/defaults/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -restic_version: latest - -# restic_repo: s3.eu-central-1.wasabisys.com/myrepo -# restic_pass: -# restic_s3_key: -# restic_s3_secret: -restic_timezone: "{{ server_timezone | default('US/Chicago') }}" - -restic_prune_cron: 0 0 4 * * * -restic_forget_args: --prune --keep-last 14 --keep-daily 2 --keep-weekly 2 - -restic_check_cron: 0 30 4 * * SUN -restic_check_args: --read-data-subset=15% diff --git a/roles/restic/meta/main.yml b/roles/restic/meta/main.yml deleted file mode 100644 index 0c765ab..0000000 --- a/roles/restic/meta/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -galaxy_info: - author: Marty Oehme - description: Installs a restic-based backup maintenance stack. Only supports S3 atm. 
- license: GPL-3.0-only - min_ansible_version: "2.9" - galaxy_tags: [] - -dependencies: - - docker-swarm diff --git a/roles/restic/tasks/main.yml b/roles/restic/tasks/main.yml deleted file mode 100644 index 5bb5027..0000000 --- a/roles/restic/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Deploy restic to swarm - community.general.docker_stack: - name: "{{ stack_name }}" - state: present - prune: yes - compose: - - "{{ stack_compose }}" - become: true - tags: - - docker-swarm diff --git a/roles/restic/templates/docker-stack.yml.j2 b/roles/restic/templates/docker-stack.yml.j2 deleted file mode 100644 index f64be0a..0000000 --- a/roles/restic/templates/docker-stack.yml.j2 +++ /dev/null @@ -1,30 +0,0 @@ -services: - prune: - image: "{{ stack_image }}:{{ restic_version }}" - hostname: docker - environment: - - "TZ={{ restic_timezone }}" - - "SKIP_INIT=true" - - "RUN_ON_STARTUP=true" - # go-cron starts w seconds - - "PRUNE_CRON={{ restic_prune_cron }}" - - "RESTIC_FORGET_ARGS={{ restic_forget_args }}" - - "RESTIC_REPOSITORY={{ restic_repo }}" - - "AWS_ACCESS_KEY_ID={{ restic_s3_key }}" - - "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}" - - "RESTIC_PASSWORD={{ restic_pass }}" - - check: - image: "{{ stack_image }}:{{ restic_version }}" - hostname: docker - environment: - - "TZ={{ restic_timezone }}" - - "SKIP_INIT=true" - - "RUN_ON_STARTUP=false" - # go-cron starts w seconds - - "CHECK_CRON={{ restic_check_cron }}" - - "RESTIC_CHECK_ARGS={{ restic_check_args }}" - - "RESTIC_REPOSITORY={{ restic_repo }}" - - "AWS_ACCESS_KEY_ID={{ restic_s3_key }}" - - "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}" - - "RESTIC_PASSWORD={{ restic_pass }}" diff --git a/roles/searx/README.md b/roles/searx/README.md index 09300d4..0d42d4a 100644 --- a/roles/searx/README.md +++ b/roles/searx/README.md @@ -26,8 +26,8 @@ The docker image version to be used in stack creation. subdomain_alias: search ``` -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `search.yourdomain.com` - +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `search.yourdomain.com` - if this option is not set it will be served on `searx.yourdomain.com` instead. ``` @@ -39,11 +39,11 @@ searx_authentication: By default, the searx instance is not protected with a login, however you can have caddy provide a basic auth login form by using this variable. -You can either change the login to suit you by generating a combination +You can either change the login to suit you by generating a combination (or multiple, it will also work with an arbitrary amount of logins), -or remove the necessity to login altogether by not setting the +or remove the necessity to login altogether by not setting the `searx_authentication` variable to anything. -The password needs to be in a hashed format, which is easiest to accomplish -with the help of caddy itself --- simply doing `caddy hash-password` will +The password needs to be in a hashed format, which is easiest to accomplish +with the help of caddy itself --- simply doing `caddy hash-password` will allow you to create a new hashed password. 
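The searx README hunk above points to `caddy hash-password` for producing the hashed credentials expected by `searx_authentication`. A minimal sketch of what the resulting (ideally vault-encrypted) group variable could look like — the username, password, and shortened hash are placeholders, not values shipped with this repository:

```yaml
# Illustrative sketch: generate the bcrypt hash inside the running Caddy container, e.g.
#   docker exec -it <caddy_container_id> caddy hash-password --plaintext 'mysearxpassword'
# and store the output next to the chosen username:
searx_authentication:
  - username: mysearxusername
    password: "$2a$14$WcRt9Yieq..."  # output of `caddy hash-password`, truncated here
```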
diff --git a/roles/searx/defaults/main.yml b/roles/searx/defaults/main.yml index dd9fa5a..b129040 100644 --- a/roles/searx/defaults/main.yml +++ b/roles/searx/defaults/main.yml @@ -1,15 +1,14 @@ --- + searx_version: latest searx_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" searx_use_https: true -searx_autoupdate: true - # the subdomain link searx will be reachable under subdomain_alias: search # searx_authentication: -# - username: # mysearxusername -# password: # mysearxpassword +# - username: mysearxusername +# password: JDJhJDE0JFdjUnQ5WWllcU8wa01xS0JBS2dlMy5zMEhRTmxqTXdIZmdjcTN6ZGFwRjJlYUdoSHAwRUhL # mysearxpassword diff --git a/roles/searx/handlers/main.yml b/roles/searx/handlers/main.yml index 3a6e6f6..d6f183e 100644 --- a/roles/searx/handlers/main.yml +++ b/roles/searx/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ searx_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update searx upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ searx_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update searx upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update searx upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update searx upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ searx_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (searx_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ searx_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (searx_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update searx upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ searx_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update searx upstream" + diff --git a/roles/searx/meta/main.yml b/roles/searx/meta/main.yml index 25dd7f7..bb6dde1 100644 --- a/roles/searx/meta/main.yml +++ b/roles/searx/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs searx as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/searx/tasks/main.yml b/roles/searx/tasks/main.yml index 75e7772..21b4281 100644 --- a/roles/searx/tasks/main.yml +++ b/roles/searx/tasks/main.yml @@ -7,7 +7,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update searx upstream" - name: Deploy searx to swarm @@ -17,7 +17,8 @@ prune: yes compose: - "{{ stack_compose }}" - become: true + become: yes tags: - docker-swarm notify: "update searx upstream" + diff --git 
a/roles/searx/templates/docker-stack.yml.j2 b/roles/searx/templates/docker-stack.yml.j2 index 9b853ac..6f9c774 100644 --- a/roles/searx/templates/docker-stack.yml.j2 +++ b/roles/searx/templates/docker-stack.yml.j2 @@ -6,14 +6,14 @@ services: networks: - "{{ docker_swarm_public_network_name }}" healthcheck: - test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1:8080"] + test: ["CMD", "wget", "-q", "--spider", "http://localhost:8080"] interval: 1m timeout: 10s retries: 3 start_period: 1m environment: - BIND_ADDRESS=0.0.0.0:8080 -{% if server_domain is not undefined and not none %} +{% if server_domain is not undefined and not none %} - "BASE_URL={{ (searx_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}" {% else %} - "BASE_URL={{ (searx_use_https == True) | ternary('https', 'http') }}://localhost/{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}" @@ -21,11 +21,6 @@ services: volumes: - /etc/localtime:/etc/localtime:ro - data:/etc/searx:rw -{% if searx_autoupdate is defined and searx_autoupdate %} - deploy: - labels: - - shepherd.autoupdate=true -{% endif %} volumes: data: diff --git a/roles/searx/vars/main.yml b/roles/searx/vars/main.yml index b55339a..9cdba41 100644 --- a/roles/searx/vars/main.yml +++ b/roles/searx/vars/main.yml @@ -1,4 +1,5 @@ --- + stack_name: searx stack_image: "searxng/searxng" diff --git a/roles/shaarli/README.md b/roles/shaarli/README.md index b6c56e9..f9e23f7 100644 --- a/roles/shaarli/README.md +++ b/roles/shaarli/README.md @@ -3,11 +3,11 @@ A simple and fast bookmark manager. Can be deployed in minutes and takes minimum amount of resources. -Be aware that shaarli installations can _not_ be fully automated. +Be aware that shaarli installations can *not* be fully automated. That means after running this ansible role you will still have to setup up the first run wizard and create a user and so forth (if not running with an existing data-store). Do this quickly after setup, -_especially_ if your instance is public-facing! +*especially* if your instance is public-facing! {: .alert .alert-warning} ## Defaults @@ -34,6 +34,7 @@ The docker image version to be used in stack creation. subdomain_alias: links ``` -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `links.yourdomain.com` - +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `links.yourdomain.com` - if this option is not set it will be served on `shaarli.yourdomain.com` instead. + diff --git a/roles/shaarli/defaults/main.yml b/roles/shaarli/defaults/main.yml index b72233c..6d676c3 100644 --- a/roles/shaarli/defaults/main.yml +++ b/roles/shaarli/defaults/main.yml @@ -1,15 +1,10 @@ --- -shaarli_version: release # they offer: latest and release (stable) versions + +shaarli_version: latest shaarli_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" shaarli_use_https: true -shaarli_autoupdate: true - # the subdomain link shaarli will be reachable under -subdomain_alias: shaarli - -# should we back up the data? 
-shaarli_backup_enable: true -shaarli_backup_cron: 0 45 3 * * * +subdomain_alias: links diff --git a/roles/shaarli/handlers/main.yml b/roles/shaarli/handlers/main.yml index 4f5434b..2887e93 100644 --- a/roles/shaarli/handlers/main.yml +++ b/roles/shaarli/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ shaarli_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update shaarli upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ shaarli_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update shaarli upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update shaarli upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update shaarli upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ shaarli_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (shaarli_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ shaarli_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (shaarli_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update shaarli upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ shaarli_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update shaarli upstream" + diff --git a/roles/shaarli/meta/main.yml b/roles/shaarli/meta/main.yml index 8ea1475..0be34a8 100644 --- a/roles/shaarli/meta/main.yml +++ b/roles/shaarli/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs shaarli as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/shaarli/tasks/main.yml b/roles/shaarli/tasks/main.yml index 8448ff3..b8eea52 100644 --- a/roles/shaarli/tasks/main.yml +++ b/roles/shaarli/tasks/main.yml @@ -7,7 +7,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update shaarli upstream" - name: Deploy shaarli to swarm @@ -17,7 +17,8 @@ prune: yes compose: - "{{ stack_compose }}" - become: true + become: yes tags: - docker-swarm notify: "update shaarli upstream" + diff --git a/roles/shaarli/templates/docker-stack.yml.j2 b/roles/shaarli/templates/docker-stack.yml.j2 index 0ee7f70..dc15fa2 100644 --- a/roles/shaarli/templates/docker-stack.yml.j2 +++ b/roles/shaarli/templates/docker-stack.yml.j2 @@ -4,7 +4,7 @@ services: app: image: "{{ stack_image }}:{{ shaarli_version }}" healthcheck: - test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:80"] + test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://localhost:80"] interval: 1m timeout: 10s 
retries: 3 @@ -14,28 +14,6 @@ services: volumes: - data:/var/www/shaarli/data - cache:/var/www/shaarli/cache -{% if shaarli_autoupdate is defined and shaarli_autoupdate %} - deploy: - labels: - - shepherd.autoupdate=true -{% endif %} - -{% if backup_enable is not undefined and not false and shaarli_backup_enable is not undefined and not false %} - backup: - image: mazzolino/restic - environment: - - "TZ={{ restic_timezone }}" - # go-cron starts w seconds - - "BACKUP_CRON={{ shaarli_backup_cron }}" - - "RESTIC_REPOSITORY={{ restic_repo }}" - - "AWS_ACCESS_KEY_ID={{ restic_s3_key }}" - - "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}" - - "RESTIC_PASSWORD={{ restic_pass }}" - - "RESTIC_BACKUP_TAGS=shaarli" - - "RESTIC_BACKUP_SOURCES=/volumes" - volumes: - - data:/volumes/shaarli_data:ro -{% endif %} volumes: data: diff --git a/roles/shaarli/vars/main.yml b/roles/shaarli/vars/main.yml index 37a348c..c23e67a 100644 --- a/roles/shaarli/vars/main.yml +++ b/roles/shaarli/vars/main.yml @@ -1,6 +1,7 @@ --- + stack_name: shaarli -stack_image: "ghcr.io/shaarli/shaarli" +stack_image: "shaarli/shaarli" stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/roles/shepherd/README.md b/roles/shepherd/README.md deleted file mode 100644 index ff26192..0000000 --- a/roles/shepherd/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# shepherd - -Monitor the deployed swarm containers for updates. -Will notify you when it found any update for any container. - -Can notify you through a wide variety of services using the apprise api. diff --git a/roles/shepherd/defaults/main.yml b/roles/shepherd/defaults/main.yml deleted file mode 100644 index 39e8fcb..0000000 --- a/roles/shepherd/defaults/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -shepherd_version: latest - -shepherd_tz: Europe/Berlin - -shepherd_ignored_services: label=shepherd.autoupdate=false -shepherd_filter_services: label=shepherd.autoupdate=true - -shepherd_sleeptime: 5m -shepherd_rollback_on_failure: true -shepherd_image_autoclean_limit: 5 - -shepherd_notification_targets: diff --git a/roles/shepherd/meta/main.yml b/roles/shepherd/meta/main.yml deleted file mode 100644 index b0824dd..0000000 --- a/roles/shepherd/meta/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -galaxy_info: - author: Marty Oehme - description: Apply docker swarm container updates - license: GPL-3.0-only - min_ansible_version: "2.9" - galaxy_tags: [] - -dependencies: - - docker-swarm diff --git a/roles/shepherd/tasks/main.yml b/roles/shepherd/tasks/main.yml deleted file mode 100644 index c2bef7f..0000000 --- a/roles/shepherd/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Deploy shepherd stack to swarm - community.general.docker_stack: - name: "{{ stack_name }}" - state: present - prune: yes - compose: - - "{{ stack_compose }}" - become: true - tags: - - docker-swarm diff --git a/roles/shepherd/templates/docker-stack.yml.j2 b/roles/shepherd/templates/docker-stack.yml.j2 deleted file mode 100644 index fdd5a50..0000000 --- a/roles/shepherd/templates/docker-stack.yml.j2 +++ /dev/null @@ -1,52 +0,0 @@ -version: '3.4' - -services: - app: - image: "{{ stack_image }}:{{ shepherd_version }}" - # healthcheck: - # test: ["CMD", "wget", "--spider", "-q", "127.0.0.1"] - # interval: 1m - # timeout: 10s - # retries: 3 - # start_period: 1m - command: serve - volumes: - - "/var/run/docker.sock:/var/run/docker.sock" - environment: - - "TZ={{ shepherd_tz }}" - - "SLEEP_TIME={{ shepherd_sleeptime }}" - - "IGNORELIST_SERVICES={{ shepherd_ignored_services }}" -{% if 
shepherd_filter_services is defined and not None %} - - "FILTER_SERVICES={{ shepherd_filter_services }}" -{% endif %} - - "ROLLBACK_ON_FAILURE={{ shepherd_rollback_on_failure }}" - - "IMAGE_AUTOCLEAN_LIMIT={{ shepherd_image_autoclean_limit }}" - - "VERBOSE=true" -{% if shepherd_notification_targets is defined and not None %} - - "APPRISE_SIDECAR_URL: notify:5000" -{% endif %} - networks: - - backend - deploy: - mode: replicated - replicas: 1 - placement: - constraints: - - node.role == manager - -{% if shepherd_notification_targets is defined and not None %} - notify: - image: mazzolino/apprise-microservice:latest - environment: - NOTIFICATION_URLS: {{ shepherd_notification_targets }} - networks: - - backend -{% endif %} - -volumes: - data: - -networks: - "{{ docker_swarm_public_network_name }}": - external: true - backend: diff --git a/roles/shepherd/vars/main.yml b/roles/shepherd/vars/main.yml deleted file mode 100644 index d8415bd..0000000 --- a/roles/shepherd/vars/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -stack_name: shepherd - -stack_image: "containrrr/shepherd" - -stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/roles/system-upgrade/tasks/Ubuntu.yml b/roles/system-upgrade/tasks/Ubuntu.yml index dc33abf..983e6b5 100644 --- a/roles/system-upgrade/tasks/Ubuntu.yml +++ b/roles/system-upgrade/tasks/Ubuntu.yml @@ -21,7 +21,7 @@ register: reboot_required_file stat: path: /var/run/reboot-required - get_checksum: false + get_md5: no tags: - os - reboot diff --git a/roles/traggo/README.md b/roles/traggo/README.md index 690a526..648933f 100644 --- a/roles/traggo/README.md +++ b/roles/traggo/README.md @@ -27,8 +27,8 @@ The docker image version to be used in stack creation. subdomain_alias: time ``` -If the deployed container should be served over a uri that is not the stack name. -By default, it will be set to `time.yourdomain.com` - +If the deployed container should be served over a uri that is not the stack name. +By default, it will be set to `time.yourdomain.com` - if this option is not set it will be served on `traggo.yourdomain.com` instead. ``` @@ -37,5 +37,5 @@ traggo_password: mytraggopassword ``` Set the default username and password combination on first container start. -If loading from an existing volume this does nothing, otherwise it sets the +If loading from an existing volume this does nothing, otherwise it sets the first user so you can instantly log in. 
diff --git a/roles/traggo/defaults/main.yml b/roles/traggo/defaults/main.yml index 63cfd5a..60b5b75 100644 --- a/roles/traggo/defaults/main.yml +++ b/roles/traggo/defaults/main.yml @@ -1,4 +1,5 @@ --- + traggo_version: latest traggo_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" diff --git a/roles/traggo/handlers/main.yml b/roles/traggo/handlers/main.yml index 6d7dc27..e072105 100644 --- a/roles/traggo/handlers/main.yml +++ b/roles/traggo/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ traggo_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update traggo upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ traggo_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update traggo upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update traggo upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update traggo upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ traggo_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (traggo_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ traggo_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (traggo_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update traggo upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ traggo_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update traggo upstream" + diff --git a/roles/traggo/meta/main.yml b/roles/traggo/meta/main.yml index 3e401d5..ddd5c7f 100644 --- a/roles/traggo/meta/main.yml +++ b/roles/traggo/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs traggo as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/traggo/tasks/main.yml b/roles/traggo/tasks/main.yml index d20a428..331e851 100644 --- a/roles/traggo/tasks/main.yml +++ b/roles/traggo/tasks/main.yml @@ -7,7 +7,7 @@ curl localhost:2019/id/{{ stack_name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update traggo upstream" - name: Deploy traggo to swarm @@ -17,7 +17,8 @@ prune: yes compose: - "{{ stack_compose }}" - become: true + become: yes tags: - docker-swarm notify: "update traggo upstream" + diff --git a/roles/traggo/vars/main.yml b/roles/traggo/vars/main.yml index 4fd87bb..114f166 100644 --- a/roles/traggo/vars/main.yml +++ b/roles/traggo/vars/main.yml @@ -1,4 +1,5 @@ --- + stack_name: traggo stack_image: "traggo/server" diff --git a/roles/wallabag/README.md b/roles/wallabag/README.md index a2f7cc4..44eefa9 100644 --- 
a/roles/wallabag/README.md +++ b/roles/wallabag/README.md @@ -6,7 +6,7 @@ Contains only a single deployed image and a couple of simple variables to set. ## Variables ``` -wallabag_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" +wallabag_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack.name }}" ``` The on-target directory where the proxy configuration file should be stashed. @@ -39,3 +39,4 @@ stack_image: "wallabag/wallabag" ``` The docker hub image to be use in provisioning. + diff --git a/roles/wallabag/defaults/main.yml b/roles/wallabag/defaults/main.yml index 55de954..a1b8116 100644 --- a/roles/wallabag/defaults/main.yml +++ b/roles/wallabag/defaults/main.yml @@ -1,11 +1,10 @@ --- + wallabag_version: latest -wallabag_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}" +wallabag_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack.name }}" wallabag_use_https: true -wallabag_autoupdate: true - # the subdomain link wallabag will be reachable under subdomain_alias: read diff --git a/roles/wallabag/handlers/main.yml b/roles/wallabag/handlers/main.yml index e693838..8695ea0 100644 --- a/roles/wallabag/handlers/main.yml +++ b/roles/wallabag/handlers/main.yml @@ -3,50 +3,51 @@ ansible.builtin.file: path: "{{ wallabag_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update wallabag upstream" - name: Update upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ wallabag_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update wallabag upstream" # figure out if upstream id exists -- name: check {{ stack_name }} upstream +- name: check {{ stack.name }} upstream community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl localhost:2019/id/{{ stack_name }}_upstream/ + curl localhost:2019/id/{{ stack.name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update wallabag upstream" # upstream already exists, patch it -- name: remove old {{ stack_name }} upstream +- name: remove old {{ stack.name }} upstream community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/ - become: true + curl -X DELETE localhost:2019/id/{{ stack.name }}_upstream/ + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update wallabag upstream" # upstream has to be created -- name: add {{ stack_name }} upstream +- name: add {{ stack.name }} upstream community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ wallabag_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (wallabag_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ wallabag_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (wallabag_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update wallabag upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ wallabag_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update wallabag upstream" + diff --git a/roles/wallabag/meta/main.yml b/roles/wallabag/meta/main.yml index d731e1a..ed54c0d 100644 --- 
a/roles/wallabag/meta/main.yml +++ b/roles/wallabag/meta/main.yml @@ -1,11 +1,14 @@ --- + galaxy_info: author: Marty Oehme description: Installs wallabag as a docker stack service license: GPL-3.0-only - min_ansible_version: "2.9" + min_ansible_version: 2.9 galaxy_tags: [] + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/wallabag/tasks/main.yml b/roles/wallabag/tasks/main.yml index 34e5859..f62d8b7 100644 --- a/roles/wallabag/tasks/main.yml +++ b/roles/wallabag/tasks/main.yml @@ -4,20 +4,22 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl localhost:2019/id/{{ stack_name }}_upstream/ + curl localhost:2019/id/{{ stack.name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update wallabag upstream" - name: Deploy wallabag to swarm community.general.docker_stack: - name: "{{ stack_name }}" + name: "{{ stack.name }}" state: present prune: yes compose: - - "{{ stack_compose }}" - become: true + - "{{ stack.compose }}" + when: stack is defined + become: yes tags: - docker-swarm notify: "update wallabag upstream" + diff --git a/roles/wallabag/templates/docker-stack.yml.j2 b/roles/wallabag/templates/docker-stack.yml.j2 index 387f3d7..ee7e307 100644 --- a/roles/wallabag/templates/docker-stack.yml.j2 +++ b/roles/wallabag/templates/docker-stack.yml.j2 @@ -15,16 +15,11 @@ services: - data:/var/www/wallabag/data environment: - SYMFONY__ENV__FOSUSER_REGISTRATION=false -{% if server_domain is not undefined and not none %} +{% if server_domain is not undefined and not none %} - "SYMFONY__ENV__DOMAIN_NAME={{ (wallabag_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}" {% else %} - SYMFONY__ENV__DOMAIN_NAME={{ (wallabag_use_https == True) | ternary('https', 'http') }}://localhost {% endif %} -{% if wallabag_autoupdate is defined and wallabag_autoupdate %} - deploy: - labels: - - shepherd.autoupdate=true -{% endif %} redis: image: redis:alpine diff --git a/roles/wallabag/templates/upstream.json.j2 b/roles/wallabag/templates/upstream.json.j2 index a20061f..6db9d1a 100644 --- a/roles/wallabag/templates/upstream.json.j2 +++ b/roles/wallabag/templates/upstream.json.j2 @@ -1,5 +1,5 @@ { - "@id": "{{ stack_name }}_upstream", + "@id": "{{ stack.name }}_upstream", {% if server_domain is not undefined and not none %} "match": [ { @@ -7,7 +7,7 @@ {% if subdomain_alias is not undefined and not none %} "{{ subdomain_alias }}.{{ server_domain }}" {% else %} - "{{ stack_name }}.{{ server_domain }}" + "{{ stack.name }}.{{ server_domain }}" {% endif %} ] } @@ -19,7 +19,7 @@ {% if subdomain_alias is not undefined and not none %} "/{{ subdomain_alias }}*" {% else %} - "/{{ stack_name }}*" + "/{{ stack.name }}*" {% endif %} ] } @@ -30,7 +30,7 @@ "handler": "reverse_proxy", "upstreams": [ { - "dial": "{{ stack_name }}_app:80" + "dial": "{{ stack.name }}_app:80" } ] } diff --git a/roles/wallabag/vars/main.yml b/roles/wallabag/vars/main.yml index c89919f..7e281a4 100644 --- a/roles/wallabag/vars/main.yml +++ b/roles/wallabag/vars/main.yml @@ -1,6 +1,7 @@ --- -stack_name: wallabag + +stack: + name: wallabag + compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" stack_image: "wallabag/wallabag" - -stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git 
a/roles/whoami/defaults/main.yml b/roles/whoami/defaults/main.yml index 9b749d6..f14f6aa 100644 --- a/roles/whoami/defaults/main.yml +++ b/roles/whoami/defaults/main.yml @@ -1,4 +1,5 @@ --- + whoami_version: latest whoami_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack.name }}" diff --git a/roles/whoami/handlers/main.yml b/roles/whoami/handlers/main.yml index 8ef4221..a1196bc 100644 --- a/roles/whoami/handlers/main.yml +++ b/roles/whoami/handlers/main.yml @@ -3,15 +3,15 @@ ansible.builtin.file: path: "{{ whoami_upstream_file_dir }}" state: directory - mode: "0755" - become: true + mode: '0755' + become: yes listen: "update whoami upstream" - name: update whoami upstream template ansible.builtin.template: src: upstream.json.j2 dest: "{{ whoami_upstream_file_dir }}/upstream.json" - become: true + become: yes listen: "update whoami upstream" # figure out if upstream id exists @@ -22,7 +22,7 @@ curl localhost:2019/id/{{ stack.name }}_upstream/ changed_when: False register: result - become: true + become: yes listen: "update whoami upstream" # upstream already exists, patch it @@ -31,7 +31,7 @@ container: "{{ caddy_container_id }}" command: > curl -X DELETE localhost:2019/id/{{ stack.name }}_upstream/ - become: true + become: yes when: (result.stdout | from_json)['error'] is not defined listen: "update whoami upstream" @@ -40,13 +40,14 @@ community.docker.docker_container_exec: container: "{{ caddy_container_id }}" command: > - curl -X POST -H "Content-Type: application/json" -d @{{ whoami_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (whoami_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ - become: true + curl -X POST -H "Content-Type: application/json" -d @{{ whoami_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (whoami_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/ + become: yes listen: "update whoami upstream" - name: Ensure upstream directory is gone again ansible.builtin.file: path: "{{ whoami_upstream_file_dir }}" state: absent - become: true + become: yes listen: "update whoami upstream" + diff --git a/roles/whoami/meta/main.yml b/roles/whoami/meta/main.yml index 196d3e4..bf789d1 100644 --- a/roles/whoami/meta/main.yml +++ b/roles/whoami/meta/main.yml @@ -1,4 +1,6 @@ --- + dependencies: + - docker - docker-swarm - - caddy_id + - caddy diff --git a/roles/whoami/tasks/main.yml b/roles/whoami/tasks/main.yml index 8723a63..f423e8e 100644 --- a/roles/whoami/tasks/main.yml +++ b/roles/whoami/tasks/main.yml @@ -7,7 +7,7 @@ curl localhost:2019/id/{{ stack.name }}_upstream/ register: result changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml) - become: true + become: yes notify: "update whoami upstream" - name: Deploy whoami to swarm @@ -18,7 +18,8 @@ compose: - "{{ stack.compose }}" when: stack is defined - become: true + become: yes tags: - docker-swarm notify: "update whoami upstream" + diff --git a/roles/whoami/vars/main.yml b/roles/whoami/vars/main.yml index 89447b2..80be9fa 100644 --- a/roles/whoami/vars/main.yml +++ b/roles/whoami/vars/main.yml @@ -1,4 +1,6 @@ --- + + stack: name: whoami compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}" diff --git a/site.yml b/site.yml index b2dbba2..5ed33b7 100644 --- a/site.yml +++ b/site.yml @@ -1,106 +1,80 @@ --- + - hosts: all tasks: - name: Make sure system is fully upgraded - import_role: + import_role: role: system-upgrade 
tags: system-upgrade - name: Make sure docker is installed - import_role: + import_role: role: docker tags: docker - name: Make sure docker-swarm is set up - import_role: + import_role: role: docker-swarm tags: docker-swarm - hosts: docker_swarm_manager_node tasks: - - name: Remove stacks without roles - import_role: - role: docker-clean - tags: - - docker-clean - - name: Install caddy reverse proxy - import_role: + import_role: role: caddy - tags: - - caddy + tags: caddy - - name: Install restic backup management - import_role: - role: restic - tags: - - restic - - - name: Grab caddy container id for all following services - import_role: - role: caddy_id - tags: - - caddy_id - - always + - name: Install whoami + import_role: + role: whoami + tags: whoami - name: Install wallabag - import_role: + import_role: role: wallabag tags: wallabag - name: Install miniflux - import_role: + import_role: role: miniflux tags: miniflux - name: Install searx - import_role: + import_role: role: searx tags: searx - name: Install traggo - import_role: + import_role: role: traggo - tags: - - traggo - - never + tags: traggo - name: Install monica - import_role: + import_role: role: monica - tags: - - monica - - never + tags: monica - name: Install nextcloud - import_role: + import_role: role: nextcloud tags: nextcloud - - name: Install linkding - import_role: - role: linkding - tags: linkding + - name: Install shaarli + import_role: + role: shaarli + tags: shaarli - name: Install landingpage - import_role: + import_role: role: landingpage tags: landingpage - - name: Install forgejo - import_role: - role: forgejo - tags: forgejo + - name: Install my personal blog + import_role: + role: blog + tags: blog - - name: Install ntfy - import_role: - role: ntfy - tags: - - ntfy - - never - - - name: Install shepherd - import_role: - role: shepherd - tags: - - shepherd + - name: Install gitea + import_role: + role: gitea + tags: gitea