Compare commits


No commits in common. "main" and "3a5b5680cfa0467c61ec01d440cf26c935da367d" have entirely different histories.

141 changed files with 815 additions and 1913 deletions

.gitignore vendored

@ -60,4 +60,3 @@ tags
# End of https://www.toptal.com/developers/gitignore/api/vim,linux,vagrant,ansible
development.yml
single-test.yml


@ -12,7 +12,7 @@ vagrant plugin install vagrant-hosts vagrant-hostsupdater
```
Additionally, since the test setup mirrors the production setup in that it makes use of subdomains for the individual hosted applications,
the server needs to be reachable under a domain name,
not just an IP address.
For now this is most simply accomplished through editing the hosts file, e.g.:
@ -23,20 +23,21 @@ For now this is most simply accomplished through editing the hosts file, e.g.:
```
This allows you to reach the main domain under `http(s)://ansible.test` and sets up two reachable subdomains.
Be aware that the hosts file does not support subdomain wildcards.
You will have to specify each hostname individually or use a tool such as `dnsmasq`.
Read more [here](https://serverfault.com/questions/118378/in-my-etc-hosts-file-on-linux-osx-how-do-i-do-a-wildcard-subdomain).
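As a sketch, a single `dnsmasq` directive covers the wildcard case (the address is an assumption; substitute your box's IP):
```sh
# resolves ansible.test and every subdomain of it to the test box
echo 'address=/ansible.test/192.168.56.10' | sudo tee /etc/dnsmasq.d/ansible-test.conf
sudo systemctl restart dnsmasq
```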
Then you are ready to run the complete infrastructure setup locally,
simply by executing `ansible-playbook site.yml`.
You can of course pick and choose what should be executed with host limits, tags, group variables, and so on,
but this should provide an easy way to see if a) the playbook is working as intended and b) what it does is useful.
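For example (a sketch; the group and variable choices are illustrative, only `site.yml` itself comes from this repository):
```sh
ansible-playbook site.yml                                 # full run
ansible-playbook site.yml --limit vagrant --tags caddy    # one group, one role
ansible-playbook site.yml -e caddy_tls_use_staging=yes    # override a variable
```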
## Deployment
Most variables to be changed should be set either through `group_variables` or `host_variables`.
For my deployment I have a `production` group under `group_variables` which houses both a `vars.yml` containing basic variables
(like `server_domain`, `caddy_email`, etc.)
and a `vault.yml` which houses everything that should ideally not be lying around in plain-text
(individual container and database passwords for the various roles, etc.).
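A minimal sketch of handling such a vault with `ansible-vault` (the path is assumed from the description above):
```sh
ansible-vault encrypt group_vars/production/vault.yml   # encrypt secrets in place
ansible-vault edit group_vars/production/vault.yml      # edit without plain-text on disk
ansible-playbook site.yml --ask-vault-pass              # supply the password at run time
```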


@ -1,21 +1,21 @@
---
docker_swarm_advertise_addr: eth1
caddy_use_debug: yes
caddy_tls_use_staging: yes
blog_use_https: no
caddy_use_https: no
forgejo_use_https: no
gitea_use_https: no
landingpage_use_https: no
miniflux_use_https: no
monica_use_https: no
nextcloud_use_https: no
ntfy_use_https: no
searx_use_https: no
shaarli_use_https: no
traggo_use_https: no
wallabag_use_https: no
whoami_use_https: no
server_domain: ansible.test

inv-prod.yml Normal file

@ -0,0 +1,8 @@
prod:
  hosts:
    ssdnodes:
docker_swarm_manager_node:
  hosts:
    ssdnodes:
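This inventory can then be targeted explicitly (standard `ansible-playbook` usage, with `site.yml` from the repository root):
```sh
ansible-playbook -i inv-prod.yml site.yml
```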

roles/blog/README.md Normal file

@ -0,0 +1,37 @@
# landingpage
The public face of my server.
Not much to see here honestly,
just a few simple lines of html explaining what this server is about and how to contact me.
I don't see anybody else benefiting massively from this role but me,
but if you want the same web presence go for it I suppose 😉
## Defaults
```
landingpage_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
```
The on-target directory where the proxy configuration file should be stashed.
```
landingpage_use_https: true
```
Whether the service should be reachable through http (port 80) or through https (port 443), provisioning an https certificate. Usually you will want this to stay `true`.
```
landingpage_version: latest
```
The docker image version to be used in stack creation.
```
subdomain_alias: www
```
Set this if the deployed container should be served under a URI other than the stack name.
By default it will be set to `www.yourdomain.com`;
if this option is not set it will be served on `landingpage.yourdomain.com` instead.


@ -0,0 +1,11 @@
---
# never got around to removing the master tag from the images
blog_version: master
blog_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
blog_use_https: true
# the subdomain link blog will be reachable under
# subdomain_alias: blog


@ -1,18 +1,18 @@
## Register reverse proxy
- name: Ensure upstream directory exists
ansible.builtin.file:
path: "{{ linkding_upstream_file_dir }}"
path: "{{ blog_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
listen: "update linkding upstream"
mode: '0755'
become: yes
listen: "update blog upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ linkding_upstream_file_dir }}/upstream.json"
become: true
listen: "update linkding upstream"
dest: "{{ blog_upstream_file_dir }}/upstream.json"
become: yes
listen: "update blog upstream"
# figure out if upstream id exists
- name: check {{ stack_name }} upstream
@ -22,8 +22,8 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: true
listen: "update linkding upstream"
become: yes
listen: "update blog upstream"
# upstream already exists, patch it
- name: remove old {{ stack_name }} upstream
@ -31,22 +31,23 @@
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: true
become: yes
when: (result.stdout | from_json)['error'] is not defined
listen: "update linkding upstream"
listen: "update blog upstream"
# upstream has to be created
- name: add {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ linkding_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (linkding_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
listen: "update linkding upstream"
curl -X POST -H "Content-Type: application/json" -d @{{ blog_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (blog_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: yes
listen: "update blog upstream"
- name: Ensure upstream directory is gone again
ansible.builtin.file:
path: "{{ linkding_upstream_file_dir }}"
path: "{{ blog_upstream_file_dir }}"
state: absent
become: true
listen: "update linkding upstream"
become: yes
listen: "update blog upstream"
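Condensed into plain curl, the handler chain above drives Caddy's admin API like this (a sketch run from inside the caddy container, with `blog` standing in for `{{ stack_name }}` and the default `/stacks` files dir):
```sh
# does a route with this @id already exist?
curl localhost:2019/id/blog_upstream/
# if so, remove the stale route first
curl -X DELETE localhost:2019/id/blog_upstream/
# then insert the freshly templated route at position 0
curl -X POST -H "Content-Type: application/json" \
  -d @/stacks/blog/upstream.json \
  localhost:2019/config/apps/http/servers/https/routes/0/
```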

roles/blog/meta/main.yml Normal file

@ -0,0 +1,14 @@
---
galaxy_info:
  author: Marty Oehme
  description: Installs my personal public facing landing page as a docker stack service
  license: GPL-3.0-only
  min_ansible_version: 2.9
  galaxy_tags: []
dependencies:
  - docker
  - docker-swarm
  - caddy


@ -1,5 +1,5 @@
---
## install linkding container
## install blog container
- name: Check upstream status
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
@ -7,17 +7,18 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: true
notify: "update linkding upstream"
become: yes
notify: "update blog upstream"
- name: Deploy linkding to swarm
- name: Deploy blog to swarm
community.general.docker_stack:
name: "{{ stack_name }}"
state: present
prune: yes
compose:
- "{{ stack_compose }}"
become: true
become: yes
tags:
- docker-swarm
notify: "update linkding upstream"
notify: "update blog upstream"


@ -0,0 +1,20 @@
version: '3.4'
services:
  app:
    image: "{{ stack_image }}:{{ blog_version }}"
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "localhost"]
      interval: 1m
      timeout: 10s
      retries: 3
      start_period: 1m
    entrypoint: sh -c "/docker-entrypoint.sh nginx -g 'daemon off;'"
    networks:
      - "{{ docker_swarm_public_network_name }}"
networks:
  "{{ docker_swarm_public_network_name }}":
    external: true


@ -9,6 +9,8 @@
{% else %}
"{{ stack_name }}.{{ server_domain }}"
{% endif %}
,
"{{ server_domain }}"
]
}
],


@ -1,6 +1,7 @@
---
stack_name: ntfy
stack_image: "binwiederhier/ntfy"
stack_name: blog
stack_image: "registry.gitlab.com/cloud-serve/blog"
stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"


@ -1,7 +1,7 @@
# Caddy
Caddy is the reverse proxy for all other services running on the infrastructure.
It was chosen for its relative ease of use,
interactive API and https-by-default setup.
## Variables
@ -48,27 +48,28 @@ caddy_version: alpine
Sets the docker image version to be used.
## Internal variables
```yaml
caddy_stack:
  name: caddy
  compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"
```
Defines the actual docker stack which will later run on the target.
The name can be changed and will be used as a proxy target (`caddy.mydomain.com` or `192.168.1.1/caddy`) ---
though to be clear, there is currently no intention to expose caddy to the web.\
The compose option defines which template to use for the `docker-stack.yml` file. You can either change options for the stack in the template file,
or directly here like the following:
```yaml
compose:
  - "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"
  - version: "3"
    services:
      another-container:
        image: nginx:latest
        # ...
```
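After a run, the resulting stack can be sanity-checked on the target with standard swarm commands, e.g.:
```sh
docker stack ps caddy            # per-task state of the stack's services
docker service logs caddy_app    # caddy's own log output
```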


@ -1,5 +1,6 @@
---
caddy_version: 2.8.4-alpine # tag exact version to avoid surprising container renewals
caddy_version: alpine
caddy_caddyfile_dir: "{{ docker_stack_files_dir }}/caddy"
caddy_use_debug: no
@ -8,4 +9,3 @@ caddy_use_https: yes
caddy_tls_use_staging: no
# caddy_email: your@email.here
# caddy_zerossl_api_key: your-zerossl-key-here-its-free


@ -1,3 +1,5 @@
---
dependencies:
- docker
- docker-swarm


@ -5,9 +5,9 @@
ansible.builtin.file:
path: "{{ caddy_caddyfile_dir }}"
state: directory
mode: "0755"
mode: '0755'
become: true
tags:
- fs
- name: Ensure Caddyfile exists
@ -27,9 +27,47 @@
compose:
- "{{ caddy_stack.compose }}"
when: caddy_stack is defined
become: true
become: yes
tags:
- docker-swarm
- name: Get caddy container info
ansible.builtin.command:
cmd: docker ps -q -f name={{ caddy_stack.name }}
become: yes
# bringing up the container takes some time, we have to wait
until: caddy_container_info['rc'] == 0 and caddy_container_info['stdout'] | length >= 1
retries: 5
delay: 10
changed_when: False
register: caddy_container_info
- name: Register caddy container id
ansible.builtin.set_fact: caddy_container_id={{ caddy_container_info['stdout'] }}
notify:
- debug caddy container
# FIXME this should be taken care of in Dockerfile not here
- name: Ensure caddy curl available
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
apk add curl
become: yes
register: result
changed_when: "'Installing' in result.stdout"
- name: Ensure caddy api is responsive
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/config/
become: yes
until: result.rc == 0
when: caddy_use_api == True
changed_when: False
register: result
# TODO FIXME UP
# - name: Allow access to services
# firewalld:


@ -51,19 +51,17 @@
{% if caddy_tls_use_staging is sameas true %}
"ca": "https://acme-staging-v02.api.letsencrypt.org/directory",
{% endif %}
{%- if caddy_email is not undefined and not none %}
"email": "{{ caddy_email }}",
{% endif %}
"module": "acme"
{%- if caddy_zerossl_api_key is not undefined and not none %}
},
{
"api_key": "{{ caddy_zerossl_api_key }}",
{%- if caddy_email is not undefined and not none %}
"email": "{{ caddy_email }}",
{% endif %}
"module": "zerossl"
}
{% else %}
}
{% endif %}
]
}
]


@ -5,7 +5,7 @@ services:
image: caddy:{{ caddy_version }}
command: caddy run --config /etc/caddy/config.json
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:2019/metrics"]
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://localhost:2019/metrics"]
interval: 1m
timeout: 10s
retries: 3


@ -1,4 +1,5 @@
---
caddy_stack:
name: caddy
compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"


@ -1,83 +0,0 @@
# Caddy
Caddy is the reverse proxy for all other services running on the infrastructure.
It was chosen for its relative ease of use,
interactive API and https-by-default setup.
## Variables
```
caddy_caddyfile_dir: "{{ docker_stack_files_dir }}/caddy"
```
Sets up the on-target directory where important caddy files should be stored.
```
caddy_email: <your@email.here>
```
The e-mail address to be used when provisioning https certificates. I believe caddy will theoretically work and provision you with certificates even without an e-mail, but I would strongly urge providing one.
```
caddy_tls_use_staging: no
```
If turned on, caddy will use the staging servers of the acme certificate service, which is useful for testing and playing around with https (due to higher API limits and less severe restrictions).
```
caddy_use_api: yes
```
If turned off, disables the admin api for caddy. This should only be done if no other services are intended to be provisioned on the target, since most other service stacks rely on the API to set up their proxy targets.
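A quick responsiveness check, mirroring what the role's own tasks do from inside the caddy container:
```sh
curl localhost:2019/config/
```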
```
caddy_use_debug: no
```
If true, will turn on caddy's debug logging.
```
caddy_use_https: yes
```
If turned off, disables all auto-provisioning of https certificates by caddy.
```
caddy_version: alpine
```
Sets the docker image version to be used.
## Internal variables
```yaml
caddy_stack:
  name: caddy
  compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"
```
Defines the actual docker stack which will later run on the target.
The name can be changed and will be used as a proxy target (`caddy.mydomain.com` or `192.168.1.1/caddy`) ---
though to be clear, there is currently no intention to expose caddy to the web.\
The compose option defines which template to use for the `docker-stack.yml` file. You can either change options for the stack in the template file,
or directly here like the following:
```yaml
compose:
  - "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"
  - version: "3"
    services:
      another-container:
        image: nginx:latest
        # ...
```
```yaml
caddy_http_server_name: http
```
```yaml
caddy_https_server_name: https
```
The internal representation of the http and https servers respectively.


@ -1,3 +0,0 @@
---
dependencies:
- docker-swarm


@ -1,39 +0,0 @@
---
# get the caddy container id for all other containers
- name: Get caddy container info
ansible.builtin.command:
cmd: docker ps -q -f name={{ caddy_stack.name }}
become: true
# bringing up the container takes some time, we have to wait
until: caddy_container_info['rc'] | default('') == 0 and caddy_container_info['stdout'] | length >= 1
retries: 5
delay: 10
changed_when: False
register: caddy_container_info
- name: Register caddy container id
ansible.builtin.set_fact: caddy_container_id={{ caddy_container_info['stdout'] }}
notify:
- debug caddy container
# FIXME this should be taken care of in Dockerfile not here
- name: Ensure caddy curl available
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
apk add curl
become: true
register: result
changed_when: "'Installing' in result.stdout"
- name: Ensure caddy api is responsive
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/config/
become: true
until: result.rc | default('') == 0
when: caddy_use_api == True
changed_when: False
register: result


@ -1,72 +0,0 @@
{
{% if caddy_use_api is sameas false %}
"admin": {
"disabled": true
},
{% endif %}
{% if caddy_use_debug is sameas true %}
"logging": {
"logs": {
"default": {
"level": "DEBUG"
}
}
},
{% endif %}
"apps": {
"http": {
"servers": {
"{{ caddy_http_server_name }}": {
"listen": [
":80"
],
"routes": []
{% if caddy_use_https is sameas false %},
"automatic_https": {
"disable": true
}
{% endif %}
},
"{{ caddy_https_server_name }}": {
"listen": [
":443"
],
"routes": []
{% if caddy_use_https is sameas false %},
"automatic_https": {
"disable": true
}
{% endif %}
}
}
}
{% if caddy_use_https is sameas true %},
"tls": {
"automation": {
"policies": [
{
"subjects": [],
"issuers": [
{
{% if caddy_tls_use_staging is sameas true %}
"ca": "https://acme-staging-v02.api.letsencrypt.org/directory",
{% endif %}
{%- if caddy_email is not undefined and not none %}
"email": "{{ caddy_email }}",
{% endif %}
"module": "acme"
},
{
{%- if caddy_email is not undefined and not none %}
"email": "{{ caddy_email }}",
{% endif %}
"module": "zerossl"
}
]
}
]
}
}
{% endif %}
}
}


@ -1,30 +0,0 @@
version: "3.7"
services:
app:
image: caddy:{{ caddy_version }}
command: caddy run --config /etc/caddy/config.json
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:2019/metrics"]
interval: 1m
timeout: 10s
retries: 3
start_period: 1m
ports:
- "80:80"
- "443:443"
volumes:
- "{{ caddy_caddyfile_dir }}:/etc/caddy"
- "{{ docker_stack_files_dir }}:/stacks:ro"
- data:/data
- config:/config
networks:
- "{{ docker_swarm_public_network_name }}"
volumes:
data:
config:
networks:
"{{ docker_swarm_public_network_name }}":
external: true


@ -1,5 +0,0 @@
---
caddy_stack:
  name: caddy
caddy_use_api: yes # if no turns off api interface; it is *required* for other swarm roles to be routed


@ -1,5 +0,0 @@
# diun
Monitor the deployed swarm containers for updates.
It will notify you when it finds an update for any container.
It can (currently) notify you either through mail or on matrix.


@ -1,26 +0,0 @@
---
diun_version: 4
diun_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
diun_use_https: true
# the subdomain link diun will be reachable under
subdomain_alias: diun
diun_tz: Europe/Berlin
diun_log_level: info
diun_watch_swarm_by_default: true
diun_notif_mail_host: localhost
diun_notif_mail_port: 25
# diun_notif_mail_username: required for mail
# diun_notif_mail_password: required for mail
# diun_notif_mail_from: required for mail
# diun_notif_mail_to: required for mail
diun_notif_matrix_url: "https://matrix.org"
#diun_notif_matrix_user: required for matrix
#diun_notif_matrix_password: required for matrix
#diun_notif_matrix_roomid: required for matrix


@ -1,10 +0,0 @@
---
galaxy_info:
  author: Marty Oehme
  description: Notify on any docker swarm container updates
  license: GPL-3.0-only
  min_ansible_version: "2.9"
  galaxy_tags: []
dependencies:
  - docker-swarm


@ -1,12 +0,0 @@
---
## install diun container
- name: Deploy diun to swarm
community.general.docker_stack:
name: "{{ stack_name }}"
state: present
prune: yes
compose:
- "{{ stack_compose }}"
become: true
tags:
- docker-swarm


@ -1,51 +0,0 @@
version: '3.4'
services:
app:
image: crazymax/diun:latest
# healthcheck:
# test: ["CMD", "wget", "--spider", "-q", "127.0.0.1"]
# interval: 1m
# timeout: 10s
# retries: 3
# start_period: 1m
command: serve
volumes:
- "data:/data"
- "/var/run/docker.sock:/var/run/docker.sock"
environment:
- "TZ={{ diun_tz }}"
- "LOG_LEVEL={{ diun_log_level }}"
- "LOG_JSON=false"
- "DIUN_WATCH_WORKERS=20"
- "DIUN_WATCH_SCHEDULE=0 */6 * * *"
- "DIUN_WATCH_JITTER=30s"
- "DIUN_PROVIDERS_SWARM=true"
- "DIUN_PROVIDERS_SWARM_WATCHBYDEFAULT={{ diun_watch_swarm_by_default }}"
{% if diun_notif_matrix_user is not undefined and not None and diun_notif_matrix_password is not undefined and not None and diun_notif_matrix_roomid is not undefined and not None %}
- "DIUN_NOTIF_MATRIX_HOMESERVERURL={{ diun_notif_matrix_url }}"
- "DIUN_NOTIF_MATRIX_USER={{ diun_notif_matrix_user }}"
- "DIUN_NOTIF_MATRIX_PASSWORD={{ diun_notif_matrix_password }}"
- "DIUN_NOTIF_MATRIX_ROOMID={{ diun_notif_matrix_roomid }}"
{% endif %}
{% if diun_notif_mail_username is not undefined and not None and diun_notif_mail_password is not undefined and not None and diun_notif_mail_from is not undefined and not None and diun_notif_mail_to is not undefined and not None %}
- "DIUN_NOTIF_MAIL_HOST={{ diun_notif_mail_host }}"
- "DIUN_NOTIF_MAIL_PORT={{ diun_notif_mail_port }}"
- "DIUN_NOTIF_MAIL_USERNAME={{ diun_notif_mail_username }}"
- "DIUN_NOTIF_MAIL_PASSWORD={{ diun_notif_mail_password }}"
- "DIUN_NOTIF_MAIL_FROM={{ diun_notif_mail_from }}"
- "DIUN_NOTIF_MAIL_TO={{ diun_notif_mail_to }}"
{% endif %}
# deploy:
# mode: replicated
# replicas: 1
# placement:
# constraints:
# - node.role == manager
volumes:
data:
networks:
"{{ docker_swarm_public_network_name }}":
external: true


@ -1,6 +0,0 @@
---
stack_name: diun
stack_image: "crazymax/diun"
stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"


@ -1,12 +0,0 @@
---
- name: Get running docker stacks
community.docker.docker_stack_info:
register: running_stacks
become: true
- name: Remove stacks without matching role
community.docker.docker_stack:
name: "{{ item.Name }}"
state: "absent"
loop: "{{ running_stacks.results | rejectattr('Name', 'in', role_names) }}"
become: true


@ -1,3 +1,5 @@
---
docker_stack_files_dir: /stacks
docker_swarm_public_network_name: public


@ -1,3 +0,0 @@
---
dependencies:
- docker


@ -28,7 +28,7 @@
ansible.builtin.file:
path: "{{ docker_stack_files_dir }}"
state: directory
mode: "0755"
mode: '0755'
become: true
tags:
- fs


@ -4,4 +4,4 @@
state: started
enabled: yes
daemon_reload: yes
become: true
become: yes


@ -1,7 +1,7 @@
- name: Ensure requirements installed
ansible.builtin.package:
name: "{{ requisites }}"
state: latest
state: present
update_cache: yes
tags:
- apt
@ -11,14 +11,11 @@
- name: Ensure docker GPG apt key exists
apt_key:
url: "https://download.docker.com/linux/ubuntu/gpg"
url: https://download.docker.com/linux/ubuntu/gpg
state: present
tags:
- apt
- repository
# FIXME: Needs an 'until:' defined for the retries to actually work
retries: 3
delay: 5
become: true
- name: Ensure docker repository exists
@ -30,18 +27,7 @@
- repository
become: true
- name: docker-ce is installed
ansible.builtin.package:
name: "{{ packages }}"
state: present
tags:
- apt
- download
- packages
become: true
notify: Handle docker daemon
- name: Latest docker-ce is installed
- name: Ensure latest docker-ce installed
ansible.builtin.package:
name: "{{ packages }}"
state: latest
@ -49,14 +35,12 @@
- apt
- download
- packages
- docker
- never
become: true
notify: Handle docker daemon
- name: Ensure docker requisites for python installed
pip:
name:
- docker
- jsondiff
- pyyaml


@ -1,40 +0,0 @@
# forgejo
A relatively light-weight git hosting server.
## Defaults
```
forgejo_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
```
The on-target directory where the proxy configuration file should be stashed.
```
forgejo_use_https: true
```
Whether the service should be reachable through http (port 80) or through https (port 443), provisioning an https certificate. Usually you will want this to stay `true`.
```
forgejo_version: latest
```
The docker image version to be used in stack creation.
```
subdomain_alias: git
```
Set this if the deployed container should be served under a URI other than the stack name.
By default it will be set to `git.yourdomain.com`;
if this option is not set it will be served on `forgejo.yourdomain.com` instead.
For now, forgejo still needs to be set up manually after installation.
This could be automated with the help of these commands:
```sh
docker run --name forgejo -p 8080:3000 -e FORGEJO__security__INSTALL_LOCK=true -d codeberg.org/forgejo/forgejo:7
docker exec forgejo forgejo migrate
docker exec forgejo forgejo admin user create --admin --username root --password admin1234 --email admin@example.com
```
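To verify the result afterwards, the same admin subcommand the role's tasks use can list existing admins:
```sh
docker exec forgejo forgejo admin user list --admin
```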


@ -1,50 +0,0 @@
---
forgejo_version: 11
forgejo_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
forgejo_use_https: true
# the subdomain link forgejo will be reachable under
subdomain_alias: git
subdomain_ci_alias: ci
forgejo_db_database: forgejo
forgejo_db_username: forgejo
forgejo_db_password: forgejo
forgejo_app_admin_username: Myforgejousername # can not be set to admin in Forgejo
forgejo_app_admin_password: Myforgejopassword
forgejo_app_admin_email: myadmin@mydomain.mytld
# forgejo_smtp_addr: domain.com
# forgejo_smtp_port: 465
# forgejo_smtp_username: my@username.com
# forgejo_smtp_password: <password>
# forgejo_smtp_protocol: smtps # can be one of starttls | smtps
forgejo_use_lfs: false
forgejo_lfs_max_filesize: 0
forgejo_lfs_http_auth_expiry: 24h
# forgejo_lfs_jwt_secret:
forgejo_use_ci: false
# forgejo_ci_github_client:
# forgejo_ci_github_secret:
# forgejo_ci_gitlab_client:
# forgejo_ci_gitlab_secret:
# forgejo_ci_forgejo_client:
# forgejo_ci_forgejo_secret:
# forgejo_ci_gitea_url:
# forgejo_ci_gitea_client:
# forgejo_ci_gitea_secret:
forgejo_use_s3: false
forgejo_s3_use_ssl: true
forgejo_s3_bucket_lookup: auto # auto|dns|path
forgejo_s3_checksum: default # default|md5
# forgejo_s3_endpoint:
# forgejo_s3_region:
# forgejo_s3_key:
# forgejo_s3_secret:
# forgejo_s3_bucket:


@ -1,100 +0,0 @@
- name: Add admin user
community.docker.docker_container_exec:
container: "{{ forgejo_app_container_name['stdout'] }}"
command: >
forgejo admin user create --admin --username {{ forgejo_app_admin_username }} --password {{ forgejo_app_admin_password }} --email {{ forgejo_app_admin_email }}
user: git
become: true
listen: "no admin user"
## Register reverse proxy
- name: Upstream directory exists
ansible.builtin.file:
path: "{{ forgejo_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
listen: "update forgejo upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ forgejo_upstream_file_dir }}/upstream.json"
mode: "0600"
become: true
listen: "update forgejo upstream"
- name: Update ci upstream template
ansible.builtin.template:
src: upstream_ci.json.j2
dest: "{{ forgejo_upstream_file_dir }}/upstream_ci.json"
mode: "0600"
become: true
listen: "update forgejo upstream"
# figure out if upstream id exists
- name: check {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: true
listen: "update forgejo upstream"
# upstream already exists, patch it
- name: remove old {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: true
when: (result.stdout | from_json)['error'] is not defined
listen: "update forgejo upstream"
# upstream has to be created
- name: add {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ forgejo_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (forgejo_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
listen: "update forgejo upstream"
# figure out if upstream id exists
- name: check {{ stack_name }}_ci upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/id/{{ stack_name }}_ci_upstream/
changed_when: False
register: result
become: true
listen: "update forgejo upstream"
# upstream for ci already exists, patch it
- name: remove old {{ stack_name }}_ci upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_ci_upstream/
become: true
when: (result.stdout | from_json)['error'] is not defined
listen: "update forgejo upstream"
# upstream for ci has to be created
- name: add {{ stack_name }}_ci upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ forgejo_upstream_file_dir }}/upstream_ci.json localhost:2019/config/apps/http/servers/{{ (forgejo_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
listen: "update forgejo upstream"
- name: Ensure upstream directory is gone again
ansible.builtin.file:
path: "{{ forgejo_upstream_file_dir }}"
state: absent
become: true
listen: "update forgejo upstream"


@ -1,11 +0,0 @@
---
## install requisites
- name: Ensure openssl installed
ansible.builtin.package:
name: "openssl"
state: present
become: true
tags:
- apt
- download
- packages


@ -1,132 +0,0 @@
---
## Prepare woodpecker ci
- name: "Select tasks for {{ ansible_distribution }} {{ ansible_distribution_major_version }}"
include_tasks: "{{ distribution }}"
with_first_found:
- "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ ansible_distribution }}.yml"
- "{{ ansible_os_family }}.yml"
loop_control:
loop_var: distribution
when: forgejo_use_ci == True
# TODO only generate when no existing (check with docker inspect?)
- name: Generate agent key
ansible.builtin.shell: openssl rand -hex 32
register: forgejo_woodpecker_agent_secret
when: forgejo_use_ci == True
- name: Set agent key
ansible.builtin.set_fact:
forgejo_woodpecker_agent_secret: "{{ forgejo_woodpecker_agent_secret.stdout }}"
when: forgejo_woodpecker_agent_secret.stdout is not undefined and not None
## Prepare forgejo
- name: Ensure git user exists with ssh key
ansible.builtin.user:
name: "{{ forgejo_git_username }}"
generate_ssh_key: yes
ssh_key_type: rsa
ssh_key_bits: 4096
ssh_key_comment: "Forgejo Host Key"
become: true
register: git_user
- name: Ensure git passthrough command directory exists
ansible.builtin.file:
path: "/app/forgejo/"
state: directory
mode: "0770"
owner: "{{ git_user['uid'] }}"
group: "{{ git_user['group'] }}"
become: true
- name: Passthrough git command is in right location
ansible.builtin.copy:
src: forgejo
dest: "/app/forgejo/forgejo"
owner: "{{ git_user['uid'] }}"
group: "{{ git_user['group'] }}"
mode: "0750"
become: true
- name: Host machine forgejo command points to passthrough command
ansible.builtin.file:
state: link
src: "/app/forgejo/forgejo"
dest: "/usr/local/bin/forgejo"
become: true
- name: Fetch keyfile
fetch:
src: "{{ git_user['home'] }}/.ssh/id_rsa.pub"
dest: "buffer/{{ansible_hostname}}-id_rsa.pub"
flat: yes
become: true
- name: Ensure git user has its own key authorized for access
ansible.posix.authorized_key:
user: "{{ git_user['name'] }}"
state: present
key: "{{ lookup('file', 'buffer/{{ ansible_hostname }}-id_rsa.pub') }}"
become: true
- name: Clean up buffer dir
ansible.builtin.file:
path: buffer
state: absent
delegate_to: localhost
## install forgejo container
- name: Check upstream status
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: true
notify: "update forgejo upstream"
- name: Deploy forgejo to swarm
community.general.docker_stack:
name: "{{ stack_name }}"
state: present
prune: yes
compose:
- "{{ stack_compose }}"
become: true
tags:
- docker-swarm
register: forgejo_deployment
notify: "update forgejo upstream"
- name: Wait a minute for forgejo to become healthy
wait_for:
timeout: 55
delegate_to: localhost
when: forgejo_deployment is changed
- name: Get app container info
ansible.builtin.command:
cmd: docker ps -q -f name={{ stack_name }}_app
become: true
until: forgejo_app_container_name['rc'] | default('') == 0 and forgejo_app_container_name['stdout'] | length >= 1
retries: 10
delay: 10
changed_when: False
register: forgejo_app_container_name
- name: Look for existing admin user
community.docker.docker_container_exec:
container: "{{ forgejo_app_container_name['stdout'] }}"
user: git
command: >
forgejo admin user list --admin
until: forgejo_admin_list is defined and forgejo_admin_list['rc'] | default('') == 0
retries: 15
delay: 20
become: true
register: forgejo_admin_list
changed_when: forgejo_admin_list['stdout_lines'] | length <= 1 and 'Username' in forgejo_admin_list['stdout']
notify: "no admin user"


@ -1,146 +0,0 @@
version: '3.4'
services:
app:
image: "{{ stack_image }}:{{ forgejo_version }}"
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "127.0.0.1:3000"]
interval: 1m
timeout: 10s
retries: 3
start_period: 1m
volumes:
- data:/data
- /home/git/.ssh:/data/git/.ssh
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
environment:
- USER_UID={{ git_user['uid'] }}
- USER_GID={{ git_user['group'] }}
- FORGEJO__database__DB_TYPE=postgres
- FORGEJO__database__HOST=db:5432
- "FORGEJO__database__NAME={{ forgejo_db_database }}"
- "FORGEJO__database__USER={{ forgejo_db_username }}"
- "FORGEJO__database__PASSWD={{ forgejo_db_password }}"
- "FORGEJO__server__ROOT_URL={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}"
- "FORGEJO__server__SSH_DOMAIN={{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}"
- FORGEJO__server__LANDING_PAGE=explore
- FORGEJO__service__DISABLE_REGISTRATION=true
{% if forgejo_app_admin_username is not undefined and not None and forgejo_app_admin_password is not undefined and not None %}
- FORGEJO__security__INSTALL_LOCK=true
{% endif %}
{% if forgejo_smtp_addr is not undefined and not None and forgejo_smtp_port is not undefined and not None and forgejo_smtp_username is not undefined and not None and forgejo_smtp_password is not undefined and not None %}
- FORGEJO__mailer__ENABLED=true
- FORGEJO__service__ENABLE_NOTIFY_MAIL=true
- FORGEJO__mailer__FROM=forgejo@{{ server_domain }}
- FORGEJO__mailer__TYPE=smtp
- FORGEJO__mailer__SMTP_ADDR={{ forgejo_smtp_addr }}
- FORGEJO__mailer__SMTP_PORT={{ forgejo_smtp_port }}
{% if forgejo_smtp_protocol is not undefined and not none %}
- FORGEJO__mailer__PROTOCOL={{ forgejo_smtp_protocol }}
{% endif %}
- FORGEJO__mailer__USER={{ forgejo_smtp_username }}
- FORGEJO__mailer__PASSWD={{ forgejo_smtp_password }}
{% endif %}
{% if forgejo_use_lfs %}
- FORGEJO__server__LFS_START_SERVER=true
{% if forgejo_lfs_jwt_secret is not undefined and not none %}
- FORGEJO__server__LFS_JWT_SECRET={{ forgejo_lfs_jwt_secret }}
{% endif %}
- FORGEJO__server__LFS_HTTP_AUTH_EXPIRY={{ forgejo_lfs_http_auth_expiry }}
- FORGEJO__server__LFS_MAX_FILE_SIZE={{ forgejo_lfs_max_filesize }}
{% endif %}
{% if forgejo_use_s3 %}
- FORGEJO__storage__STORAGE_TYPE="minio"
- FORGEJO__storage__MINIO_USE_SSL={{ forgejo_s3_use_ssl }}
- FORGEJO__storage__MINIO_BUCKET_LOOKUP={{ forgejo_s3_bucket_lookup }}
- FORGEJO__storage__MINIO_ENDPOINT={{ forgejo_s3_endpoint }}
- FORGEJO__storage__MINIO_ACCESS_KEY_ID={{ forgejo_s3_key }}
- FORGEJO__storage__MINIO_SECRET_ACCESS_KEY={{ forgejo_s3_secret }}
- FORGEJO__storage__MINIO_BUCKET={{ forgejo_s3_bucket }}
- FORGEJO__storage__MINIO_LOCATION={{ forgejo_s3_region }}
- FORGEJO__storage__MINIO_CHECKSUM_ALGORITHM={{ forgejo_s3_checksum }}
{% endif %}
networks:
- "{{ docker_swarm_public_network_name }}"
- backend
ports:
- "127.0.0.1:2222:22"
db:
image: postgres:13
healthcheck:
test: ["CMD", "pg_isready", "-q", "-U", "{{ forgejo_db_username }}"]
interval: 1m
timeout: 10s
retries: 3
start_period: 1m
volumes:
- db:/var/lib/postgresql/data
networks:
- backend
environment:
- POSTGRES_USER={{ forgejo_db_username }}
- POSTGRES_PASSWORD={{ forgejo_db_password }}
- POSTGRES_DB={{ forgejo_db_database }}
{% if forgejo_use_ci %}
wp-server:
image: woodpeckerci/woodpecker-server:v3
networks:
- "{{ docker_swarm_public_network_name }}"
- backend
volumes:
- woodpecker:/var/lib/woodpecker/
environment:
- WOODPECKER_OPEN=true
- "WOODPECKER_HOST={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_ci_alias is not undefined and not none) | ternary(subdomain_ci_alias, stack_name + '_ci') }}.{{server_domain}}"
- WOODPECKER_AGENT_SECRET={{ forgejo_woodpecker_agent_secret }}
{% if forgejo_ci_github_client is not undefined and not None and forgejo_ci_github_secret is not undefined and not None %}
- WOODPECKER_GITHUB=true
- WOODPECKER_GITHUB_CLIENT={{ forgejo_ci_github_client }}
- WOODPECKER_GITHUB_SECRET={{ forgejo_ci_github_secret }}
{% endif %}
{% if forgejo_ci_gitlab_client is not undefined and not None and forgejo_ci_gitlab_secret is not undefined and not None %}
- WOODPECKER_GITLAB=true
- WOODPECKER_GITLAB_CLIENT={{ forgejo_ci_gitlab_client }}
- WOODPECKER_GITLAB_SECRET={{ forgejo_ci_gitlab_secret }}
{% endif %}
{% if forgejo_ci_forgejo_client is not undefined and not None and forgejo_ci_forgejo_secret is not undefined and not None %}
- WOODPECKER_FORGEJO=true
- "WOODPECKER_FORGEJO_URL={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}"
- WOODPECKER_FORGEJO_CLIENT={{ forgejo_ci_forgejo_client }}
- WOODPECKER_FORGEJO_SECRET={{ forgejo_ci_forgejo_secret }}
{% endif %}
{% if forgejo_ci_gitea_url is not undefined and not None and forgejo_ci_gitea_client is not undefined and not None and forgejo_ci_gitea_secret is not undefined and not None %}
- WOODPECKER_GITEA=true
- "WOODPECKER_GITEA_URL={{ (forgejo_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}"
- WOODPECKER_GITEA_CLIENT={{ forgejo_ci_gitea_client }}
- WOODPECKER_GITEA_SECRET={{ forgejo_ci_gitea_secret }}
{% endif %}
wp-agent:
image: woodpeckerci/woodpecker-agent:v3
networks:
- backend
command: agent
volumes:
- woodpecker-agent-config:/etc/woodpecker
- /var/run/docker.sock:/var/run/docker.sock
environment:
- WOODPECKER_SERVER=wp-server:9000
- WOODPECKER_AGENT_SECRET={{ forgejo_woodpecker_agent_secret }}
{% endif %}
volumes:
data:
db:
woodpecker:
woodpecker-agent-config:
networks:
"{{ docker_swarm_public_network_name }}":
external: true
backend:


@ -1,39 +0,0 @@
{
"@id": "{{ stack_name }}_ci_upstream",
{% if server_domain is not undefined and not none %}
"match": [
{
"host": [
{% if subdomain_ci_alias is not undefined and not none %}
"{{ subdomain_ci_alias }}.{{ server_domain }}"
{% else %}
"{{ stack_name }}_ci.{{ server_domain }}"
{% endif %}
]
}
],
{% else %}
"match": [
{
"path": [
{% if subdomain_ci_alias is not undefined and not none %}
"/{{ subdomain_ci_alias }}*"
{% else %}
"/{{ stack_name }}_ci*"
{% endif %}
]
}
],
{% endif %}
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "{{ stack_name }}_wp-server:8000"
}
]
}
]
}


@ -1,8 +0,0 @@
---
stack_name: forgejo
stack_image: "codeberg.org/forgejo/forgejo"
stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"
forgejo_git_username: git

roles/gitea/README.md Normal file

@ -0,0 +1,41 @@
# gitea
A relatively light-weight git hosting server.
## Defaults
```
gitea_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
```
The on-target directory where the proxy configuration file should be stashed.
```
gitea_use_https: true
```
Whether the service should be reachable through http (port 80) or through https (port 443), provisioning an https certificate. Usually you will want this to stay `true`.
```
gitea_version: latest
```
The docker image version to be used in stack creation.
```
subdomain_alias: git
```
Set this if the deployed container should be served under a URI other than the stack name.
By default it will be set to `git.yourdomain.com`;
if this option is not set it will be served on `gitea.yourdomain.com` instead.
For now, gitea still needs to be set up manually after installation.
This could be automated with the help of these commands:
```sh
docker run --name gitea -p 8080:3000 -e GITEA__security__INSTALL_LOCK=true -d gitea/gitea:1.14.2
docker exec gitea gitea migrate
docker exec gitea gitea admin user create --admin --username root --password admin1234 --email admin@example.com
```
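Afterwards the created admin can be verified with the same subcommand the role's tasks rely on:
```sh
docker exec gitea gitea admin user list --admin
```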


@ -0,0 +1,24 @@
---
# never got around to removing the master tag from the images
gitea_version: latest
gitea_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
gitea_use_https: true
# the subdomain link gitea will be reachable under
subdomain_alias: git
gitea_db_database: gitea
gitea_db_username: gitea
gitea_db_password: gitea
gitea_app_admin_username: Mygiteausername # can not be set to admin in Gitea
gitea_app_admin_password: Mygiteapassword
gitea_app_admin_email: myadmin@mydomain.mytld
# gitea_smtp_host: domain.com:port
# gitea_smtp_username: my@username.com
# gitea_smtp_password: <password>
# gitea_smtp_force_tls: false # forces tls if it is on a non-traditional tls port. Overwrites starttls so should generally be off


@ -0,0 +1,62 @@
- name: Add admin user
community.docker.docker_container_exec:
container: "{{ gitea_app_container_name['stdout'] }}"
command: >
gitea admin user create --admin --username {{ gitea_app_admin_username }} --password {{ gitea_app_admin_password }} --email {{ gitea_app_admin_email }}
become: yes
listen: "no admin user"
## Register reverse proxy
- name: Ensure upstream directory exists
ansible.builtin.file:
path: "{{ gitea_upstream_file_dir }}"
state: directory
mode: '0755'
become: yes
listen: "update gitea upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ gitea_upstream_file_dir }}/upstream.json"
mode: '0600'
become: yes
listen: "update gitea upstream"
# figure out if upstream id exists
- name: check {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: yes
listen: "update gitea upstream"
# upstream already exists, patch it
- name: remove old {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: yes
when: (result.stdout | from_json)['error'] is not defined
listen: "update gitea upstream"
# upstream has to be created
- name: add {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ gitea_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (gitea_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: yes
listen: "update gitea upstream"
- name: Ensure upstream directory is gone again
ansible.builtin.file:
path: "{{ gitea_upstream_file_dir }}"
state: absent
become: yes
listen: "update gitea upstream"


@ -1,15 +1,16 @@
---
galaxy_info:
author: Marty Oehme
description: Light-weight git hosting
license: GPL-3.0-only
min_ansible_version: "2.9"
min_ansible_version: 2.9
galaxy_tags: []
platforms:
- name: GenericLinux
versions:
- all
versions: all
dependencies:
- docker
- docker-swarm
- caddy_id
- caddy


@ -0,0 +1,95 @@
---
- name: Ensure git user exists with ssh key
ansible.builtin.user:
name: "{{ gitea_git_username }}"
generate_ssh_key: yes
ssh_key_type: rsa
ssh_key_bits: 4096
ssh_key_comment: "Gitea Host Key"
become: yes
register: git_user
- name: Ensure git passthrough command directory exists
ansible.builtin.file:
path: "/app/gitea/"
state: directory
mode: '0770'
owner: "{{ git_user['uid'] }}"
group: "{{ git_user['group'] }}"
become: yes
- name: Save git passthrough command in right location
ansible.builtin.copy:
src: gitea
dest: "/app/gitea/gitea"
owner: "{{ git_user['uid'] }}"
group: "{{ git_user['group'] }}"
mode: '0750'
become: yes
- name: Fetch keyfile
fetch:
src: "{{ git_user['home'] }}/.ssh/id_rsa.pub"
dest: "buffer/{{ansible_hostname}}-id_rsa.pub"
flat: yes
become: yes
- name: Ensure git user has its own key authorized for access
ansible.posix.authorized_key:
user: "{{ git_user['name'] }}"
state: present
key: "{{ lookup('file', 'buffer/{{ ansible_hostname }}-id_rsa.pub') }}"
become: yes
- name: Clean up buffer dir
ansible.builtin.file:
path: buffer
state: absent
delegate_to: localhost
## install gitea container
- name: Check upstream status
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: yes
notify: "update gitea upstream"
- name: Deploy gitea to swarm
community.general.docker_stack:
name: "{{ stack_name }}"
state: present
prune: yes
compose:
- "{{ stack_compose }}"
become: yes
tags:
- docker-swarm
notify: "update gitea upstream"
- name: Get app container info
ansible.builtin.command:
cmd: docker ps -q -f name={{ stack_name }}_app
become: yes
until: gitea_app_container_name['rc'] == 0 and gitea_app_container_name['stdout'] | length >= 1
retries: 5
delay: 10
changed_when: False
register: gitea_app_container_name
- name: Look for existing admin user
community.docker.docker_container_exec:
container: "{{ gitea_app_container_name['stdout'] }}"
command: >
gitea admin user list --admin
become: yes
until: "'connection refused' not in gitea_admin_list and 'Failed to run app' not in gitea_admin_list"
retries: 5
delay: 10
changed_when: gitea_admin_list['stdout_lines'] | length <= 1
failed_when: gitea_admin_list['rc'] == 1 and gitea_admin_list['attempts'] >= 5
register: gitea_admin_list
notify: "no admin user"


@ -0,0 +1,68 @@
version: '3.4'
services:
app:
image: "{{ stack_image }}:{{ gitea_version }}"
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "localhost:3000"]
interval: 1m
timeout: 10s
retries: 3
start_period: 1m
volumes:
- data:/data
- /home/git/.ssh:/data/git/.ssh
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
environment:
- USER_UID={{ git_user['uid'] }}
- USER_GID={{ git_user['group'] }}
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=db:5432
- GITEA__database__NAME={{ gitea_db_database }}
- GITEA__database__USER={{ gitea_db_username }}
- GITEA__database__PASSWD={{ gitea_db_password }}
- "GITEA__server__ROOT_URL={{ (gitea_use_https == True) | ternary('https', 'http') }}://{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}.{{server_domain}}"
- "GITEA__server__SSH_DOMAIN={{ server_domain }}"
- GITEA__server__LANDINGPAGE=explore
- GITEA__service__DISABLE_REGISTRATION=true
{% if gitea_app_admin_username is not undefined and not None and gitea_app_admin_password is not undefined and not None %}
- GITEA__security__INSTALL_LOCK=true
{% endif %}
{% if gitea_smtp_host is not undefined and not None and gitea_smtp_username is not undefined and not None and gitea_smtp_password is not undefined and not None %}
- GITEA__mailer__ENABLED=true
- GITEA__service__ENABLE_NOTIFY_MAIL=true
- GITEA__mailer__FROM=gitea@{{ server_domain }}
- GITEA__mailer__TYPE=smtp
- GITEA__mailer__HOST={{ gitea_smtp_host }}
- GITEA__mailer__IS_TLS_ENABLED={{ (gitea_smtp_force_tls is not undefined and not None) | ternary(gitea_smtp_force_tls,'false') }}
- GITEA__mailer__USER={{ gitea_smtp_username }}
- GITEA__mailer__PASSWD={{ gitea_smtp_password }}
{% endif %}
networks:
- "{{ docker_swarm_public_network_name }}"
- backend
ports:
- "127.0.0.1:2222:22"
db:
image: postgres:13
volumes:
- db:/var/lib/postgresql/data
networks:
- backend
environment:
- POSTGRES_USER={{ gitea_db_username }}
- POSTGRES_PASSWORD={{ gitea_db_password }}
- POSTGRES_DB={{ gitea_db_database }}
volumes:
data:
db:
networks:
"{{ docker_swarm_public_network_name }}":
external: true
backend:


@ -1,8 +1,9 @@
---
stack_name: restic
stack_image: "mazzolino/restic"
stack_name: gitea
stack_image: "gitea/gitea"
stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"
backup_enable: true
gitea_git_username: git


@ -1,10 +1,10 @@
# landingpage
The public face of my server.
Not much to see here honestly,
just a few simple lines of html explaining what this server is about and how to contact me.
I don't see anybody else benefiting massively from this role but me,
but if you want the same web presence go for it I suppose 😉
## Defaults
@ -31,6 +31,7 @@ The docker image version to be used in stack creation.
subdomain_alias: www
```
Set this if the deployed container should be served under a URI other than the stack name.
By default it will be set to `www.yourdomain.com`;
if this option is not set it will be served on `landingpage.yourdomain.com` instead.


@ -1,11 +1,11 @@
---
landingpage_version: latest
# never got around to removing the master tag from the images
landingpage_version: master
landingpage_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
landingpage_use_https: true
landingpage_autoupdate: true
# the subdomain link landingpage will be reachable under
subdomain_alias: www


@ -3,15 +3,15 @@
ansible.builtin.file:
path: "{{ landingpage_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
mode: '0755'
become: yes
listen: "update landingpage upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ landingpage_upstream_file_dir }}/upstream.json"
become: true
become: yes
listen: "update landingpage upstream"
# figure out if upstream id exists
@ -22,7 +22,7 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: true
become: yes
listen: "update landingpage upstream"
# upstream already exists, patch it
@ -31,7 +31,7 @@
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: true
become: yes
when: (result.stdout | from_json)['error'] is not defined
listen: "update landingpage upstream"
@ -40,13 +40,14 @@
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ landingpage_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (landingpage_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
become: yes
listen: "update landingpage upstream"
- name: Ensure upstream directory is gone again
ansible.builtin.file:
path: "{{ landingpage_upstream_file_dir }}"
state: absent
become: true
become: yes
listen: "update landingpage upstream"


@ -1,11 +1,14 @@
---
galaxy_info:
author: Marty Oehme
description: Installs my personal public facing landing page as a docker stack service
license: GPL-3.0-only
min_ansible_version: "2.9"
min_ansible_version: 2.9
galaxy_tags: []
dependencies:
- docker
- docker-swarm
- caddy_id
- caddy


@ -7,7 +7,7 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: true
become: yes
notify: "update landingpage upstream"
- name: Deploy landingpage to swarm
@ -17,7 +17,8 @@
prune: yes
compose:
- "{{ stack_compose }}"
become: true
become: yes
tags:
- docker-swarm
notify: "update landingpage upstream"


@ -4,7 +4,7 @@ services:
app:
image: "{{ stack_image }}:{{ landingpage_version }}"
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "127.0.0.1"]
test: ["CMD", "wget", "--spider", "-q", "localhost"]
interval: 1m
timeout: 10s
retries: 3
@ -12,11 +12,6 @@ services:
entrypoint: sh -c "/docker-entrypoint.sh nginx -g 'daemon off;'"
networks:
- "{{ docker_swarm_public_network_name }}"
{% if landingpage_autoupdate is defined and landingpage_autoupdate %}
deploy:
labels:
- shepherd.autoupdate=true
{% endif %}
networks:
"{{ docker_swarm_public_network_name }}":


@ -1,6 +1,7 @@
---
stack_name: landingpage
stack_image: "ghcr.io/marty-oehme/page"
stack_image: "registry.gitlab.com/cloud-serve/landing"
stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"


@ -1,19 +0,0 @@
---
linkding_version: latest-plus # plus contains self-archiving possibilities with singlefile
linkding_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
linkding_use_https: true
linkding_autoupdate: true
# the subdomain link linkding will be reachable under
subdomain_alias: links
# initial superuser creation
linkding_username: linkdinger
linkding_password: linkdingerpass123
# should we back up the data?
linkding_backup_enable: true
linkding_backup_cron: 0 45 3 * * *


@ -1,11 +0,0 @@
---
galaxy_info:
  author: Marty Oehme
  description: Installs linkding as a docker stack service
  license: GPL-3.0-only
  min_ansible_version: "2.9"
  galaxy_tags: []
dependencies:
  - docker-swarm
  - caddy_id


@ -1,46 +0,0 @@
services:
app:
image: "{{ stack_image }}:{{ linkding_version }}"
healthcheck:
test: ["CMD", "curl", "--fail", "http://127.0.0.1:9090/health"]
interval: 1m
timeout: 10s
retries: 3
start_period: 1m
networks:
- "{{ docker_swarm_public_network_name }}"
volumes:
- data:/etc/linkding/data
environment:
- "LD_SUPERUSER_NAME={{ linkding_username }}"
- "LD_SUPERUSER_PASSWORD={{ linkding_password }}"
{% if linkding_autoupdate is defined and linkding_autoupdate %}
deploy:
labels:
- shepherd.autoupdate=true
{% endif %}
{% if backup_enable is not undefined and not false and linkding_backup_enable is not undefined and not false %}
backup:
image: mazzolino/restic
environment:
- "TZ={{ restic_timezone }}"
# go-cron starts w seconds
- "BACKUP_CRON={{ linkding_backup_cron }}"
- "RESTIC_REPOSITORY={{ restic_repo }}"
- "AWS_ACCESS_KEY_ID={{ restic_s3_key }}"
- "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}"
- "RESTIC_PASSWORD={{ restic_pass }}"
- "RESTIC_BACKUP_TAGS=linkding"
- "RESTIC_BACKUP_SOURCES=/volumes"
volumes:
- data:/volumes/linkding_data:ro
{% endif %}
volumes:
data:
networks:
"{{ docker_swarm_public_network_name }}":
external: true


@ -1,38 +0,0 @@
{
"@id": "{{ stack_name }}_upstream",
{% if server_domain is not undefined and not none %}
"match": [
{
"host": [
{% if subdomain_alias is not undefined and not none %}
"{{ subdomain_alias }}.{{ server_domain }}"
{% else %}
"{{ stack_name }}.{{ server_domain }}"
{% endif %}
]
}
],
{% else %}
"match": [
{
"path": [
{% if subdomain_alias is not undefined and not none %}
"/{{ subdomain_alias }}*"
{% else %}
"/{{ stack_name }}*"
{% endif %}
]
}
],
{% endif %}
"handle": [
{
"handler": "reverse_proxy",
"upstreams": [
{
"dial": "{{ stack_name }}_app:9090"
}
]
}
]
}
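For illustration, with `stack_name: linkding`, `subdomain_alias: links`, and an assumed `server_domain: example.com`, the template above renders roughly to:
```json
{
  "@id": "linkding_upstream",
  "match": [
    { "host": ["links.example.com"] }
  ],
  "handle": [
    {
      "handler": "reverse_proxy",
      "upstreams": [{ "dial": "linkding_app:9090" }]
    }
  ]
}
```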


@ -1,6 +0,0 @@
---
stack_name: linkding
stack_image: "ghcr.io/sissbruecker/linkding"
stack_compose: "{{ lookup('template', 'docker-stack.yml.j2') | from_yaml }}"


@ -27,6 +27,6 @@ The docker image version to be used in stack creation.
subdomain_alias: rss
```
Set this if the deployed container should be served under a URI other than the stack name.
By default it will be set to `rss.yourdomain.com`;
if this option is not set it will be served on `miniflux.yourdomain.com` instead.


@ -1,4 +1,5 @@
---
miniflux_version: latest
miniflux_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
@ -8,8 +9,6 @@ miniflux_use_https: true
# the subdomain link miniflux will be reachable under
subdomain_alias: rss
miniflux_autoupdate: true
# Should ideally be overwritten in encrypted group/host vars
miniflux_admin_username: myadmin
miniflux_admin_password: mypassword


@ -3,15 +3,15 @@
ansible.builtin.file:
path: "{{ miniflux_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
mode: '0755'
become: yes
listen: "update miniflux upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ miniflux_upstream_file_dir }}/upstream.json"
become: true
become: yes
listen: "update miniflux upstream"
# figure out if upstream id exists
@ -22,7 +22,7 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: true
become: yes
listen: "update miniflux upstream"
# upstream already exists, patch it
@ -31,7 +31,7 @@
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: true
become: yes
when: (result.stdout | from_json)['error'] is not defined
listen: "update miniflux upstream"
@ -40,13 +40,14 @@
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ miniflux_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (miniflux_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
become: yes
listen: "update miniflux upstream"
- name: Ensure upstream directory is gone again
ansible.builtin.file:
path: "{{ miniflux_upstream_file_dir }}"
state: absent
become: true
become: yes
listen: "update miniflux upstream"


@ -1,11 +1,14 @@
---
galaxy_info:
author: Marty Oehme
description: Installs miniflux as a docker stack service
license: GPL-3.0-only
min_ansible_version: "2.9"
min_ansible_version: 2.9
galaxy_tags: []
dependencies:
- docker
- docker-swarm
- caddy_id
- caddy


@ -7,7 +7,7 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: true
become: yes
notify: "update miniflux upstream"
- name: Deploy miniflux to swarm
@ -17,7 +17,8 @@
prune: yes
compose:
- "{{ stack_compose }}"
become: true
become: yes
tags:
- docker-swarm
notify: "update miniflux upstream"


@ -24,11 +24,6 @@ services:
{% else %}
- "BASE_URL={{ (miniflux_use_https == True) | ternary('https', 'http') }}://localhost/{{ (subdomain_alias is not undefined and not none) | ternary(subdomain_alias, stack_name) }}"
{% endif %}
{% if miniflux_autoupdate is defined and miniflux_autoupdate %}
deploy:
labels:
- shepherd.autoupdate=true
{% endif %}
db:
image: postgres:11


@ -1,4 +1,5 @@
---
stack_name: miniflux
stack_image: "miniflux/miniflux"


@ -27,8 +27,8 @@ The docker image version to be used in stack creation.
subdomain_alias: prm
```
If the deployed container should be served under a URI other than the stack name.
By default, it will be set to `prm.yourdomain.com` (personal relationship manager) -
if this option is not set it will be served on `monica.yourdomain.com` instead.
```
@ -38,14 +38,14 @@ monica_db_password: mymonicadbpassword
```
Set the default username and password combination on first container start.
If loading from an existing volume this does nothing, otherwise it sets the
first user so you can instantly log in.
```
monica_app_disable_signups: true
```
Sets the behavior on the login screen ---
if set to true (the default) it will not let anyone but the first user sign up,
who automatically becomes an administrative user.
If set to false it will allow multiple users to sign up on the instance.
@ -57,13 +57,13 @@ monica_app_weather_api_key: <your-darksky-key>
If `monica_app_geolocation_api_key` is set, Monica will translate addresses
input into the app to geographical latitude/longitude data.
It requires an API key from https://locationiq.com/, which is free for
10,000 daily requests.
Similarly, if `monica_app_weather_api_key` is set, monica will (afaik) show
weather data for the location of individual contacts.
It requires an API key from https://darksky.net/dev/register, where
1,000 daily requests are free.
Be aware, however, that since darksky's sale to Apple, no new API signups are possible.
To use this feature, `monica_app_geolocation_api_key` must also be filled out.
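
A hedged sketch of providing both keys through an encrypted vault file (the values and the path here are placeholders, not working credentials):

```
# group_vars/production/vault.yml (placeholder values)
monica_app_geolocation_api_key: pk.0123456789abcdef
monica_app_weather_api_key: 0123456789abcdef
```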
@ -71,8 +71,8 @@ To use this feature, `monica_app_geolocation_api_key` must also be filled out.
monica_mail_host: smtp.eu.mailgun.org
monica_mail_port: 465
monica_mail_encryption: tls
monica_mail_username:
monica_mail_password:
monica_mail_from: monica@yourserver.com
monica_mail_from_name: Monica
monica_mail_new_user_notification_address: "{{ caddy_email }}"
@ -81,5 +81,5 @@ monica_mail_new_user_notification_address: "{{ caddy_email }}"
Sets up the necessary details for Monica to send out registration and reminder e-mails.
Requires an SMTP server, most easily set up through services like Mailgun or SendGrid.
Variables should be relatively self-explanatory,
with `monica_mail_new_user_notification_address` being the address the notifications should be sent _to_,
so in all probability some sort of administration address.


@ -1,4 +1,5 @@
---
monica_version: latest
monica_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
@ -18,8 +19,8 @@ monica_db_password: mymonicadbpassword
#monica_app_weather_api_key:
#monica_mail_host: smtp.eu.mailgun.org
#monica_mail_username:
#monica_mail_password:
monica_mail_port: 465
monica_mail_encryption: tls
#monica_mail_from: monica@yourserver.com


@ -3,15 +3,15 @@
ansible.builtin.file:
path: "{{ monica_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
mode: '0755'
become: yes
listen: "update monica upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ monica_upstream_file_dir }}/upstream.json"
become: true
become: yes
listen: "update monica upstream"
# figure out if upstream id exists
@ -22,7 +22,7 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: true
become: yes
listen: "update monica upstream"
# upstream already exists, patch it
@ -31,7 +31,7 @@
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: true
become: yes
when: (result.stdout | from_json)['error'] is not defined
listen: "update monica upstream"
@ -40,13 +40,14 @@
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ monica_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (monica_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
become: yes
listen: "update monica upstream"
- name: Ensure upstream directory is gone again
ansible.builtin.file:
path: "{{ monica_upstream_file_dir }}"
state: absent
become: true
become: yes
listen: "update monica upstream"


@ -1,11 +1,14 @@
---
galaxy_info:
author: Marty Oehme
description: Installs monica as a docker stack service
license: GPL-3.0-only
min_ansible_version: "2.9"
min_ansible_version: 2.9
galaxy_tags: []
dependencies:
- docker
- docker-swarm
- caddy_id
- caddy


@ -4,8 +4,9 @@
ansible.builtin.package:
name: "openssl"
state: present
become: true
become: yes
tags:
- apt
- download
- packages


@ -12,7 +12,8 @@
ansible.builtin.shell: echo -n 'base64:'; openssl rand -base64 32
register: monica_app_key
- set_fact: monica_app_key={{ monica_app_key.stdout }}
- set_fact:
monica_app_key={{ monica_app_key.stdout }}
## install container
- name: Check upstream status
@ -22,7 +23,7 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: true
become: yes
notify: "update monica upstream"
- name: Deploy to swarm
@ -32,7 +33,8 @@
prune: yes
compose:
- "{{ stack_compose }}"
become: true
become: yes
tags:
- docker-swarm
notify: "update monica upstream"


@ -1,4 +1,5 @@
---
stack_name: monica
stack_image: "monica"


@ -4,14 +4,13 @@ A full office suite and groupware proposition,
though its main draw for most is the file synchronization abilities.
AKA Dropbox replacement.
This software can grow enormous and enormously complicated;
this Ansible setup role concentrates on 3 things:
* a stable and secure base setup from the official docker container
* automatic setup of an email pipeline so users can reset passwords and be updated of changes
* the ability to use S3 object storage as the primary way of storing users' files
The rest should be taken care of either automatically,
or supplied after the fact (if using different plugins or similar).
## Defaults
@ -33,7 +32,7 @@ nextcloud_version: fpm
nextcloud_db_version: 12
```
The docker image version to be used in stack creation.
The role sets up the `php-fpm` version of the official Nextcloud image.
That means Caddy is used in front as the server which presents all pages
and access to files; the Nextcloud image itself only serves as the PHP data store.
@ -42,17 +41,17 @@ If changing the version to one relying on Nextcloud's in-built Apache server,
take care to change where the upstream proxy is pointing to since the Caddy server in front loses its meaning.
The second variable points to the docker image that should be used for the PostgreSQL database,
with 12 pre-filled as default.
You can set this to `latest`, but should take care to migrate the database correctly when an update rolls around,
or it _will_ destroy your data at some point.
Generally, it seems easier to pin this to a specific version and then only update manually.
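
As an illustrative sketch, pinning the database while tracking the application image more loosely could look like this (the versions are examples, not recommendations):

```yml
nextcloud_version: fpm
nextcloud_db_version: 12-alpine # pinned; bump only alongside a manual database migration
```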
```yml
subdomain_alias: files
```
If the deployed container should be served under a URI other than the stack name.
By default, it will be set to `files.yourdomain.com` -
if this option is not set it will be served on `nextcloud.yourdomain.com` instead.
If you change or delete this, you should also change what `nextcloud_trusted_domains` points to.
@ -67,7 +66,7 @@ nextcloud_db_password: secretnextcloud
```
Sets the default username and password for application and database.
All of these variables are necessary to circumvent the manual installation process
you would usually be faced with on first creating a Nextcloud instance.
Ideally change all of these for your personal setup,
but it is especially important to change the app admin login data since they are what is public facing.
@ -78,7 +77,7 @@ nextcloud_trusted_domains: "{{ subdomain_alias }}.{{ server_domain }}"
The domains that are allowed to access your Nextcloud instance.
Should point to any domains that you want it accessible on,
can be a space-separated list of them.
Take care to include the sub-domain if you are accessing it through one of them.
[Further explanation](https://blog.martyoeh.me/posts/2021-11-18-nextcloud-trusted-domains/).
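
For example, to reach the same instance under the alias and one additional hostname (the domains are illustrative):

```yml
nextcloud_trusted_domains: "files.example.com cloud.example.com"
```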
@ -131,6 +130,7 @@ If your details are correct, Nextcloud should automatically set up S3 as its pri
Be careful if you switch an existing data volume of the Nextcloud image to S3
as you will lose all access to existing files.
The files _should_ not be deleted at this point,
only access will be lost,
but you are playing with fire nonetheless.


@ -1,8 +1,9 @@
---
# set preferred application version
nextcloud_version: 30-fpm-alpine
nextcloud_version: fpm-alpine
# set preferred postgres version
nextcloud_db_version: 16-alpine
nextcloud_db_version: 12-alpine
nextcloud_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
@ -18,13 +19,6 @@ nextcloud_redis_password: myredispass
nextcloud_db_username: nextcloud
nextcloud_db_password: secretnextcloud
# run restic backups
nextcloud_backup_enable: true
nextcloud_backup_cron: 0 30 3 * * *
nextcloud_php_memory_limit: 5G # maximum ram php may use
nextcloud_php_upload_limit: 15G # maximum size of (web) uploaded files
# if you wish to access your nextcloud instance from the reverse proxy
nextcloud_trusted_domains: "{{ subdomain_alias }}.{{ server_domain }}"
@ -37,6 +31,7 @@ nextcloud_smtp_authtype: LOGIN
# nextcloud_smtp_password: <smtp-password>
nextcloud_smtp_from_address: noreply
nextcloud_smtp_from_domain: "{{ server_domain }}"
# the following block is required *fully* for primary object storage
# nextcloud_s3_host: s3.eu-central-1.wasabisys.com
# nextcloud_s3_bucket: nextcloud
@ -46,3 +41,4 @@ nextcloud_smtp_from_domain: "{{ server_domain }}"
# nextcloud_s3_ssl: true
# nextcloud_s3_region: eu-central-1
# nextcloud_s3_usepath_style: true


@ -1,35 +1,15 @@
{
servers {
trusted_proxies static 10.0.0.0/8
}
}
:80 {
encode zstd gzip
root * /var/www/html
file_server
php_fastcgi app:9000
header {
# enable HSTS
Strict-Transport-Security max-age=31536000;includeSubDomains;preload;
Permissions-Policy interest-cohort=()
X-Content-Type-Options nosniff
X-Frame-Options SAMEORIGIN
Referrer-Policy no-referrer
X-XSS-Protection "1; mode=block"
X-Permitted-Cross-Domain-Policies none
X-Robots-Tag "noindex, nofollow"
Strict-Transport-Security max-age=31536000;
}
# client support (e.g. os x calendar / contacts)
redir /.well-known/carddav /remote.php/dav 301
redir /.well-known/caldav /remote.php/dav 301
redir /.well-known/webfinger /index.php/.well-known/webfinger 301
redir /.well-known/nodeinfo /index.php/.well-known/nodeinfo 301
# Uncomment this block if you use the high speed files backend: https://github.com/nextcloud/notify_push
#handle_path /push/* {
# reverse_proxy unix//run/notify_push/notify_push.sock # I love Unix sockets, but you can do :7867 also
#}
# .htaccess / data / config / ... shouldn't be accessible from outside
@forbidden {
@ -45,36 +25,8 @@
path /occ
path /console.php
}
handle @forbidden {
respond 404
}
handle {
root * /var/www/html
php_fastcgi app:9000 {
# Tells nextcloud to remove /index.php from URLs in links
env front_controller_active true
env modHeadersAvailable true # Avoid sending the security headers twice
}
}
respond @forbidden 404
# From .htaccess, set cache for versioned static files (cache-busting)
@immutable {
path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite
query v=*
}
header @immutable Cache-Control "max-age=15778463, immutable"
# From .htaccess, set cache for normal static files
@static {
path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite
not query v=*
}
header @static Cache-Control "max-age=15778463"
# From .htaccess, cache fonts for 1 week
@woff2 path *.woff2
header @woff2 Cache-Control "max-age=604800"
file_server
}


@ -3,15 +3,15 @@
ansible.builtin.file:
path: "{{ nextcloud_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
mode: '0755'
become: yes
listen: "update nextcloud upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ nextcloud_upstream_file_dir }}/upstream.json"
become: true
become: yes
listen: "update nextcloud upstream"
# figure out if upstream id exists
@ -22,7 +22,7 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: true
become: yes
listen: "update nextcloud upstream"
# upstream already exists, patch it
@ -31,7 +31,7 @@
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: true
become: yes
when: (result.stdout | from_json)['error'] is not defined
listen: "update nextcloud upstream"
@ -40,13 +40,14 @@
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ nextcloud_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (nextcloud_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
become: yes
listen: "update nextcloud upstream"
- name: Ensure upstream directory is gone again
ansible.builtin.file:
path: "{{ nextcloud_upstream_file_dir }}"
state: absent
become: true
become: yes
listen: "update nextcloud upstream"


@ -1,11 +1,14 @@
---
galaxy_info:
author: Marty Oehme
description: Installs nextcloud as a docker stack service
license: GPL-3.0-only
min_ansible_version: "2.9"
min_ansible_version: 2.9
galaxy_tags: []
dependencies:
- docker
- docker-swarm
- caddy_id
- caddy


@ -7,21 +7,23 @@
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: true
become: yes
notify: "update nextcloud upstream"
- name: Ensure target directory exists
ansible.builtin.file:
path: "{{ nextcloud_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
mode: '0755'
become: yes
notify: "update nextcloud upstream"
- name: Move webserver Caddyfile to target dir
ansible.builtin.copy:
src: "Caddyfile"
dest: "{{ nextcloud_upstream_file_dir }}/Caddyfile"
become: true
become: yes
notify: "update nextcloud upstream"
- name: Deploy to swarm
community.general.docker_stack:
@ -30,6 +32,8 @@
prune: yes
compose:
- "{{ stack_compose }}"
become: true
become: yes
tags:
- docker-swarm
notify: "update nextcloud upstream"


@ -7,7 +7,7 @@ services:
- backend
- "{{ docker_swarm_public_network_name }}"
healthcheck:
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://127.0.0.1:2019/metrics"]
test: ["CMD", "wget", "--quiet", "--spider", "--tries=1", "http://localhost:2019/metrics"]
interval: 1m
timeout: 10s
retries: 3
@ -31,7 +31,7 @@ services:
start_period: 5m
# needed for db to be up,
# see https://help.nextcloud.com/t/failed-to-install-nextcloud-with-docker-compose/83681/15
# entrypoint: sh -c "while !(nc -z db 5432); do sleep 30; done; /entrypoint.sh php-fpm"
entrypoint: sh -c "while !(nc -z db 5432); do sleep 30; done; /entrypoint.sh php-fpm"
environment:
- NEXTCLOUD_ADMIN_USER={{ nextcloud_app_admin_username }}
- NEXTCLOUD_ADMIN_PASSWORD={{ nextcloud_app_admin_password }}
@ -41,8 +41,6 @@ services:
- POSTGRES_DB={{ nextcloud_db_username }}
- POSTGRES_USER={{ nextcloud_db_username }}
- POSTGRES_PASSWORD={{ nextcloud_db_password }}
- PHP_MEMORY_LIMIT={{ nextcloud_php_memory_limit }}
- PHP_UPLOAD_LIMIT={{ nextcloud_php_upload_limit }}
{% if nextcloud_trusted_domains is not undefined and not none %}
- NEXTCLOUD_TRUSTED_DOMAINS={{ nextcloud_trusted_domains }}
{% endif %}
@ -142,42 +140,6 @@ services:
networks:
- backend
# from https://okxo.de/speed-up-nextcloud-preview-generation-with-imaginary/
# and https://github.com/nextcloud/all-in-one/tree/main/Containers/imaginary
imaginary:
image: nextcloud/aio-imaginary:latest
environment:
- PORT=9000
healthcheck:
test: ["CMD", "/healthcheck.sh"]
interval: 1m
timeout: 10s
retries: 3
start_period: 1m
command: -return-size -max-allowed-resolution 222.2 -concurrency 50 -enable-url-source -log-level debug
cap_add:
- CAP_SYS_NICE
networks:
- backend
{% if backup_enable | default(false) and nextcloud_backup_enable | default(false) %}
backup:
image: mazzolino/restic
environment:
- "TZ={{ restic_timezone }}"
# go-cron starts w seconds
- "BACKUP_CRON={{ nextcloud_backup_cron }}"
- "RESTIC_REPOSITORY={{ restic_repo }}"
- "AWS_ACCESS_KEY_ID={{ restic_s3_key }}"
- "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}"
- "RESTIC_PASSWORD={{ restic_pass }}"
- "RESTIC_BACKUP_TAGS=nextcloud"
- "RESTIC_BACKUP_SOURCES=/volumes"
volumes:
- db:/volumes/nextcloud_db:ro
- data:/volumes/nextcloud_data:ro
{% endif %}
# metrics:
# image: telegraf
# hostname: "${HOSTNAME:-vmi352583.contaboserver.net}"

View file

@ -1,4 +1,5 @@
---
stack_name: nextcloud
stack_image: "nextcloud"


@ -1,42 +0,0 @@
# ntfy
A self-hosted notification service.
It takes messages sent to the server through simple POST requests on specific topics and
blasts them out to any subscribed receiver on Android, the web, the command line, or even in other applications.
It can thus function as a simple cross-platform push message service that fits very well into unix workflows.
## Defaults
```
ntfy_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
```
The on-target directory where the proxy configuration file should be stashed.
```
ntfy_use_https: true
```
Whether the service should be reachable through http (port 80) or through https (port 443) and provision an https certificate.
Usually you will want this to stay `true`,
especially on the public facing web.
```
ntfy_version: latest
```
The docker image version to be used in stack creation.
```
subdomain_alias: push
```
If the deployed container should be served under a URI other than the stack name.
By default, it will be set to `push.yourdomain.com` -
if this option is not set it will be served on `ntfy.yourdomain.com` instead.
The individual `ntfy` options to be changed are very well described in
[the ntfy documentation](https://ntfy.sh/docs/config/).
Together with the default variables for this role it should be easy to find a good combination of settings.
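
As an illustrative sketch, loosening the rate limits for a small circle of trusted users might look like this in your group vars (the values are arbitrary examples; see the linked documentation for their exact semantics):

```
ntfy_visitor_request_limit_burst: 120
ntfy_visitor_request_limit_replenish: "5s"
ntfy_attachment_file_size_limit: "50M"
```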


@ -1,19 +0,0 @@
---
ntfy_version: latest
ntfy_upstream_file_dir: "{{ docker_stack_files_dir }}/{{ stack_name }}"
ntfy_use_https: true
subdomain_alias: push
ntfy_global_topic_limit: 15000
ntfy_visitor_subscription_limit: 30
ntfy_visitor_request_limit_burst: 60
ntfy_visitor_request_limit_replenish: "10s"
ntfy_cache_duration: "12h"
ntfy_attachment_total_size_limit: "5G"
ntfy_attachment_file_size_limit: "15M"
ntfy_attachment_expiry_duration: "5h"
ntfy_visitor_attachment_total_size_limit: "500M"
ntfy_visitor_attachment_daily_bandwidth_limit: "1G"


@ -1,45 +0,0 @@
## Register reverse proxy
- name: Ensure upstream directory exists
ansible.builtin.file:
path: "{{ ntfy_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
listen: "update ntfy upstream"
- name: Update upstream template
ansible.builtin.template:
src: upstream.json.j2
dest: "{{ ntfy_upstream_file_dir }}/upstream.json"
become: true
listen: "update ntfy upstream"
# figure out if upstream id exists
- name: check {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/id/{{ stack_name }}_upstream/
changed_when: False
register: result
become: true
listen: "update ntfy upstream"
# upstream already exists, patch it
- name: remove old {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X DELETE localhost:2019/id/{{ stack_name }}_upstream/
become: true
when: (result.stdout | from_json)['error'] is not defined
listen: "update ntfy upstream"
# upstream has to be created
- name: add {{ stack_name }} upstream
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl -X POST -H "Content-Type: application/json" -d @{{ ntfy_upstream_file_dir }}/upstream.json localhost:2019/config/apps/http/servers/{{ (ntfy_use_https == True) | ternary(caddy_https_server_name, caddy_http_server_name) }}/routes/0/
become: true
listen: "update ntfy upstream"


@ -1,11 +0,0 @@
---
galaxy_info:
author: Marty Oehme
description: Installs a self-hosted push notification service through docker-swarm.
license: GPL-3.0-only
min_ansible_version: "2.9"
galaxy_tags: []
dependencies:
- docker-swarm
- caddy_id


@ -1,37 +0,0 @@
---
- name: Ensure target directory exists
ansible.builtin.file:
path: "{{ ntfy_upstream_file_dir }}"
state: directory
mode: "0755"
become: true
- name: Move ntfy configuration file to target dir
ansible.builtin.template:
src: "server.yml.j2"
dest: "{{ ntfy_upstream_file_dir }}/server.yml"
become: true
notify: "update ntfy upstream"
## install ntfy container
- name: Check upstream status
community.docker.docker_container_exec:
container: "{{ caddy_container_id }}"
command: >
curl localhost:2019/id/{{ stack_name }}_upstream/
register: result
changed_when: (result.stdout | from_json) != (lookup('template', 'upstream.json.j2') | from_yaml)
become: true
notify: "update ntfy upstream"
- name: Deploy ntfy to swarm
community.general.docker_stack:
name: "{{ stack_name }}"
state: present
prune: yes
compose:
- "{{ stack_compose }}"
become: true
tags:
- docker-swarm
notify: "update ntfy upstream"


@ -1,27 +0,0 @@
version: '3.4'
services:
app:
image: "{{ stack_image }}:{{ ntfy_version }}"
healthcheck:
test: ["CMD", "wget", "--spider", "-q", "127.0.0.1"]
interval: 1m
timeout: 10s
retries: 3
start_period: 1m
volumes:
- "{{ ntfy_upstream_file_dir }}/server.yml:/etc/ntfy/server.yml"
- cache:/var/cache/ntfy
networks:
- "{{ docker_swarm_public_network_name }}"
command:
- serve
volumes:
cache:
networks:
"{{ docker_swarm_public_network_name }}":
external: true


@ -1,15 +0,0 @@
base-url: "https://{{ server_domain }}"
# ntfy expects dashed option names in server.yml
global-topic-limit: {{ ntfy_global_topic_limit }}
visitor-subscription-limit: {{ ntfy_visitor_subscription_limit }}
visitor-request-limit-burst: {{ ntfy_visitor_request_limit_burst }}
visitor-request-limit-replenish: "{{ ntfy_visitor_request_limit_replenish }}"
cache-file: "/var/cache/ntfy/cache.db"
cache-duration: "{{ ntfy_cache_duration }}"
attachment-cache-dir: "/var/cache/ntfy/attachments"
attachment-total-size-limit: "{{ ntfy_attachment_total_size_limit }}"
attachment-file-size-limit: "{{ ntfy_attachment_file_size_limit }}"
attachment-expiry-duration: "{{ ntfy_attachment_expiry_duration }}"
visitor-attachment-total-size-limit: "{{ ntfy_visitor_attachment_total_size_limit }}"
visitor-attachment-daily-bandwidth-limit: "{{ ntfy_visitor_attachment_daily_bandwidth_limit }}"
behind-proxy: true # uses 'X-Forwarded-For' Headers for individual visitors
# TODO: I believe Caddy does not set the correct X-Forwarded-For header, see whoami container to check


@ -1,49 +0,0 @@
# restic
Backup maintenance stack.
Takes care of regularly pruning the backup repository and checking its integrity.
Currently only supports S3 as a backend.
## Defaults
```yaml
restic_timezone: US/Chicago
```
The timezone to be used for the cronjob.
```yaml
restic_version: latest
```
The docker image version to be used in stack creation.
```yaml
restic_repo: s3.eu-central-1.wasabisys.com/myrepo
restic_pass: <restic-pass>
```
The repository URL and the restic repository password.
See the restic documentation for more information.
```yaml
restic_s3_key: <s3-key>
restic_s3_secret: <s3-secret>
```
The restic S3 credentials, i.e. the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
```yaml
restic_prune_cron: 0 0 4 * * *
restic_forget_args: --prune --keep-last 14 --keep-daily 2 --keep-weekly 2
```
The default prune and forget cronjob schedule and arguments: Prune the repository every day at 4:00 AM and keep the last 14 snapshots, 2 daily snapshots and 2 weekly snapshots.
```yaml
restic_check_cron: 0 15 5 * * *
restic_check_args: --read-data-subset=5%
```
The default check cronjob schedule and arguments: Check the repository integrity every day at 5:15 AM and, in addition to structural checks, read a randomly chosen 5% of the data for an integrity check.
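
A sketch of overriding the check job in group vars, assuming you prefer a weekly but deeper check (the schedule and percentage are illustrative, not recommendations):

```yaml
restic_check_cron: 0 0 6 * * MON # go-cron format, starts with seconds
restic_check_args: --read-data-subset=10%
```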


@ -1,14 +0,0 @@
---
restic_version: latest
# restic_repo: s3.eu-central-1.wasabisys.com/myrepo
# restic_pass: <restic-pass>
# restic_s3_key: <s3-key>
# restic_s3_secret: <s3-secret>
restic_timezone: "{{ server_timezone | default('US/Chicago') }}"
restic_prune_cron: 0 0 4 * * *
restic_forget_args: --prune --keep-last 14 --keep-daily 2 --keep-weekly 2
restic_check_cron: 0 30 4 * * SUN
restic_check_args: --read-data-subset=15%


@ -1,10 +0,0 @@
---
galaxy_info:
author: Marty Oehme
description: Installs a restic-based backup maintenance stack. Only supports S3 atm.
license: GPL-3.0-only
min_ansible_version: "2.9"
galaxy_tags: []
dependencies:
- docker-swarm


@ -1,11 +0,0 @@
---
- name: Deploy restic to swarm
community.general.docker_stack:
name: "{{ stack_name }}"
state: present
prune: yes
compose:
- "{{ stack_compose }}"
become: true
tags:
- docker-swarm


@ -1,30 +0,0 @@
services:
prune:
image: "{{ stack_image }}:{{ restic_version }}"
hostname: docker
environment:
- "TZ={{ restic_timezone }}"
- "SKIP_INIT=true"
- "RUN_ON_STARTUP=true"
# go-cron starts w seconds
- "PRUNE_CRON={{ restic_prune_cron }}"
- "RESTIC_FORGET_ARGS={{ restic_forget_args }}"
- "RESTIC_REPOSITORY={{ restic_repo }}"
- "AWS_ACCESS_KEY_ID={{ restic_s3_key }}"
- "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}"
- "RESTIC_PASSWORD={{ restic_pass }}"
check:
image: "{{ stack_image }}:{{ restic_version }}"
hostname: docker
environment:
- "TZ={{ restic_timezone }}"
- "SKIP_INIT=true"
- "RUN_ON_STARTUP=false"
# go-cron starts w seconds
- "CHECK_CRON={{ restic_check_cron }}"
- "RESTIC_CHECK_ARGS={{ restic_check_args }}"
- "RESTIC_REPOSITORY={{ restic_repo }}"
- "AWS_ACCESS_KEY_ID={{ restic_s3_key }}"
- "AWS_SECRET_ACCESS_KEY={{ restic_s3_secret }}"
- "RESTIC_PASSWORD={{ restic_pass }}"

Some files were not shown because too many files have changed in this diff.