Compare commits

...

24 commits

Author SHA1 Message Date
ff718b46ef
arr: Rename data dir to serve_dir 2025-07-16 22:11:46 +02:00
b8678bafaa
Merge paperless role 2025-07-16 22:11:45 +02:00
ce86cdc2c0
paperless: Add to main playbook 2025-07-16 22:11:44 +02:00
8391a54bda
paperless: Remove redundant docker compose lines 2025-07-16 22:11:44 +02:00
c0b5b04d3d
paperless: Add caddy labels 2025-07-16 22:11:43 +02:00
1b147c6a78
paperless: Set up directories 2025-07-16 22:11:43 +02:00
70ddb4df6a
paperless: Update secrets 2025-07-16 22:11:42 +02:00
dc8ec72581
paperless: Add role 2025-07-16 22:11:42 +02:00
73368d4c82
arr: Fix arrstack default timezone 2025-07-16 22:11:41 +02:00
5f09ce7099
Move arr stack to ansible 2025-07-16 22:11:40 +02:00
6374fa8eff
Keep sensitive vars in vault 2025-07-16 22:11:40 +02:00
2045f4ae58
Make arrstack docker compose ansible ready
Move sonarr to new system

Move sabnzbd to new system

Move radarr

Move lidarr

!unsafe directive makes ansible ignore potential template vars in the
string

https://github.com/ansible/ansible/issues/16443

Move readarr

Move prowlarr

Move homarr

Move beets

Move jellyseerr

Move audiobookshelf

Move jellyfin

Move gonic

Migrate torrent setup
2025-07-16 22:11:39 +02:00
93ae62dc93
Move to incus connection 2025-07-16 22:11:39 +02:00
578f699cb7
Move ansible docker module python requirements to docker tasks 2025-07-16 22:11:38 +02:00
e6b6154043
Add caddy reverse proxy role
Acts as reverse proxy for the docker instance. Can be configured through
docker labels. Proxies anything that is received on port 80 or 443.
2025-07-16 22:11:38 +02:00
ec91e97fed
Remove orphans on stack deployment 2025-07-16 22:11:37 +02:00
d930094638
Install instance req for ansible docker tasks 2025-07-16 22:11:36 +02:00
b54d14c98e
Rename host groups to host/instance 2025-07-16 22:11:36 +02:00
71244751c7
Split arr role from playbook 2025-07-16 22:11:35 +02:00
eaaa35de25
Split incus role from playbook 2025-07-16 22:11:35 +02:00
0f8822e632 Set utility scan script to color mode by default 2025-07-16 22:11:35 +02:00
5c6314dc73 Move arr yaml to arr subdirectory 2025-07-16 22:11:35 +02:00
122abbe723
Git ignore vaultpass 2025-07-16 22:11:34 +02:00
9e94dfbc52 Add incus server prep ansible stack 2025-04-02 20:52:29 +02:00
37 changed files with 1381 additions and 1 deletions

1
ansible/.gitignore vendored Normal file
View file

@ -0,0 +1 @@
vaultpass

5
ansible/ansible.cfg Normal file
View file

@ -0,0 +1,5 @@
[defaults]
remote_tmp = /tmp
inventory = inventory
vault_password_file = vaultpass

View file

@ -0,0 +1,4 @@
arrstack_tz: Europe/Berlin
stack_paperless_tz: Europe/Berlin
stack_paperless_ocr_language: deu+eng
stack_paperless_ocr_languages: eng deu frk

View file

@ -0,0 +1,33 @@
$ANSIBLE_VAULT;1.1;AES256
61653738666234666537383737636631323931396632353236366639323833363134313066613962
3730306266353834303135653939323561666131333539380a333039663466393061343736653935
38613532346132303730363835373165326361373561666565316332623964316265306131663332
3832373861666138330a646631346262666233383938366234373335383064353662326461656430
35626334356566613731333639663130306133353439666234636137636265313662393730626232
37633430663438666461353735656461623165316134346130663963356464656436313966613739
35373763633936336565666261353631366531323430663461376338373265393235313932363733
66323634656539303732303338376264353935316538373465633462386466383662626534623365
38363362653535386130366462393365393731313034316431373039393331396231663365616435
31373632633264383266623733613530333134643730396336373461633739383436623736613638
39303163373234313534323562643836636337323266366538323932656563376163376162306165
31356165343739373237363734393230356236363339653564326537313336663036313437646636
35313336396361353365323763643861656561633235383732376236333232363565376365386232
32353737373239396436316538323661303164373765343936643736663035323237663935643834
39333238666162363366633938333231313966613430303236636634316136373433323536613833
61386434636464633662383736323630326630313561356335306663333864643462366335643430
34396331633935656632323062613433386234386535613337313561353037356633376664333634
62303063636135343737323331623736303636643735613337633530666134333266323934376631
62386535323834323435373362333662613637383065346335373465343765663836626434666463
39623162313438346239393261613738376435636539623663646461396330326661343633353531
66643362326464376437643732366466633332323837323433343165313735653331346431656136
32376165666437373435646566373036313639316165353639626435333330626530373662333031
37656463333231356465383039336237626235613331376639656665303431353131336436653839
32383031376134643236333231323863633933376238613462623034663262316339353061343635
36336338323331343866643836666530616639313933306536343631303635383862363633373562
65663930306461316363376364663935663566393335396130626436316262643730393133363837
63343334613966343466646366303137663965633639653262316630626436346461643533393135
31303630616662623237313331356263613535323739616162393165336163346161303836653638
30386636663230613731383165333462363033333734326662373961383236386631643139373934
38353732393265656162633739623061333833353138303664633463643234396338643736353132
32313763656165643963373236346364636534333964383131373839616435356631386463646437
39336237356337383930

6
ansible/inventory Normal file
View file

@ -0,0 +1,6 @@
[host_system]
bob ansible_ssh_private_key_file=~/.ssh/keys/bob
[instance_system]
#ansibletest ansible_connection=docker
dockerbob ansible_connection=community.general.incus ansible_incus_remote=bob

49
ansible/playbook.yaml Normal file
View file

@ -0,0 +1,49 @@
---
# Site playbook: prepare the incus host, then provision the docker
# instance and the container stacks it serves.

- name: Prepare incus server host
  hosts: host_system
  tasks:
    - name: Prepare system
      ansible.builtin.import_role:
        name: system
      tags: system
    # FIXME: Role needs much fixup before it can run
    # - name: Prepare incus
    #   ansible.builtin.import_role:
    #     name: incus-install
    #   tags: incus

# Requires the role beforehand: ansible-galaxy install geerlingguy.docker
- name: Install docker
  hosts: instance_system
  tasks:
    - name: Install docker and docker compose
      ansible.builtin.import_role:
        name: geerlingguy.docker
      tags: docker
    - name: Install docker python requirements
      ansible.builtin.package:
        name: "{{ item }}"
        state: present
      loop:
        - python3-yaml      # for docker compose_v2
        - python3-requests  # for docker network

- name: Prepare all docker hosted containers
  hosts: instance_system
  tasks:
    - name: Set up Caddy stack
      ansible.builtin.import_role:
        name: caddy
      tags: caddy
    - name: Set up Arr stack
      ansible.builtin.import_role:
        name: arr
      tags: arr
    - name: Set up Paperless stack
      ansible.builtin.import_role:
        name: paperless
      tags: paperless

View file

@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View file

@ -0,0 +1,14 @@
---
# defaults file for arr
arrstack_env_dir: /opt/arrstack        # compose project / config root
arrstack_serve_dir: /srv               # media/files data root
arrstack_serve_dir_create: true        # create the serve dir tree when true
arrstack_puid: 1000
arrstack_pgid: 100
arrstack_tz: America/Chicago
# Quoted: a bare 022 is parsed by YAML as octal integer 18, which would
# render UMASK_SET=18 in the compose template.
arrstack_umask_set: "022"
# arrstack_mb_user: Musicbrainz-user
# arrstack_mb_pass: Musicbrainz-password
# NOTE(review): the compose template also consumes arrstack_vpn_* and
# arrstack_qbit_* — presumably supplied via vault; confirm.

View file

@ -0,0 +1,2 @@
---
# handlers file for arr

View file

@ -0,0 +1,26 @@
galaxy_info:
author: Marty Oehme
description: Deploying a full *arr stack
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
license: GPL-3.0-only
min_ansible_version: "2.1"
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View file

@ -0,0 +1,43 @@
---
# Tasks for the arr role: prepare directories, then bring up the compose
# stack rendered from templates/docker-compose.yaml.j2.

- name: Create Arr stack environment directory
  ansible.builtin.file:
    state: directory
    path: "{{ arrstack_env_dir }}"
    owner: root
    group: root
    mode: "0700"  # quoted so YAML does not coerce it to octal int 448

- name: Create Arr stack data directory
  ansible.builtin.file:
    state: directory
    # Was "{{ arrstack_data_dir }}" — undefined since the variable was
    # renamed to arrstack_serve_dir (see defaults/main.yml).
    path: "{{ arrstack_serve_dir }}/{{ item }}"
    owner: "{{ arrstack_puid }}"
    group: "{{ arrstack_pgid }}"
    mode: "0770"
  when: arrstack_serve_dir_create
  loop:
    - ""
    - files
    - files/torrents  # NOTE(review): compose template mounts files/torrent (singular) — confirm which is canonical
    - files/usenet
    - media
    - media/tv
    - media/movies
    - media/music
    - media/audiobooks

- name: Start the compose stack
  community.docker.docker_compose_v2:
    project_name: arrstack
    # project_src: "{{ arrstack_env_dir }}"
    definition: "{{ lookup('template', 'docker-compose.yaml.j2') | from_yaml }}"
    remove_orphans: true
    wait: true
    wait_timeout: 60
    # services:
    #   - transmission
    #   - flaresolverr
    #   - sonarr-hd
    #   - sonarr-4k
    #   - sonarr-anime
    #   - prowlarr

View file

@ -0,0 +1,171 @@
services:
sonarr:
container_name: sonarr
image: lscr.io/linuxserver/sonarr:latest
ports:
- 8989:8989
environment:
- PUID=${PUID}
- PGID=${PGID}
- UMASK_SET=022
- TZ=${TZ}
volumes:
- "./config/sonarr:/config"
- "CHANGE_TO_COMPOSE_DATA_PATH:/data"
restart: unless-stopped
radarr:
container_name: radarr
image: lscr.io/linuxserver/radarr:latest
ports:
- 7878:7878
environment:
- PUID=${PUID}
- PGID=${PGID}
- UMASK_SET=022
- TZ=${TZ}
volumes:
- "./config/radarr:/config"
- "CHANGE_TO_COMPOSE_DATA_PATH:/data"
restart: unless-stopped
lidarr:
container_name: lidarr
image: lscr.io/linuxserver/lidarr:latest
ports:
- 8686:8686
environment:
- PUID=${PUID}
- PGID=${PGID}
- UMASK_SET=022
- TZ=${TZ}
- DOCKER_MODS=linuxserver/mods:universal-docker
volumes:
- "./config/lidarr:/config"
- "CHANGE_TO_COMPOSE_DATA_PATH:/data"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
restart: unless-stopped
readarr:
container_name: readarr
image: lscr.io/linuxserver/readarr:develop
ports:
- 8787:8787
environment:
- PUID=${PUID}
- PGID=${PGID}
- UMASK_SET=022
- TZ=${TZ}
volumes:
- "./config/readarr:/config"
- "CHANGE_TO_COMPOSE_DATA_PATH:/data"
restart: unless-stopped
prowlarr:
container_name: prowlarr
image: lscr.io/linuxserver/prowlarr:develop
environment:
- PUID=${PUID}
- PGID=${PGID}
- UMASK_SET=022
- TZ=${TZ}
volumes:
- "./config/prowlarr:/config"
ports:
- 9696:9696
restart: unless-stopped
sabnzbd:
container_name: sabnzbd
image: lscr.io/linuxserver/sabnzbd:latest
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
volumes:
- "./config/sabnzbd:/config"
- "CHANGE_TO_COMPOSE_DATA_PATH/usenet:/data/usenet:rw"
ports:
- 8080:8080
restart: unless-stopped
pia-qbittorrent:
image: j4ym0/pia-qbittorrent
container_name: pia-qbittorrent
cap_add:
- NET_ADMIN
environment:
- UID=${PUID}
- GID=${PGID}
- TZ=${TZ}
- REGION=Netherlands
- USER=${PIA_USER}
- PASSWORD=${PIA_PASS}
volumes:
- "./config/piaqbit:/config"
- "CHANGE_TO_COMPOSE_DATA_PATH/torrent:/downloads:rw"
ports:
- "8888:8888"
restart: unless-stopped
jellyfin:
  image: lscr.io/linuxserver/jellyfin:latest
  container_name: jellyfin
  environment:
    # ${VAR} is the compose interpolation syntax; the original {$PUID}
    # and {$PGID} were passed through literally, never substituted.
    - PUID=${PUID}
    - PGID=${PGID}
    - TZ=${TZ}
    #- JELLYFIN_PublishedServerUrl=192.168.0.5 #optional
  volumes:
    # "./config" (was ".config") to match every other service's bind mount
    - "./config/jellyfin:/config"
    - "CHANGE_TO_COMPOSE_DATA_PATH/media:/data"
  ports:
    - 8096:8096
    - 7359:7359/udp #optional - network discovery
    - 1900:1900/udp #optional - dlna discovery
  restart: unless-stopped
audiobookshelf:
  container_name: audiobookshelf
  image: ghcr.io/advplyr/audiobookshelf:latest
  environment:
    - PUID=${PUID}
    - PGID=${PGID}
    - UMASK_SET=022
    - TZ=${TZ}
  ports:
    - 13378:80
  volumes:
    - "CHANGE_TO_COMPOSE_DATA_PATH/media/audio/books:/audiobooks"
    - "CHANGE_TO_COMPOSE_DATA_PATH/media/audio/podcasts:/podcasts"
    # "./" prefix (was "." only) so these resolve as relative bind mounts
    - "./config/audiobookshelf:/config"
    - "./metadata/audiobookshelf:/metadata"
  restart: unless-stopped
jellyseerr:
image: fallenbagel/jellyseerr:latest
container_name: jellyseerr
environment:
- TZ=${TZ}
ports:
- 5055:5055
volumes:
- "./config/jellyseerr:/app/config"
restart: unless-stopped
beets:
image: lscr.io/linuxserver/beets:latest
container_name: beets
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
volumes:
- "./config/beets:/config"
- "CHANGE_TO_COMPOSE_DATA_PATH/media/audio/music:/music"
- "CHANGE_TO_COMPOSE_DATA_PATH/media/audio/music-unsorted:/downloads"
- "CHANGE_TO_COMPOSE_DATA_PATH:/data"
ports:
- 8337:8337
restart: unless-stopped
homarr:
image: ghcr.io/ajnart/homarr:latest
container_name: homarr
volumes:
- /var/run/docker.sock:/var/run/docker.sock # Optional, only if you want docker integration
- ./config/homarr/configs:/app/data/configs
- ./config/homarr/icons:/app/public/icons
- ./config/homarr/data:/data
ports:
- '80:7575'
restart: unless-stopped

View file

@ -0,0 +1,337 @@
services:
sonarr:
container_name: sonarr
image: lscr.io/linuxserver/sonarr:latest
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/config/sonarr:/config"
- "{{ arrstack_serve_dir }}/media/tv:/data/media/tv"
- "{{ arrstack_serve_dir }}/files/usenet:/data/usenet"
- "{{ arrstack_serve_dir }}/files/torrent:/data/torrent"
restart: unless-stopped
labels:
caddy: "http://sonarr.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 8989{{ '}}'}}"
radarr:
container_name: radarr
image: lscr.io/linuxserver/radarr:latest
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/config/radarr:/config"
- "/mnt/ext/data/media/movies:/data/media/movies" # FIXME: Find solution
- "{{ arrstack_serve_dir }}/files/usenet:/data/usenet"
- "{{ arrstack_serve_dir }}/files/torrent:/data/torrent"
restart: unless-stopped
labels:
caddy: "http://radarr.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 7878{{ '}}'}}"
lidarr:
  container_name: lidarr
  image: lscr.io/linuxserver/lidarr:latest
  networks:
    - caddy
  # Single environment list: the original declared "environment:" twice;
  # YAML duplicate keys resolve last-wins, silently dropping
  # PUID/PGID/TZ/UMASK_SET/MB_* and keeping only DOCKER_MODS.
  environment:
    - PUID={{ arrstack_puid }}
    - PGID={{ arrstack_pgid }}
    - TZ={{ arrstack_tz }}
    - UMASK_SET={{ arrstack_umask_set }}
    - MB_USER={{ arrstack_mb_user }}
    - MB_PASS={{ arrstack_mb_pass }}
    - DOCKER_MODS=linuxserver/mods:universal-docker
  volumes:
    - "{{ arrstack_env_dir }}/config/lidarr:/config"
    - "/var/run/docker.sock:/var/run/docker.sock:ro"
    - "{{ arrstack_serve_dir }}/media/music:/data/media/music"
    - "{{ arrstack_serve_dir }}/files/usenet:/data/usenet"
    - "{{ arrstack_serve_dir }}/files/torrent:/data/torrent"
  restart: unless-stopped
  labels:
    caddy: "http://lidarr.pichi.berlin"
    caddy.reverse_proxy: "{{ '{{' }}upstreams 8686{{ '}}'}}"
readarr:
container_name: readarr
image: lscr.io/linuxserver/readarr:develop
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/config/readarr:/config"
- "{{ arrstack_serve_dir }}/media/audiobooks:/data/media/audiobooks"
- "{{ arrstack_serve_dir }}/files/usenet:/data/usenet"
- "{{ arrstack_serve_dir }}/files/torrent:/data/torrent"
restart: unless-stopped
labels:
caddy: "http://readarr.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 8787{{ '}}'}}"
prowlarr:
container_name: prowlarr
image: lscr.io/linuxserver/prowlarr:develop
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/config/prowlarr:/config"
restart: unless-stopped
labels:
caddy: "http://prowlarr.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 9696{{ '}}'}}"
beets:
  image: lscr.io/linuxserver/beets:latest
  container_name: beets
  networks:
    - caddy
  environment:
    - PUID={{ arrstack_puid }}
    - PGID={{ arrstack_pgid }}
    - TZ={{ arrstack_tz }}
    - UMASK_SET={{ arrstack_umask_set }}
    - MB_USER={{ arrstack_mb_user }}
    - MB_PASS={{ arrstack_mb_pass }}
  volumes:
    - "{{ arrstack_env_dir }}/config/beets:/config"
    - "{{ arrstack_serve_dir }}/media/music:/music"
    - "{{ arrstack_serve_dir }}/files/music-unsorted:/downloads"
  restart: unless-stopped
  labels:
    # Was "http://prowlarr.pichi.berlin" — copy-paste from the prowlarr
    # service; it would collide with prowlarr's vhost in caddy.
    caddy: "http://beets.pichi.berlin"
    caddy.reverse_proxy: "{{ '{{' }}upstreams 8337{{ '}}'}}"
sabnzbd:
container_name: sabnzbd
image: lscr.io/linuxserver/sabnzbd:latest
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/config/sabnzbd:/config"
- "{{ arrstack_serve_dir }}/files/usenet:/data/usenet:rw"
restart: unless-stopped
labels:
caddy: "http://usenet.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 8080{{ '}}'}}"
vpn:
container_name: vpn
image: qmcgaw/gluetun:v3
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
- VPN_PORT_FORWARDING_STATUS_FILE=/gluetun/forwarded_port
- FIREWALL_OUTBOUND_SUBNETS=172.18.0.0/24
- BLOCK_SURVEILLANCE=on
- VPN_SERVICE_PROVIDER={{ arrstack_vpn_provider }}
- OPENVPN_USER={{ arrstack_vpn_user }}
- OPENVPN_PASSWORD={{ arrstack_vpn_pass }}
- SERVER_REGIONS={{ arrstack_vpn_regions }}
- PORT_FORWARD_ONLY=true
- VPN_PORT_FORWARDING=on
- VPN_PORT_FORWARDING_PROVIDER={{ arrstack_vpn_provider }}
- QBITTORRENT_USER={{ arrstack_qbit_user }}
- QBITTORRENT_PASS={{ arrstack_qbit_pass }}
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun:/dev/net/tun
volumes:
- "{{ arrstack_env_dir }}/config/gluetun:/gluetun"
#ports: # TODO: should this be exposed?
# - 8000:8000 # gluetun http control
restart: unless-stopped
labels:
caddy: "http://torrent.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 8888{{ '}}'}}"
qbittorrent:
image: linuxserver/qbittorrent
container_name: qbittorrent
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
- WEBUI_PORT=8888
volumes:
- "{{ arrstack_env_dir }}/config/piaqbit:/config"
- "{{ arrstack_env_dir }}/config/gluetun:/gluetun"
- "{{ arrstack_serve_dir }}/files/torrent:/downloads"
depends_on:
- vpn
network_mode: "service:vpn"
restart: unless-stopped
gluetun-qbittorrent-port-manager:
  image: patrickaclark/gluetun-qbittorrent-port-manager:latest
  container_name: qbit-port-manager
  environment:
    - PUID={{ arrstack_puid }}
    - PGID={{ arrstack_pgid }}
    - TZ={{ arrstack_tz }}
    - UMASK_SET={{ arrstack_umask_set }}
    - WEBUI_PORT=8888
    - QBITTORRENT_SERVER=qbittorrent # IP Address of qbittorrent
    - QBITTORRENT_PORT=8888
    - PORT_FORWARDED=/gluetun/forwarded_port
    - HTTP_S=http # Select 'http' or 'https' depending on if you use certificates.
    - GLUETUN_HOST=vpn # IP or FQDN of gluetun control server
    - GLUETUN_PORT=8000 # port of gluetun control server
    - RECHECK_TIME=60 # number of seconds between checks to gluetun server for port
    - VPN_SERVICE_PROVIDER={{ arrstack_vpn_provider }}
    - OPENVPN_USER={{ arrstack_vpn_user }}
    - OPENVPN_PASSWORD={{ arrstack_vpn_pass }}
    - SERVER_REGIONS={{ arrstack_vpn_regions }}
    - PORT_FORWARD_ONLY=true
    - VPN_PORT_FORWARDING=on
    - VPN_PORT_FORWARDING_PROVIDER={{ arrstack_vpn_provider }}
    - QBITTORRENT_USER={{ arrstack_qbit_user }}
    - QBITTORRENT_PASS={{ arrstack_qbit_pass }}
  volumes:
    - "{{ arrstack_env_dir }}/config/gluetun:/gluetun"
  depends_on:
    - vpn
  network_mode: "service:vpn"
  restart: unless-stopped
  healthcheck:
    # CMD exec form never interprets "|" — the pipe was passed to curl as
    # a literal argument. CMD-SHELL runs the whole line through a shell.
    test: ["CMD-SHELL", "curl -H 'Authorization: $controlServerAuthKey' -s http://localhost:8000/v1/openvpn/status | grep -q '{\"status\":\"running\"}'"]
    interval: 30s
    timeout: 10s
    start_period: 60s
    retries: 3
homarr:
  image: ghcr.io/ajnart/homarr:latest
  container_name: homarr
  networks:
    - caddy
  volumes:
    # Quoted like every other service's volume entries: protects the
    # rendered values from YAML specials when the template output is
    # parsed with from_yaml.
    - "{{ arrstack_env_dir }}/config/homarr/configs:/app/data/configs"
    - "{{ arrstack_env_dir }}/config/homarr/icons:/app/public/icons"
    - "{{ arrstack_env_dir }}/config/homarr/data:/data"
    - "/var/run/docker.sock:/var/run/docker.sock" # Optional, only if you want docker integration
  restart: unless-stopped
  labels:
    caddy: "http://pichi.berlin"
    caddy.reverse_proxy: "{{ '{{' }}upstreams 7575{{ '}}'}}"
jellyseerr:
image: fallenbagel/jellyseerr:latest
container_name: jellyseerr
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/config/jellyseerr:/app/config"
restart: unless-stopped
labels:
caddy: "http://get.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 5055{{ '}}'}}"
audiobookshelf:
container_name: audiobookshelf
image: ghcr.io/advplyr/audiobookshelf:latest
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/config/audiobookshelf:/config"
- "{{ arrstack_env_dir }}/data/audiobookshelf:/metadata"
- "{{ arrstack_serve_dir }}/media/audiobooks:/audiobooks"
# - "{{ arrstack_serve_dir }}/media/podcasts:/podcasts" # TODO: If integrating podcasts
restart: unless-stopped
labels:
caddy: "http://books.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 80{{ '}}'}}"
jellyfin:
image: lscr.io/linuxserver/jellyfin:latest
container_name: jellyfin
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
devices:
- /dev/dri:/dev/dri
#environment:
#- JELLYFIN_PublishedServerUrl=192.168.0.5 #optional
volumes:
- "{{ arrstack_env_dir }}/config/jellyfin:/config"
- "{{ arrstack_env_dir }}/data/jellyfin:/config/data"
- "/mnt/ext/data/media/movies:/media/movies" # FIXME: To be changed?
- "{{ arrstack_serve_dir }}/media/tv:/media/tv"
- "{{ arrstack_serve_dir }}/media/music:/media/music"
ports: # FIXME: how to enable discovery behind proxies?
- 7359:7359/udp #optional - network discovery
- 1900:1900/udp #optional - dlna discovery
restart: unless-stopped
labels:
caddy: "http://media.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 8096{{ '}}'}}"
gonic:
image: sentriz/gonic:latest
networks:
- caddy
environment:
- PUID={{ arrstack_puid }}
- PGID={{ arrstack_pgid }}
- TZ={{ arrstack_tz }}
- UMASK_SET={{ arrstack_umask_set }}
volumes:
- "{{ arrstack_env_dir }}/data/gonic:/data"
- "{{ arrstack_env_dir }}/data/gonic_playlists:/playlists"
- "/srv/media/music:/music:ro"
- "/srv/media/podcasts:/podcasts"
#- /path/to/cache:/cache # transcode / covers / etc cache dir
labels:
caddy: "http://music.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 80{{ '}}'}}"
networks:
caddy:
external: true
volumes:
caddy_data: {}

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- arr

View file

@ -0,0 +1,2 @@
---
# vars file for arr

View file

@ -0,0 +1,11 @@
# Bring up the caddy-docker-proxy stack on its dedicated "caddy" network.
- name: Ensure caddy network exists
  community.docker.docker_network:
    name: caddy

- name: Start the compose stack
  community.docker.docker_compose_v2:
    project_name: caddy
    definition: "{{ lookup('template', 'docker-compose.yaml.j2') | from_yaml }}"
    remove_orphans: true
    wait: true
    wait_timeout: 60

View file

@ -0,0 +1,30 @@
services:
caddy:
image: lucaslorentz/caddy-docker-proxy:ci-alpine
ports:
- 80:80
- 443:443
networks:
- caddy
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- caddy_data:/caddy
labels:
caddy.auto_https: "off"
whoami:
container_name: whoami
image: traefik/whoami
networks:
- caddy
labels:
caddy: "http://test.pichi.berlin"
caddy.reverse_proxy: "{{ '{{' }}upstreams 80{{ '}}'}}" # has to be done to prevent ansible templating
networks:
caddy:
external: true
volumes:
caddy_data: {}

View file

@ -0,0 +1,56 @@
---
# Adds the Zabbly incus apt repository (key + sources entry) on
# Debian/Ubuntu hosts, unless the distro-provided packages are requested.
- name: Incus - Add package repository (apt)
  hosts: all
  gather_facts: true
  gather_subset:
    - "distribution_release"
  vars:
    task_release: "{{ incus_release | default('stable') }}"
    task_roles: "{{ incus_roles | default(['standalone', 'ui']) }}"
  any_errors_fatal: true
  tasks:
    - name: Check if distribution is supported
      ansible.builtin.meta: end_play
      when: 'ansible_distribution not in ("Ubuntu", "Debian")'

    - name: Create apt keyring path
      ansible.builtin.file:
        path: /etc/apt/keyrings/
        mode: "0755"
        state: directory
      when: 'task_roles | length > 0 and task_release != "distro"'

    - name: Add Zabbly repository key
      ansible.builtin.copy:
        src: files/zabbly-key.asc
        dest: /etc/apt/keyrings/ansible-zabbly.asc
        mode: "0644"
      notify: Update apt
      become: true
      when: 'task_roles | length > 0 and task_release != "distro"'

    - name: Get DPKG architecture
      # command instead of shell: no shell features are used
      ansible.builtin.command: dpkg --print-architecture
      register: dpkg_architecture
      changed_when: false
      check_mode: false
      when: 'task_roles | length > 0 and task_release != "distro"'

    - name: Add Zabbly package source
      ansible.builtin.template:
        src: files/incus.sources.tpl
        dest: /etc/apt/sources.list.d/ansible-zabbly-incus-{{ task_release }}.sources
        mode: "0644"  # NOTE(review): explicit mode added (was umask-dependent) — confirm
      notify: Update apt
      become: true
      when: 'task_roles | length > 0 and task_release != "distro"'

    - name: Handle apt by flushing handlers
      ansible.builtin.meta: flush_handlers

  handlers:
    - name: Update apt
      ansible.builtin.apt:
        force_apt_get: true
        update_cache: true
        cache_valid_time: 0
      become: true

View file

@ -0,0 +1,220 @@
---
- name: Incus - Install packages and bootstrap
hosts: all
gather_facts: true
gather_subset:
- "default_ipv4"
- "default_ipv6"
- "distribution_release"
vars:
task_init: "{{ incus_init | default('{}') }}"
task_ip_address: "{{ incus_ip_address | default(ansible_default_ipv6['address'] | default(ansible_default_ipv4['address'])) }}"
task_name: "{{ incus_name | default('') }}"
task_roles: "{{ incus_roles | default(['ui', 'standalone']) }}"
task_ovn_northbound: "{{ lookup('template', '../files/ovn/ovn-central.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6641') | join(',') }}"
task_servers: "{{ lookup('template', 'files/incus.servers.tpl') | from_yaml | sort }}"
any_errors_fatal: true
become: true
tasks:
- name: Install the Incus package (deb)
ansible.builtin.apt:
name:
- incus
install_recommends: no
state: present
register: install_deb
when: 'ansible_distribution in ("Debian", "Ubuntu") and task_roles | length > 0'
- name: Install the Incus package (rpm)
ansible.builtin.package:
name:
- incus
state: present
register: install_rpm
when: 'ansible_distribution == "CentOS" and task_roles | length > 0'
- name: Install the Incus UI package (deb)
ansible.builtin.apt:
name:
- incus-ui-canonical
install_recommends: no
state: present
when: 'ansible_distribution in ("Debian", "Ubuntu") and "ui" in task_roles'
# - name: Install btrfs tools
# ansible.builtin.package:
# name:
# - btrfs-progs
# state: present
# when: "task_roles | length > 0 and 'btrfs' in task_init['storage'] | dict2items | json_query('[].value.driver')"
#
# - name: Install ceph tools
# ansible.builtin.package:
# name:
# - ceph-common
# state: present
# when: "task_roles | length > 0 and 'ceph' in task_init['storage'] | dict2items | json_query('[].value.driver')"
#
# - name: Install LVM tools
# ansible.builtin.package:
# name:
# - lvm2
# state: present
# when: "task_roles | length > 0 and 'lvm' in task_init['storage'] | dict2items | json_query('[].value.driver')"
#
# - name: Install ZFS dependencies
# ansible.builtin.package:
# name:
# - zfs-dkms
# state: present
# when: "task_roles | length > 0 and 'zfs' in task_init['storage'] | dict2items | json_query('[].value.driver') and ansible_distribution == 'Debian'"
#
# - name: Install ZFS tools
# ansible.builtin.package:
# name:
# - zfsutils-linux
# state: present
# when: "task_roles | length > 0 and 'zfs' in task_init['storage'] | dict2items | json_query('[].value.driver')"
- name: Set uid allocation
ansible.builtin.shell:
cmd: "usermod root --add-subuids 10000000-1009999999"
when: '(install_deb.changed or install_rpm.changed) and ansible_distribution == "CentOS"'
- name: Set gid allocation
ansible.builtin.shell:
cmd: "usermod root --add-subgids 10000000-1009999999"
when: '(install_deb.changed or install_rpm.changed) and ansible_distribution == "CentOS"'
- name: Enable incus socket unit
ansible.builtin.systemd:
enabled: true
name: incus.socket
state: started
when: "install_deb.changed or install_rpm.changed"
- name: Enable incus service unit
ansible.builtin.systemd:
enabled: true
name: incus.service
state: started
when: "install_deb.changed or install_rpm.changed"
- name: Enable incus startup unit
ansible.builtin.systemd:
enabled: true
name: incus-startup.service
state: started
when: "install_deb.changed or install_rpm.changed"
- name: Set client listen address
ansible.builtin.shell:
cmd: "incus --force-local config set core.https_address {{ task_ip_address }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
- name: Set cluster listen address
ansible.builtin.shell:
cmd: "incus --force-local config set cluster.https_address {{ task_ip_address }}"
when: '(install_deb.changed or install_rpm.changed) and "cluster" in task_roles and task_servers[0] == inventory_hostname'
# - name: Set OVN NorthBound database
# shell:
# cmd: "incus --force-local config set network.ovn.northbound_connection={{ task_ovn_northbound }} network.ovn.client_cert=\"{{ lookup('file', '../data/ovn/'+ovn_name+'/'+inventory_hostname+'.crt') }}\" network.ovn.client_key=\"{{ lookup('file', '../data/ovn/'+ovn_name+'/'+inventory_hostname+'.key') }}\" network.ovn.ca_cert=\"{{ lookup('file', '../data/ovn/'+ovn_name+'/ca.crt') }}\""
# notify: Restart Incus
# when: '(install_deb.changed or install_rpm.changed) and task_ovn_northbound and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
- name: Add networks
ansible.builtin.shell:
cmd: "incus network create {{ item.key }} --type={{ item.value.type }}{% for k in item.value.local_config | default([]) %} {{ k }}={{ item.value.local_config[k] }}{% endfor %}{% for k in item.value.config | default([]) %} {{ k }}={{ item.value.config[k] }}{% endfor %}"
loop: "{{ task_init['network'] | dict2items }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
- name: Set network description
ansible.builtin.shell:
cmd: 'incus network set --property {{ item.key }} description="{{ item.value.description }}"'
loop: "{{ task_init['network'] | dict2items }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname)) and item.value.description | default(None)'
- name: Add storage pools
ansible.builtin.shell:
cmd: "incus storage create {{ item.key }} {{ item.value.driver }}{% for k in item.value.local_config | default([]) %} {{ k }}={{ item.value.local_config[k] }}{% endfor %}{% for k in item.value.config | default([]) %} {{ k }}={{ item.value.config[k] }}{% endfor %}"
loop: "{{ task_init['storage'] | dict2items }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
- name: Set storage pool description
ansible.builtin.shell:
cmd: 'incus storage set --property {{ item.key }} description="{{ item.value.description }}"'
loop: "{{ task_init['storage'] | dict2items }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname)) and item.value.description | default(None)'
- name: Add storage pool to default profile
ansible.builtin.shell:
cmd: "incus profile device add default root disk path=/ pool={{ item }}"
loop: "{{ task_init['storage'] | dict2items | json_query('[?value.default].key') }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
- name: Add network to default profile
ansible.builtin.shell:
cmd: "incus profile device add default eth0 nic network={{ item }} name=eth0"
loop: "{{ task_init['network'] | dict2items | json_query('[?value.default].key') }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
- name: Bootstrap the cluster
ansible.builtin.shell:
cmd: "incus --force-local cluster enable {{ inventory_hostname }}"
when: '(install_deb.changed or install_rpm.changed) and "cluster" in task_roles and task_servers[0] == inventory_hostname'
- name: Create join tokens
delegate_to: "{{ task_servers[0] }}"
ansible.builtin.shell:
cmd: "incus --force-local --quiet cluster add {{ inventory_hostname }}"
register: cluster_add
when: '(install_deb.changed or install_rpm.changed) and "cluster" in task_roles and task_servers[0] != inventory_hostname'
- name: Wait 5s to avoid token use before valid
ansible.builtin.wait_for:
timeout: 5
delegate_to: localhost
when: "cluster_add.changed"
- name: Join the cluster
throttle: 1
ansible.builtin.shell:
cmd: "incus --force-local admin init --preseed"
stdin: |-
cluster:
enabled: true
cluster_address: "{{ task_ip_address }}"
cluster_token: "{{ cluster_add.stdout }}"
server_address: "{{ task_ip_address }}"
member_config: {% for pool in task_init.storage %}{% for key in task_init.storage[pool].local_config | default([]) %}
- entity: storage-pool
name: {{ pool }}
key: {{ key }}
value: {{ task_init.storage[pool].local_config[key] }}{% endfor %}{% endfor %}{% for network in task_init.network %}{% for key in task_init.network[network].local_config | default([]) %}
- entity: network
name: {{ network }}
key: {{ key }}
value: {{ task_init.network[network].local_config[key] }}{% endfor %}{% endfor %}
when: "cluster_add.changed"
- name: Apply additional configuration
ansible.builtin.shell:
cmd: 'incus config set {{ item.key }}="{{ item.value }}"'
loop: "{{ task_init['config'] | default({}) | dict2items }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
- name: Load client certificates
ansible.builtin.shell:
cmd: 'incus config trust add-certificate --name "{{ item.key }}" --type={{ item.value.type | default(''client'') }} -'
stdin: "{{ item.value.certificate }}"
loop: "{{ task_init['clients'] | default({}) | dict2items }}"
when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))'
handlers:
- name: Restart Incus
ansible.builtin.systemd:
name: incus.service
state: restarted

View file

@ -0,0 +1,7 @@
---
# Entry point for the incus role: configure the package repository first,
# then install and bootstrap incus (standalone or cluster member).
- name: "Add incus repository to system"
ansible.builtin.include_tasks: add-repo.yaml
# TODO: Should presumably be split
- name: "Install and bootstrap incus"
ansible.builtin.include_tasks: bootstrap.yaml

View file

@ -0,0 +1,38 @@
Role Name
=========
Deploys the Paperless-ngx document management system as a Docker Compose stack (paperless-ngx, PostgreSQL, Redis), exposed through the Caddy reverse proxy via container labels.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View file

@ -0,0 +1,23 @@
---
# Role defaults for the paperless stack; override per host/group as needed.
# Host directory holding the compose project's container-internal data volumes.
stack_paperless_env_dir: /opt/stack_paperless
# UID/GID mapped into the paperless container (USERMAP_UID / USERMAP_GID).
stack_paperless_puid: 1000
stack_paperless_pgid: 100
# User-facing base directory; documents/ and consume/ are created beneath it.
stack_paperless_serve_dir: /srv
# Set false if the serve directories are managed elsewhere (e.g. a mount).
stack_paperless_serve_dir_create: true
stack_paperless_tz: America/Chicago
stack_paperless_ocr_language: eng # default OCR languages
stack_paperless_ocr_languages: eng deu frk # ALL installed languages
stack_paperless_ocr_skip_archive_file: with_text
stack_paperless_enable_update_check: true
# using !unsafe macro to stop ansible from interpolating as variables
stack_paperless_filename_format: !unsafe "{{created_year}}/{{correspondent}}/{{created}}_{{title}}"
# NOTE(review): the values below are placeholder credentials — always
# override them (ideally from ansible-vault); never deploy these defaults.
stack_paperless_dbname: ppldbname
stack_paperless_dbuser: ppldbuser
stack_paperless_dbpass: ppldbpass
stack_paperless_secret_key: super-secret-random-key
stack_paperless_admin_user: AnAdminUserName
stack_paperless_admin_password: AnAdminPassword

View file

@ -0,0 +1,2 @@
---
# handlers file for paperless

View file

@ -0,0 +1,34 @@
# Role metadata for Ansible Galaxy (generated skeleton; placeholders remain).
galaxy_info:
  author: your name
  description: your role description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Choose a valid license ID from https://spdx.org - some suggested licenses:
  # - BSD-3-Clause (default)
  # - MIT
  # - GPL-2.0-or-later
  # - GPL-3.0-only
  # - Apache-2.0
  # - CC-BY-4.0
  # NOTE(review): placeholder — must be replaced with a real SPDX ID before
  # publishing; ansible-lint rejects this literal value.
  license: license (GPL-2.0-or-later, MIT, etc)

  # Quoted: an unquoted 2.1 parses as a YAML float, so e.g. 2.10 would be
  # indistinguishable from 2.1; Galaxy expects this as a string.
  min_ansible_version: "2.1"

  # If this a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  # Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View file

@ -0,0 +1,29 @@
---
# Deploy the Paperless-ngx stack: create the environment directory and the
# user-facing data directories, then bring up the Docker Compose project.
- name: Create Paperless stack environment directory
  ansible.builtin.file:
    state: directory
    path: "{{ stack_paperless_env_dir }}"
    owner: root
    group: root
    # Quoted: an unquoted leading-zero mode is a YAML 1.1 octal integer and
    # is easy to get wrong (ansible-lint risky-octal); strings are safe.
    mode: "0700"

- name: Create user-facing data dir
  ansible.builtin.file:
    state: directory
    path: "{{ stack_paperless_serve_dir }}/{{ item }}"
    owner: "{{ stack_paperless_puid }}"
    group: "{{ stack_paperless_pgid }}"
    mode: "0770"
  when: stack_paperless_serve_dir_create
  loop:
    # The empty item creates the serve dir itself before its subdirectories.
    - ""
    - documents
    - consume

- name: Start compose stack
  community.docker.docker_compose_v2:
    project_name: paperless
    # The compose file is rendered from the role template at deploy time.
    definition: "{{ lookup('template', 'docker-compose.yaml.j2') | from_yaml }}"
    remove_orphans: true
    wait: true
    wait_timeout: 60

View file

@ -0,0 +1,80 @@
# Paperless-ngx stack: web app + PostgreSQL + Redis.
# Rendered by Ansible; all {{ ... }} expressions resolve at deploy time.
services:
  paperless:
    container_name: paperless
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    restart: unless-stopped
    networks:
      - caddy
      - backend
    security_opt:
      - no-new-privileges:true
    depends_on:
      - paperless-redis
      - paperless-postgres
    volumes:
      - "{{ stack_paperless_env_dir }}/data/paperless:/usr/src/paperless/data" # container data
      - "{{ stack_paperless_env_dir }}/data/paperless_export:/usr/src/paperless/export" # backup location
      - "{{ stack_paperless_serve_dir }}/documents:/usr/src/paperless/media" # document location
      - "{{ stack_paperless_serve_dir }}/consume:/usr/src/paperless/consume" # watch folder
    environment:
      - "PAPERLESS_TIME_ZONE={{ stack_paperless_tz }}"
      - "USERMAP_UID={{ stack_paperless_puid }}"
      - "USERMAP_GID={{ stack_paperless_pgid }}"
      - "PAPERLESS_OCR_LANGUAGE={{ stack_paperless_ocr_language }}"
      - "PAPERLESS_OCR_LANGUAGES={{ stack_paperless_ocr_languages }}"
      - "PAPERLESS_OCR_SKIP_ARCHIVE_FILE={{ stack_paperless_ocr_skip_archive_file }}"
      - "PAPERLESS_ENABLE_UPDATE_CHECK={{ stack_paperless_enable_update_check }}"
      - "PAPERLESS_REDIS=redis://paperless-redis:6379"
      - "PAPERLESS_DBHOST=paperless-postgres"
      - "PAPERLESS_DBNAME={{ stack_paperless_dbname }}"
      - "PAPERLESS_DBUSER={{ stack_paperless_dbuser }}"
      - "PAPERLESS_DBPASS={{ stack_paperless_dbpass }}"
      - "PAPERLESS_SECRET_KEY={{ stack_paperless_secret_key }}"
      - "PAPERLESS_FILENAME_FORMAT={{ stack_paperless_filename_format }}"
      - "PAPERLESS_ADMIN_USER={{ stack_paperless_admin_user }}"
      - "PAPERLESS_ADMIN_PASSWORD={{ stack_paperless_admin_password }}"
    labels:
      # Site address consumed by caddy-docker-proxy. Overridable via
      # stack_paperless_caddy_url; defaults to the previous hard-coded host
      # so existing deployments are unaffected.
      caddy: "{{ stack_paperless_caddy_url | default('http://documents.pichi.berlin') }}"
      # Braces are escaped so the literal "{{upstreams 8000}}" Caddyfile
      # placeholder survives Jinja rendering.
      caddy.reverse_proxy: "{{ '{{' }}upstreams 8000{{ '}}' }}"
  paperless-postgres:
    container_name: paperless-postgres
    image: postgres:16.0-alpine #fixedVersion
    restart: unless-stopped
    networks:
      - backend
    security_opt:
      - no-new-privileges:true
    volumes:
      - "{{ stack_paperless_env_dir }}/data/postgres:/var/lib/postgresql/data"
    environment:
      POSTGRES_DB: "{{ stack_paperless_dbname }}"
      POSTGRES_USER: "{{ stack_paperless_dbuser }}"
      POSTGRES_PASSWORD: "{{ stack_paperless_dbpass }}"
  paperless-redis:
    container_name: paperless-redis
    image: redis:7.2-alpine #fixedVersion
    restart: unless-stopped
    networks:
      - backend
    security_opt:
      - no-new-privileges:true
    volumes:
      - "{{ stack_paperless_env_dir }}/data/redis:/data"
    environment:
      REDIS_ARGS: "--save 60 10"
networks:
  # Shared external proxy network; must already exist on the host.
  caddy:
    external: true
  backend:
    name: backend
    driver: bridge
# secrets:
#   paperless_db_paperless_passwd:
#     file: ./secrets/paperless_db_paperless_passwd
#   paperless_secret_key:
#     file: ./secrets/paperless_secret_key
#

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,5 @@
---
# Smoke-test playbook for the paperless role (ansible-lint requires a
# play name: name[play]).
- name: Test the paperless role on localhost
  hosts: localhost
  remote_user: root
  roles:
    - paperless

View file

@ -0,0 +1,2 @@
---
# vars file for paperless

View file

@ -0,0 +1,5 @@
{# Emit the sorted play hosts that belong to cluster task_name. The
   magic variable is referenced directly (vars[...] is redundant), and
   default() guards hosts that don't define incus_name/incus_roles so a
   single unconfigured host no longer aborts the template render. #}
{% for host in ansible_play_hosts | sort %}
{% if hostvars[host]['incus_name'] | default('') == task_name and "cluster" in hostvars[host]['incus_roles'] | default([]) %}
- {{ host }}
{% endif %}
{% endfor %}

View file

@ -0,0 +1,8 @@
# Managed by Ansible, do not modify.
Enabled: yes
Types: deb
URIs: https://pkgs.zabbly.com/incus/{{ task_release }}/
Suites: {{ ansible_distribution_release }}
Components: main
Architectures: {{ dpkg_architecture.stdout }}
Signed-By: /etc/apt/keyrings/ansible-zabbly.asc

View file

@ -0,0 +1,41 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQGNBGTlYcIBDACYQoVXVyQ6Y3Of14GwEaiv/RstQ8jWnH441OtvDbD/VVT8yF0P
pUfypWjQS8aq0g32Qgb9H9+b8UAAKojA2W0szjJFlmmSq19YDMMmNC4AnfeZlKYM
61Zonna7fPaXmlsTlSiUeo/PGvmAXrkFURC9S8FbhZdWEcUpf9vcKAoEzV8qGA4J
xbKlj8EOjSkdq3OQ1hHjP8gynbbzMhZQwjbnWqoiPj35ed9EMn+0QcX+GmynGq6T
hBXdRdeQjZC6rmXzNF2opCyxqx3BJ0C7hUtpHegmeoH34wnJHCqGYkEKFAjlRLoW
tOzHY9J7OFvB6U7ENtnquj7lg2VQK+hti3uiHW+oide06QgjVw2irucCblQzphgo
iX5QJs7tgFFDsA9Ee0DZP6cu83hNFdDcXEZBc9MT5Iu0Ijvj7Oeym3DJpkCuIWgk
SeP56sp7333zrg73Ua7YZsZHRayAe/4YdNUua+90P4GD12TpTtJa4iRWRd7bis6m
tSkKRj7kxyTsxpEAEQEAAbQmWmFiYmx5IEtlcm5lbCBCdWlsZHMgPGluZm9AemFi
Ymx5LmNvbT6JAdQEEwEKAD4WIQRO/FkGlssVuHxzo62CzIeXyDjc/QUCZOVhwgIb
AwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCCzIeXyDjc/W05C/4n
lGRTlyOETF2K8oWbjtan9wlttQ+pwymJCnP8T+JJDycGL8dPsGdG1ldHdorVZpFi
1P+Bem9bbiW73TpbX+WuCfP1g3WN7AVa2mYRfSVhsLNeBAMRgWgNW9JYsmg99lmY
aPsRYZdGu/PB+ffMIyWhjL3CKCbYS6lV5N5Mi4Lobyz/I1Euxpk2vJhhUqh786nJ
pQpDnvEl1CRANS6JD9bIvEdfatlAhFlrz1TTf6R7SlppyYI7tme4I/G3dnnHWYSG
cGRaLwpwobTq0UNSO71g7+at9eY8dh5nn2lZUvvxZvlbXoOoPxKUoeGVXqoq5F7S
QcMVAogYtyNlnLnsUfSPw6YFRaQ5o00h30bR3hk+YmJ47AJCRY9GIc/IEdSnd/Z5
Ea7CrP2Bo4zxPgcl8fe311FQRTRoWr19l5PXZgGjzy6siXTrYQi6GjLtqVB5SjJf
rrIIy1vZRyDL96WPu6fS+XQMpjsSygj+DBFk8OAvHhQhMCXHgT4BMyg4D5GE0665
AY0EZOVhwgEMAMIztf6WlRsweysb0tzktYE5E/GxIK1lwcD10Jzq3ovJJPa2Tg2t
J6ZBmMQfwU4OYO8lJxlgm7t6MYh41ZZaRhySCtbJiAXqK08LP9Gc1iWLRvKuMzli
NFSiFDFGT1D6kwucVfL/THxvZlQ559kK+LB4iXEKXz37r+MCX1K9uiv0wn63Vm0K
gD3HDgfXWYJcNyXXfJBe3/T5AhuSBOQcpa7Ow5n8zJ+OYg3FFKWHDBTSSZHpbJFr
ArMIGARz5/f+EVj9XGY4W/+ZJlxNh8FzrTLeRArmCWqKLPRG/KF36dTY7MDpOzlw
vu7frv+cgiXHZ2NfPrkH8oOl4L+ufze5KBGcN0QwFDcuwCkv/7Ft9Ta7gVaIBsK7
12oHInUJ6EkBovxpuaLlHlP8IfmZLZbbHzR2gR0e6IhLtrzd7urB+gXUtp6+wCL+
kWD14TTJhSQ+SFU8ajvUah7/1m2bxdjZNp9pzOPGkr/jEjCM0CpZiCY62SeIJqVc
4/ID9NYLAGmSIwARAQABiQG8BBgBCgAmFiEETvxZBpbLFbh8c6OtgsyHl8g43P0F
AmTlYcICGwwFCQPCZwAACgkQgsyHl8g43P0wEgv+LuknyXHpYpiUcJOl9Q5yLokd
o7tJwJ+9Fu7EDAfM7mPgyBj7Ad/v9RRP+JKWHqIYEjyrRnz9lmzciU+LT/CeoQu/
MgpU8wRI4gVtLkX2238amrTKKlVjQUUNHf7cITivUs/8e5W21JfwvcSzu5z4Mxyw
L6vMlBUAixtzZSXD6O7MO9uggHUZMt5gDSPXG2RcIgWm0Bd1yTHL7jZt67xBgZ4d
hUoelMN2XIDLv4SY78jbHAqVN6CLLtWrz0f5YdaeYj8OT6Ohr/iJQdlfVaiY4ikp
DzagLi0LvG9/GuB9eO6yLuojg45JEH8DC7NW5VbdUITxQe9NQ/j5kaRKTEq0fyZ+
qsrryTyvXghxK8oMUcI10l8d41qXDDPCA40kruuspCZSAle3zdqpYqiu6bglrgWr
Zr2Nm9ecm/kkqMIcyJ8e2mlkuufq5kVem0Oez+GIDegvwnK3HAqWQ9lzdWKvnLiE
gNkvg3bqIwZ/WoHBnSwOwwAzwarJl/gn8OG6CIeP
=8Uc6
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -0,0 +1,11 @@
---
# Handler: reboot the machine, but only when the update tasks found the
# /var/run/reboot-required marker (checked via the registered stat result).
- name: Reboot host
  become: true
  when: reboot_required_file.stat.exists
  ansible.builtin.reboot:
    msg: "Reboot initiated by Ansible"
    # Fail fast on unreachable hosts, but allow a slow boot to finish.
    connect_timeout: 5
    reboot_timeout: 600
    pre_reboot_delay: 0
    post_reboot_delay: 30
    # Simple command to prove the host is back and usable.
    test_command: whoami

View file

@ -0,0 +1,38 @@
---
- name: Ensure aptitude installed
ansible.builtin.apt:
name: "aptitude"
state: present
tags:
- apt
become: true
- name: Ensure OS upgraded
ansible.builtin.apt:
upgrade: dist
tags:
- apt
- update
- os
become: true
- name: Check if reboot is necessary
register: reboot_required_file
ansible.builtin.stat:
path: /var/run/reboot-required
get_checksum: false
tags:
- os
- reboot
notify: Reboot host
- name: All system packages updated
ansible.builtin.apt:
name: "*"
state: latest # noqa package-latest
tags:
- apt
- update
- packages
become: true

View file

@ -5,7 +5,7 @@
# DEVICE="pixma:MG5400_BD2FD8000000"
DEVICE="airscan:w0:CANON INC. MG5400 series"
scanimage -d "$DEVICE" --mode Gray --batch --batch-prompt --format=png --resolution=300
scanimage -d "$DEVICE" --mode Color --batch --batch-prompt --format=png --resolution=300
# scanimage -d "$DEVICE" --mode Gray --batch --format=png --button-controlled=yes --resolution=300
# ensure correct order if we scan more than 9 pages