Compare commits


9 Commits

@@ -60,7 +60,7 @@
"name": "plugins/modules/portainer_stack.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8f8c5a3fcefa02becec7dad10442e92db1b40dccdd43e297baa273a03f260105",
"chksum_sha256": "15a5f2d3f4f641e5ffe92a52ec5ee8e39770d19c88426acb69e4f2fc7ea109d6",
"format": 1
},
{

@@ -23,7 +23,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "df6db8314d203f2365d1ff4253d6d793197067dced93c835a6b88074c18d4921",
"chksum_sha256": "1e0743f5c4caf0acd28dfbb7ca30d59971f3548a4a5da3d880cf327b497780dc",
"format": 1
},
"format": 1

@@ -196,7 +196,7 @@ def _get_stack_contents(params):
return f.read()
if params.get("definition"):
return yaml.dump(params["definition"], indent=4)
return yaml.dump(params["definition"], indent=2)
raise ValueError("No docker_compose_file_path or definition provided.")
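
For context, a sketch of the whole function this hunk touches, reconstructed from the visible lines; the file-reading branch is an assumption based on the `return f.read()` context line and the parameter names shown:

import yaml

def _get_stack_contents(params):
    # Prefer an on-disk compose file when one is supplied (assumed branch).
    if params.get("docker_compose_file_path"):
        with open(params["docker_compose_file_path"]) as f:
            return f.read()
    # Otherwise serialise the inline definition; indent=2 matches the
    # conventional docker-compose style (previously indent=4).
    if params.get("definition"):
        return yaml.dump(params["definition"], indent=2)
    raise ValueError("No docker_compose_file_path or definition provided.")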

@@ -12,24 +12,6 @@ users:
group: *main_user
passwordless_sudo: true
directories:
# path on qnap where downloads go
downloads_dir: /mnt/ssd0/downloads
# path on qnap where plex transcoding happens
transcoding_dir: /mnt/ssd0/transcoding
# path on qnap where movies are stored
movies_dir: /mnt/mergerfs/media/movies
# path on qnap where tv shows are stored
tv_dir: /mnt/mergerfs/media/tv
# path on qnap where docker compose files are stored
docker_compose_directory: /etc/docker-compose
# path where photoprism photos are stored
photoprism_dir: /mnt/mergerfs/photoprism
# path to youtube videos
youtube_dir: /mnt/mergerfs/media/youtube
# path to where paperless docs will be stored
documents_dir: /mnt/mergerfs/documents
desired_docker_images:
- ubuntu:latest
@@ -37,3 +19,16 @@ portainer_endpoint: -1
portainer_base_url: "http://qnap:9000"
external_docker_networks: []
portainer_required_files: []
restart_policy: unless-stopped
backup_labels:
ie.cianhatton.backup.enabled: true
ie.cianhatton.backup.schedule: nightly
docker_backup_aws_s3_encrypt: false
docker_backup_host_backup_directory: "/tmp"
docker_backup_retain_count: 3
docker_backup_restore_latest_s3_key: true
docker_backup_fail_on_no_s3_backups: false
docker_backup_restore_force: false
portainer_user: admin

@@ -52,7 +52,7 @@ docker_compose_services:
command: server --console-address ":9001" /data
volumes:
minio_storage: { }
minio_storage: {}
ansible_pull_path: /usr/local/bin/ansible-pull
@@ -66,4 +66,4 @@ cron_hour: "4"
# - "tcp://0.0.0.0:2375"
# - "unix:///var/run/docker.sock"
docker_backup_host_backup_directory: "/tmp"
portainer_roles: []

@@ -10,6 +10,24 @@ mounts:
- /mnt/data/device2
options: allow_other,use_ino
directories:
# path on qnap where downloads go
downloads_dir: /mnt/ssd0/downloads
# path on qnap where plex transcoding happens
transcoding_dir: /mnt/ssd0/transcoding
# path on qnap where movies are stored
movies_dir: /mnt/mergerfs/media/movies
# path on qnap where tv shows are stored
tv_dir: /mnt/mergerfs/media/tv
# path on qnap where docker compose files are stored
docker_compose_directory: /etc/docker-compose
# path where photoprism photos are stored
photoprism_dir: /mnt/mergerfs/photoprism
# path to youtube videos
youtube_dir: /mnt/mergerfs/media/youtube
# path to where paperless docs will be stored
documents_dir: /mnt/mergerfs/documents
# these directories will be backed up to s3.
backup_directories:
- path: /mnt/mergerfs/photoprism/originals
@@ -23,10 +41,6 @@ backup_directories:
cron_hour: "5"
docker_backup_aws_s3_encrypt: false
docker_backup_host_backup_directory: "/tmp"
docker_backup_retain_count: 3
devices:
- uuid: a54c1bde-1400-4975-bf24-08c603ca3a11 # /dev/sdc1
path: /mnt/data/device0
@@ -59,25 +73,6 @@ portainer_required_templates:
portainer_endpoint: 2
services:
- name: vpn-stack
template_vars:
vpn: protonwire # protonwire or surfshark
qbittorrent:
enabled: true
image: lscr.io/linuxserver/qbittorrent
tag: 4.5.3
radarr:
enabled: true
image: lscr.io/linuxserver/radarr
tag: 4.5.2
sonarr:
enabled: true
image: lscr.io/linuxserver/sonarr
tag: 3.0.10
jackett:
enabled: true
image: lscr.io/linuxserver/jackett
tag: 0.21.235
- name: gitea
template_vars:
image: gitea/gitea
@@ -109,10 +104,18 @@ services:
image: photoprism/photoprism
tag: 230615
- name: olivetin
- name: pihole
- name: paperless
- name: gotify
- name: diun
# - name: ghost
pihole_volumes:
qnap_app: {}
qnap_dns: {}
portainer_roles:
- role: portainer_dashy
- role: portainer_dashdot
- role: portainer_glances
- role: portainer_arr
- role: portainer_gotify
- role: portainer_pihole

@@ -1,9 +1,6 @@
---
vault_file: vault_vars/qnap-vault.yml
docker_backup_aws_s3_encrypt: false
docker_backup_host_backup_directory: "/tmp"
portainer_required_templates:
- source_file: diun-config.j2
dest_file_name: diun-config.yml
@@ -12,7 +9,7 @@ portainer_required_templates:
portainer_endpoint: 23
services:
- name: pihole
# - name: pihole
- name: hasteypaste
- name: diun
@@ -27,3 +24,18 @@ docker_daemon_options:
- "tcp://0.0.0.0:2375"
- "unix:///var/run/docker.sock"
# Note: this must be in line with the host name;
# this is a workaround, as it is not possible to template
# the name of the volume in the portainer task.
pihole_volumes:
snunmu_app: {}
snunmu_dns: {}
portainer_roles:
- role: portainer_bookstack
- role: portainer_vaultwarden
- role: portainer_dashy
- role: portainer_dashdot
- role: portainer_glances
- role: portainer_pihole

@@ -2,6 +2,8 @@
- name: Update packages and ensure users on all hosts.
hosts: all
become: true
gather_facts: false
strategy: free
pre_tasks:
- name: Update Packages
ansible.builtin.apt:
@@ -12,6 +14,7 @@
- name: Configure mergerfs pools.
hosts: mergerfs
strategy: free
become: true
roles:
- role: setup_mergerfs
@@ -19,6 +22,7 @@
- name: Configure samba shares.
hosts: all
strategy: free
become: true
roles:
- role: geerlingguy.samba
@@ -27,11 +31,13 @@
- name: Install Docker on Docker hosts.
hosts: docker
become: true
strategy: free
roles:
- role: geerlingguy.pip
- role: geerlingguy.docker
- name: Install Portainer on Portainer hosts.
gather_facts: false
hosts: portainer
become: true
pre_tasks:
@@ -55,43 +61,30 @@
- role: setup_compose_services
tags: [compose]
- name: Setup and deploy portainer services (snunmu).
hosts: snunmu
- name: Setup and deploy portainer services.
gather_facts: false
hosts: servers
become: true
strategy: free
tags: [services]
pre_tasks:
- name: Include vault variables.
ansible.builtin.include_vars: '../{{ vault_file }}'
tags: [always]
roles:
- role: portainer_bookstack
- role: portainer_vaultwarden
- role: portainer_dashy
- role: portainer_dashdot
- role: portainer_glances
tasks:
- name: Setup and deploy portainer services.
ansible.builtin.include_role:
name: "{{ item.role }}"
with_items: "{{ portainer_roles }}"
- name: Setup and deploy portainer services (qnap).
hosts: qnap
- name: Setup and deploy templated portainer services.
hosts: servers
become: true
tags: [services]
pre_tasks:
- name: Include vault variables.
ansible.builtin.include_vars: '../{{ vault_file }}'
tags: [always]
roles:
- role: portainer_dashy
- role: portainer_dashdot
- role: portainer_glances
#
#- name: Setup and deploy templated portainer services.
# hosts: servers
# become: true
# pre_tasks:
# - name: Include vault variables.
# ansible.builtin.include_vars: '../{{ vault_file }}'
# tags: [always]
# roles:
# - role: setup_hosted_services
# tags: [services]
- role: setup_hosted_services
tags: [services]

@@ -8,7 +8,7 @@ roles:
collections:
- name: https://github.com/chatton/ansible-portainer.git
type: git
version: support_definition
version: master
- name: https://github.com/chatton/ansible-docker-backup.git
type: git
version: master

@@ -0,0 +1,38 @@
---
arr_portainer_stack_name: vpn-stack
arr_vpn_image: "ghcr.io/tprasadtp/protonwire"
arr_vpn_tag: "7.2.0"
# Qbittorrent
qbittorrent_image: lscr.io/linuxserver/qbittorrent
qbittorrent_tag: 4.5.3
qbittorrent_container_name: qbittorrent
qbittorrent_puid: 1000
qbittorrent_pgid: 1000
qbittorrent_exposed_web_ui_port: 15000
qbittorrent_exposed_download_port: 6881
# Radarr
radarr_image: lscr.io/linuxserver/radarr
radarr_tag: 4.5.2
radarr_container_name: radarr
radarr_exposed_port: 7878
radarr_puid: 1000
radarr_pgid: 1000
# Sonarr
sonarr_image: lscr.io/linuxserver/sonarr
sonarr_tag: 3.0.10
sonarr_exposed_port: 8989
sonarr_container_name: sonarr
sonarr_puid: 1000
sonarr_pgid: 1000
# Jackett
jackett_image: lscr.io/linuxserver/jackett
jackett_tag: 0.21.235
jackett_exposed_port: 9117
jackett_container_name: jackett
jackett_puid: 1000
jackett_pgid: 1000

@@ -0,0 +1,137 @@
---
- name: "Arr | Restore any missing volumes from S3"
ansible.builtin.include_role:
name: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_backup_s3_volume: "{{ item }}"
with_items:
- name: "{{ arr_portainer_stack_name }}_qbittorrent_config"
- name: "{{ arr_portainer_stack_name }}_radarr_config"
- name: "{{ arr_portainer_stack_name }}_sonarr_config"
- name: "{{ arr_portainer_stack_name }}_jackett_config"
- name: "Arr | Update Portainer."
chatton.portainer.portainer_stack:
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ arr_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: present
definition:
version: "3.1"
services:
protonwire:
container_name: protonwire
image: "{{ arr_vpn_image }}:{{ arr_vpn_tag }}"
init: true
restart: unless-stopped
environment:
# Quote this value, as the server name can contain '#'.
PROTONVPN_SERVER: "{{ vpn.protonvpn_server }}"
# Set this to 1 to show debug logs for issue forms.
DEBUG: "1"
# Set this to 0 to disable kill-switch.
KILL_SWITCH: "0"
# follow instructions here https://github.com/tprasadtp/protonvpn-docker
WIREGUARD_PRIVATE_KEY: "{{ vpn.wireguard_private_key }}"
LAN_NETWORK: "{{ vpn.lan_network }}"
cap_add:
- NET_ADMIN
# sysctl net.ipv4.conf.all.rp_filter is mandatory!
# net.ipv6.conf.all.disable_ipv6 disables IPv6 as protonVPN does not support IPv6.
# 'net.*' sysctls are not required on application containers,
# as they share network stack with protonwire container.
sysctls:
net.ipv4.conf.all.rp_filter: 2
net.ipv6.conf.all.disable_ipv6: 1
volumes:
- type: tmpfs
target: /tmp
ports:
# qbittorrent
- "{{ qbittorrent_exposed_web_ui_port }}:15000"
- "{{ qbittorrent_exposed_download_port }}:6881"
- "{{ qbittorrent_exposed_download_port }}:6881/udp"
# radarr
- "{{ radarr_exposed_port }}:7878"
# sonarr
- "{{ sonarr_exposed_port }}:8989"
# jackett
- "{{ jackett_exposed_port }}:9117"
dns:
- 1.1.1.1
- 8.8.8.8
qbittorrent:
labels: "{{ backup_labels}}"
depends_on:
- protonwire
image: "{{ qbittorrent_image }}:{{ qbittorrent_tag }}"
container_name: "{{ qbittorrent_container_name }}"
network_mode: "service:protonwire"
environment:
- "PUID={{ qbittorrent_puid }}"
- "PGID={{ qbittorrent_pgid }}"
- TZ=Europe/London
- "WEBUI_PORT=15000"
volumes:
- qbittorrent_config:/config
- "{{ directories.downloads_dir }}:/downloads"
restart: unless-stopped
radarr:
labels: "{{ backup_labels}}"
depends_on:
- protonwire
image: "{{ radarr_image }}:{{ radarr_tag }}"
container_name: "{{ radarr_container_name }}"
network_mode: "service:protonwire"
environment:
- "PUID={{ radarr_puid }}"
- "PGID={{ radarr_pgid }}"
- TZ=Europe/London
volumes:
- radarr_config:/config
- "{{ directories.movies_dir }}:/movies"
- "{{ directories.downloads_dir }}:/downloads"
restart: unless-stopped
sonarr:
depends_on:
- protonwire
image: "{{ sonarr_image }}:{{ sonarr_tag }}"
labels: "{{ backup_labels}}"
container_name: "{{ sonarr_container_name }}"
network_mode: "service:protonwire"
environment:
- "PUID={{ sonarr_puid }}"
- "PGID={{ sonarr_pgid }}"
- TZ=Europe/London
volumes:
- sonarr_config:/config
- "{{ directories.tv_dir }}:/tv"
- "{{ directories.downloads_dir }}:/downloads"
restart: unless-stopped
jackett:
labels: "{{ backup_labels}}"
depends_on:
- protonwire
image: "{{ jackett_image }}:{{ jackett_tag }}"
container_name: "{{ jackett_container_name }}"
network_mode: "service:protonwire"
environment:
- "PUID={{ jackett_puid }}"
- "PGID={{ jackett_pgid }}"
- TZ=Europe/London
- AUTO_UPDATE=true
volumes:
- jackett_config:/config
- "{{ directories.downloads_dir }}:/downloads"
restart: unless-stopped
volumes:
qbittorrent_config: {}
radarr_config: {}
sonarr_config: {}
jackett_config: {}

@@ -1,8 +1,7 @@
---
bookstack_state: present
bookstack_image: linuxserver/bookstack
bookstack_tag: 23.06.1
bookstack_backup_enabled: true
bookstack_backup_schedule: "nightly"
bookstack_puid: 1000
bookstack_pgid: 1000
bookstack_db_host: qnap
@@ -10,9 +9,5 @@ bookstack_database: bookstackapp
bookstack_db_user: bookstack
bookstack_app_url: https://bookstack.cianhatton.ie
bookstack_expose_port: 6875
bookstack_restart_policy: unless-stopped
bookstack_container_name: bookstack
bookstack_portainer_stack_name: bookstack
bookstack_docker_backup_restore_force: false
bookstack_docker_backup_restore_latest_s3_key: true
bookstack_docker_backup_fail_on_no_s3_backups: false

@@ -3,27 +3,22 @@
ansible.builtin.include_role:
name: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_backup_restore_force: "{{ bookstack_docker_backup_restore_force }}"
docker_backup_restore_latest_s3_key: "{{ bookstack_docker_backup_restore_latest_s3_key }}"
docker_backup_fail_on_no_s3_backups: "{{ bookstack_docker_backup_fail_on_no_s3_backups }}"
docker_backup_s3_volume:
name: "{{ bookstack_portainer_stack_name }}_config"
- name: "Bookstack | Update Portainer."
chatton.portainer.portainer_stack:
username: admin
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ bookstack_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: present
state: "{{ bookstack_state }}"
definition:
version: "3.1"
services:
bookstack:
labels:
ie.cianhatton.backup.enabled: "{{ bookstack_backup_enabled }}"
ie.cianhatton.backup.schedule: "{{ bookstack_backup_schedule }}"
labels: "{{ backup_labels }}"
image: "{{ bookstack_image }}:{{ bookstack_tag }}"
container_name: "{{ bookstack_container_name }}"
environment:
@@ -38,6 +33,6 @@
- config:/config
ports:
- "{{ bookstack_expose_port }}:80"
restart: "{{ bookstack_restart_policy }}"
restart: "{{ restart_policy }}"
volumes:
config: {}

@@ -1,7 +1,7 @@
---
dashdot_state: present
dashdot_image: mauricenino/dashdot
dashdot_tag: latest
dashdot_expose_port: 3010
dashdot_portainer_stack_name: dashdot
dashdot_container_name: dashdot
dashdot_restart_policy: unless-stopped

@@ -1,18 +1,18 @@
---
- name: "Dashy | Update Portainer."
chatton.portainer.portainer_stack:
username: admin
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ dashdot_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: present
state: "{{ dashdot_state }}"
definition:
services:
dashdot:
container_name: "{{ dashdot_container_name}}"
image: "{{ dashdot_image }}:{{ dashdot_tag }}"
restart: "{{ dashdot_restart_policy }}"
restart: "{{ restart_policy }}"
privileged: true
ports:
- '{{ dashdot_expose_port }}:3001'

@@ -1,10 +1,10 @@
---
dashy_state: present
dashy_image: lissy93/dashy
dashy_tag: 2.1.1
dashy_expose_port: 4000
dashy_portainer_stack_name: dashy
dashy_container_name: dashy
dashy_restart_policy: unless-stopped
dashy_config_dir: /etc/config/dashy
dashy_config_file: dashy-config.yml

@@ -16,12 +16,12 @@
- name: "Dashy | Update Portainer."
chatton.portainer.portainer_stack:
username: admin
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ dashy_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: present
state: "{{ dashy_state }}"
definition:
version: '3'
services:
@@ -36,7 +36,7 @@
- "NODE_ENV={{ dashy_node_env }}"
- "UID={{ dashy_uid }}"
- "GID={{ dashy_gid }}"
restart: "{{ dashy_restart_policy}}"
restart: "{{ restart_policy}}"
# Configure healthchecks
healthcheck:
test: ['CMD', 'node', '/app/services/healthcheck']

@@ -1,8 +1,8 @@
---
glances_state: present
glances_image: nicolargo/glances
glances_tag: latest-alpine
glances_expose_port: 8083
glances_portainer_stack_name: glances
glances_container_name: glances
glances_restart_policy: unless-stopped
glances_pid: host

@@ -1,17 +1,17 @@
---
- name: "Glances | Update Portainer."
chatton.portainer.portainer_stack:
username: admin
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ glances_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: present
state: "{{ glances_state }}"
definition:
services:
glances:
image: "{{ glances_image }}:{{ glances_tag }}"
restart: "{{ glances_restart_policy }}"
restart: "{{ restart_policy }}"
pid: "{{ glances_pid }}"
ports:
- "{{ glances_expose_port}}:61208"

@@ -0,0 +1,7 @@
---
gotify_state: present
gotify_image: gotify/server
gotify_tag: 2.2.5
gotify_expose_port: 7875
gotify_portainer_stack_name: gotify
gotify_container_name: gotify

@@ -0,0 +1,30 @@
---
- name: "Gotify | Restore any missing volumes from S3"
ansible.builtin.include_role:
name: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_backup_s3_volume:
name: "{{ gotify_portainer_stack_name }}_data"
- name: "Gotify | Update Portainer."
chatton.portainer.portainer_stack:
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ gotify_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: "{{ gotify_state }}"
definition:
version: "3.1"
services:
gotify:
labels: "{{ backup_labels }}"
image: "{{ gotify_image}}:{{ gotify_tag }}"
container_name: "{{ gotify_container_name }}"
volumes:
- data:/app/data
ports:
- "{{ gotify_expose_port }}:80"
restart: "{{ restart_policy }}"
volumes:
data: {}

@@ -1,22 +1,7 @@
---
linkding_state: present
linkding_image: sissbruecker/linkding
linkding_tag: latest
linkding_backup_enabled: true
linkding_backup_schedule: "nightly"
linkding_expose_port: 9090
linkding_portainer_stack_name: linkding
linkding_container_name: linkding
linkding_restart_policy: unless-stopped
# Environment variables
linkding_websocket_enabled: true
linkding_sends_allowed: true
linkding_emergency_access_allowed: true
linkding_domain: "https://vault.cianhatton.ie"
linkding_signups_allowed: false
linkding_webvault_enabled: true
# Backup variables
linkding_docker_backup_restore_force: false
linkding_docker_backup_restore_latest_s3_key: true
linkding_docker_backup_fail_on_no_s3_backups: false

@@ -3,33 +3,28 @@
ansible.builtin.include_role:
name: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_backup_restore_force: "{{ linkding_docker_backup_restore_force }}"
docker_backup_restore_latest_s3_key: "{{ linkding_docker_backup_restore_latest_s3_key }}"
docker_backup_fail_on_no_s3_backups: "{{ linkding_docker_backup_fail_on_no_s3_backups }}"
docker_backup_s3_volume:
name: "{{ linkding_portainer_stack_name }}_data"
- name: "Linkding | Update Portainer."
chatton.portainer.portainer_stack:
username: admin
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ linkding_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: present
state: "{{ linkding_state }}"
definition:
version: '3'
services:
linkding:
labels:
ie.cianhatton.backup.enabled: "{{ linkding_backup_enabled }}"
ie.cianhatton.backup.schedule: "{{ linkding_backup_schedule }}"
labels: "{{ backup_labels }}"
container_name: "{{ linkding_container_name }}"
image: "{{ linkding_image }}:{{ linkding_tag }}"
ports:
- "{{ linkding_expose_port }}:9090"
volumes:
- "data:/etc/linkding/data"
restart: "{{ linkding_restart_policy }}"
restart: "{{ restart_policy }}"
volumes:
data: {}

@@ -0,0 +1,8 @@
---
pihole_state: present
pihole_image: pihole/pihole
pihole_tag: 2022.09.2
pihole_portainer_stack_name: pihole
pihole_container_name: pihole
# this variable MUST be set. See the tasks for the expected value.
pihole_volumes: {}

@@ -0,0 +1,41 @@
---
- name: "Pihole | Restore any missing volumes from S3"
ansible.builtin.include_role:
name: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_backup_s3_volume: "{{ item }}"
with_items:
- name: "{{ pihole_portainer_stack_name }}_{{ inventory_hostname_short }}_app"
- name: "{{ pihole_portainer_stack_name }}_{{ inventory_hostname_short }}_dns"
- name: "Pihole | Set volume name fact."
ansible.builtin.set_fact:
vol_one: "{{ inventory_hostname_short }}_app"
- name: "Pihole | Update Portainer."
chatton.portainer.portainer_stack:
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ pihole_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: "{{ pihole_state }}"
definition:
version: "3"
services:
pihole:
labels: "{{ backup_labels }}"
container_name: "{{ pihole_container_name }}"
image: "{{ pihole_image }}:{{ pihole_tag }}"
ports:
- "53:53/tcp"
- "53:53/udp"
- "85:80/tcp"
environment:
WEBPASSWORD: '{{ pihole_web_password }}'
DNSMASQ_LISTENING: "all"
volumes:
- '{{ inventory_hostname_short }}_app:/etc/pihole'
- '{{ inventory_hostname_short }}_dns:/etc/dnsmasq.d'
restart: unless-stopped
volumes: "{{ pihole_volumes }}"

@@ -1,12 +1,10 @@
---
vaultwarden_state: present
vaultwarden_image: vaultwarden/server
vaultwarden_tag: 1.28.1
vaultwarden_backup_enabled: true
vaultwarden_backup_schedule: "nightly"
vaultwarden_expose_port: 80
vaultwarden_portainer_stack_name: vaultwarden
vaultwarden_container_name: vaultwarden
vaultwarden_restart_policy: unless-stopped
# Environment variables
vaultwarden_websocket_enabled: true
@@ -15,8 +13,3 @@ vaultwarden_emergency_access_allowed: true
vaultwarden_domain: "https://vault.cianhatton.ie"
vaultwarden_signups_allowed: false
vaultwarden_webvault_enabled: true
# Backup variables
vaultwarden_docker_backup_restore_force: false
vaultwarden_docker_backup_restore_latest_s3_key: true
vaultwarden_docker_backup_fail_on_no_s3_backups: false

@@ -3,27 +3,22 @@
ansible.builtin.include_role:
name: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_backup_restore_force: "{{ vaultwarden_docker_backup_restore_force }}"
docker_backup_restore_latest_s3_key: "{{ vaultwarden_docker_backup_restore_latest_s3_key }}"
docker_backup_fail_on_no_s3_backups: "{{ vaultwarden_docker_backup_fail_on_no_s3_backups }}"
docker_backup_s3_volume:
name: "{{ vaultwarden_portainer_stack_name }}_data"
- name: "Vaultwarden | Update Portainer."
chatton.portainer.portainer_stack:
username: admin
username: '{{ portainer_user }}'
password: '{{ portainer.password }}'
base_url: '{{ portainer_base_url }}'
stack_name: '{{ vaultwarden_portainer_stack_name }}'
endpoint_id: '{{ portainer_endpoint }}'
state: present
state: "{{ vaultwarden_state }}"
definition:
version: '3.3'
services:
vaultwarden:
labels:
ie.cianhatton.backup.enabled: "{{ vaultwarden_backup_enabled }}"
ie.cianhatton.backup.schedule: "{{ vaultwarden_backup_schedule }}"
labels: "{{ backup_labels }}"
image: "{{ vaultwarden_image }}:{{ vaultwarden_tag }}"
container_name: "{{ vaultwarden_container_name }}"
environment:
@@ -34,7 +29,7 @@
DOMAIN: "{{ vaultwarden_domain }}"
ADMIN_TOKEN: "{{ vaultwarden_admin_token }}"
SIGNUPS_ALLOWED: "{{ vaultwarden_signups_allowed }}"
restart: "{{ vaultwarden_restart_policy }}"
restart: "{{ restart_policy }}"
ports:
- "{{ vaultwarden_expose_port }}:80"
volumes:

@@ -11,8 +11,8 @@ actions:
timeout: 30
- title: Restart Overseerr
icon: overseerr
shell: docker restart plex
shell: docker restart overseerr
timeout: 30
- title: Restart VPN Stack
shell: docker restart surfshark sonarr radarr jackett qbittorrent
shell: docker restart protonwire sonarr radarr jackett qbittorrent
timeout: 90

@@ -1,16 +0,0 @@
---
version: "3.1"
services:
gotify:
labels:
ie.cianhatton.backup.enabled: "true"
ie.cianhatton.backup.schedule: "{{ backups.schedule_keys.nightly }}"
image: gotify/server
container_name: gotify
volumes:
- data:/app/data
ports:
- 7875:80
restart: unless-stopped
volumes:
data:

@@ -1,26 +0,0 @@
---
version: '3'
services:
minio:
image: "{{ template_vars.image }}:{{ template_vars.tag }}"
ports:
# The web console
- "11000:9000"
# The API
- "11001:9001"
volumes:
- minio_storage:/data
environment:
# specified in the vault file
MINIO_ROOT_USER: "{{ minio_username }}"
MINIO_ROOT_PASSWORD: "{{ minio_password }}"
MINIO_API_ROOT_ACCESS: "on"
MINIO_BROWSER_REDIRECT_URL: http://192.168.178.42:11000
# enable encryption
MINIO_KMS_AUTO_ENCRYPTION: "on"
MINIO_KMS_SECRET_KEY: "minio-key:{{ minio_encryption_key }}"
command: server --console-address ":9001" /data
volumes:
minio_storage: {}

@@ -1,24 +0,0 @@
---
version: "3"
services:
pihole:
labels:
ie.cianhatton.backup.enabled: "true"
ie.cianhatton.backup.schedule: "{{ backups.schedule_keys.nightly }}"
container_name: pihole
image: "pihole/pihole:2022.09.2"
ports:
- "53:53/tcp"
- "53:53/udp"
- "85:80/tcp"
environment:
WEBPASSWORD: '{{ pihole_web_password }}'
DNSMASQ_LISTENING: "all"
volumes:
- '{{ inventory_hostname_short }}_app:/etc/pihole'
- '{{ inventory_hostname_short }}_dns:/etc/dnsmasq.d'
restart: unless-stopped
volumes:
{{ inventory_hostname_short }}_app:
{{ inventory_hostname_short }}_dns:

@@ -1,169 +0,0 @@
---
version: "3"
services:
{% if template_vars.vpn == "protonwire" %}
protonwire:
container_name: protonwire
image: ghcr.io/tprasadtp/protonwire:7.2.0
init: true
restart: unless-stopped
environment:
# Quote this value as server name can contain '#'.
PROTONVPN_SERVER: "{{ vpn.protonvpn_server }}"
# Set this to 1 to show debug logs for issue forms.
DEBUG: "1"
# Set this to 0 to disable kill-switch.
KILL_SWITCH: "0"
# follow instructions here https://github.com/tprasadtp/protonvpn-docker
WIREGUARD_PRIVATE_KEY: "{{ vpn.wireguard_private_key }}"
LAN_NETWORK: "{{ vpn.lan_network }}"
cap_add:
- NET_ADMIN
# sysctl net.ipv4.conf.all.rp_filter is mandatory!
# net.ipv6.conf.all.disable_ipv6 disables IPv6 as protonVPN does not support IPv6.
# 'net.*' sysctls are not required on application containers,
# as they share network stack with protonwire container.
sysctls:
net.ipv4.conf.all.rp_filter: 2
net.ipv6.conf.all.disable_ipv6: 1
volumes:
- type: tmpfs
target: /tmp
ports:
# qbittorrent
- 15000:15000
- 6881:6881
- 6881:6881/udp
# radarr
- 7878:7878
# sonarr
- 8989:8989
# jackett
- 9117:9117
dns:
- 1.1.1.1
- 8.8.8.8
{% elif template_vars.vpn == "surfshark" %}
surfshark:
image: ilteoood/docker-surfshark
container_name: surfshark
environment:
- SURFSHARK_USER={{ vpn.surfshark_username }}
- SURFSHARK_PASSWORD={{ vpn.surfshark_password }}
# must specify LAN_NETWORK otherwise you will not be able
# to access ports which are exposed here.
- LAN_NETWORK={{ vpn.lan_network }}
cap_add:
- NET_ADMIN
devices:
- /dev/net/tun
ports:
# qbittorrent
- 15000:15000
- 6881:6881
- 6881:6881/udp
# radarr
- 7878:7878
# sonarr
- 8989:8989
# jackett
- 9117:9117
restart: unless-stopped
dns:
- 1.1.1.1
- 8.8.8.8
{% endif %}
{% if template_vars.qbittorrent.enabled %}
qbittorrent:
labels:
ie.cianhatton.backup.enabled: "true"
ie.cianhatton.backup.schedule: "{{ backups.schedule_keys.nightly }}"
depends_on:
- {{ template_vars.vpn }}
image: "{{ template_vars.qbittorrent.image }}:{{ template_vars.qbittorrent.tag }}"
container_name: qbittorrent
network_mode: "service:{{ template_vars.vpn }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/London
- WEBUI_PORT=15000
volumes:
- qbittorrent_config:/config
- {{ directories.downloads_dir }}:/downloads
restart: unless-stopped
{% endif %}
{% if template_vars.radarr.enabled %}
radarr:
labels:
ie.cianhatton.backup.enabled: "true"
ie.cianhatton.backup.schedule: "{{ backups.schedule_keys.nightly }}"
depends_on:
- {{ template_vars.vpn }}
image: "{{ template_vars.radarr.image }}:{{ template_vars.radarr.tag }}"
container_name: radarr
network_mode: "service:{{ template_vars.vpn }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/London
volumes:
- radarr_config:/config
- {{ directories.movies_dir }}:/movies
- {{ directories.downloads_dir }}:/downloads
restart: unless-stopped
{% endif %}
{% if template_vars.sonarr.enabled %}
sonarr:
depends_on:
- {{ template_vars.vpn }}
image: "{{ template_vars.sonarr.image }}:{{ template_vars.sonarr.tag }}"
labels:
ie.cianhatton.backup.enabled: "true"
ie.cianhatton.backup.schedule: "{{ backups.schedule_keys.nightly }}"
container_name: sonarr
network_mode: "service:{{ template_vars.vpn }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/London
volumes:
- sonarr_config:/config
- {{ directories.tv_dir }}:/tv
- {{ directories.downloads_dir }}:/downloads
restart: unless-stopped
{% endif %}
{% if template_vars.jackett.enabled %}
jackett:
labels:
ie.cianhatton.backup.enabled: "true"
ie.cianhatton.backup.schedule: "{{ backups.schedule_keys.nightly }}"
depends_on:
- {{ template_vars.vpn }}
image: "{{ template_vars.jackett.image }}:{{ template_vars.jackett.tag }}"
container_name: jackett
network_mode: "service:{{ template_vars.vpn }}"
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/London
- AUTO_UPDATE=true
volumes:
- jackett_config:/config
- {{ directories.downloads_dir }}:/downloads
restart: unless-stopped
{% endif %}
volumes:
{% if template_vars.jackett.enabled %}
jackett_config:
{% endif %}
{% if template_vars.qbittorrent.enabled %}
qbittorrent_config:
{% endif %}
{% if template_vars.radarr.enabled %}
radarr_config:
{% endif %}
{% if template_vars.sonarr.enabled %}
sonarr_config:
{% endif %}

@@ -0,0 +1,293 @@
#!/usr/bin/env python3
# Copyright (c) 2016, Antonio SJ Musumeci <trapexit@spawn.link>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import argparse
import ctypes
import errno
import fnmatch
import io
import os
import shlex
import subprocess
import sys
_libc = ctypes.CDLL("libc.so.6",use_errno=True)
_lgetxattr = _libc.lgetxattr
_lgetxattr.argtypes = [ctypes.c_char_p,ctypes.c_char_p,ctypes.c_void_p,ctypes.c_size_t]
def lgetxattr(path,name):
if type(path) == str:
path = path.encode(errors='backslashreplace')
if type(name) == str:
name = name.encode(errors='backslashreplace')
length = 64
while True:
buf = ctypes.create_string_buffer(length)
res = _lgetxattr(path,name,buf,ctypes.c_size_t(length))
if res >= 0:
return buf.raw[0:res].decode(errors='backslashreplace')
else:
err = ctypes.get_errno()
if err == errno.ERANGE:
length *= 2
elif err == errno.ENODATA:
return None
else:
raise IOError(err,os.strerror(err),path)
def ismergerfs(path):
try:
lgetxattr(path,'user.mergerfs.version')
return True
except IOError as e:
return False
def mergerfs_control_file(basedir):
if basedir == '/':
return None
ctrlfile = os.path.join(basedir,'.mergerfs')
if os.path.exists(ctrlfile):
return ctrlfile
else:
dirname = os.path.dirname(basedir)
return mergerfs_control_file(dirname)
def mergerfs_srcmounts(ctrlfile):
srcmounts = lgetxattr(ctrlfile,'user.mergerfs.srcmounts')
srcmounts = srcmounts.split(':')
return srcmounts
def match(filename,matches):
for match in matches:
if fnmatch.fnmatch(filename,match):
return True
return False
def exclude_by_size(filepath,exclude_lt,exclude_gt):
try:
st = os.lstat(filepath)
if exclude_lt and st.st_size < exclude_lt:
return True
if exclude_gt and st.st_size > exclude_gt:
return True
return False
except:
return False
def find_a_file(src,
relpath,
file_includes,file_excludes,
path_includes,path_excludes,
exclude_lt,exclude_gt):
basepath = os.path.join(src,relpath)
for (dirpath,dirnames,filenames) in os.walk(basepath):
for filename in filenames:
filepath = os.path.join(dirpath,filename)
if match(filename,file_excludes):
continue
if match(filepath,path_excludes):
continue
if not match(filename,file_includes):
continue
if not match(filepath,path_includes):
continue
if exclude_by_size(filepath,exclude_lt,exclude_gt):
continue
return os.path.relpath(filepath,src)
return None
def execute(args):
return subprocess.call(args)
def print_args(args):
quoted = [shlex.quote(arg) for arg in args]
print(' '.join(quoted))
def build_move_file(src,dst,relfile):
frompath = os.path.join(src,'./',relfile)
topath = dst+'/'
args = ['rsync',
'-avlHAXWE',
'--relative',
'--progress',
'--remove-source-files',
frompath,
topath]
return args
def freespace_percentage(srcmounts):
lfsp = []
for srcmount in srcmounts:
vfs = os.statvfs(srcmount)
avail = vfs.f_bavail * vfs.f_frsize
total = vfs.f_blocks * vfs.f_frsize
per = avail / total
lfsp.append((srcmount,per))
return sorted(lfsp, key=lambda x: x[1])
def all_within_range(l,n):
if len(l) == 0 or len(l) == 1:
return True
return (abs(l[0][1] - l[-1][1]) <= n)
def human_to_bytes(s):
m = s[-1]
if m == 'K':
i = int(s[0:-1]) * 1024
elif m == 'M':
i = int(s[0:-1]) * 1024 * 1024
elif m == 'G':
i = int(s[0:-1]) * 1024 * 1024 * 1024
elif m == 'T':
i = int(s[0:-1]) * 1024 * 1024 * 1024 * 1024
else:
i = int(s)
return i
def buildargparser():
parser = argparse.ArgumentParser(description='balance files on a mergerfs mount based on percentage drive filled')
parser.add_argument('dir',
type=str,
help='starting directory')
parser.add_argument('-p',
dest='percentage',
type=float,
default=2.0,
help='percentage range of freespace (default 2.0)')
parser.add_argument('-i','--include',
dest='include',
type=str,
action='append',
default=[],
help='fnmatch compatible file filter (can use multiple times)')
parser.add_argument('-e','--exclude',
dest='exclude',
type=str,
action='append',
default=[],
help='fnmatch compatible file filter (can use multiple times)')
parser.add_argument('-I','--include-path',
dest='includepath',
type=str,
action='append',
default=[],
help='fnmatch compatible path filter (can use multiple times)')
parser.add_argument('-E','--exclude-path',
dest='excludepath',
type=str,
action='append',
default=[],
help='fnmatch compatible path filter (can use multiple times)')
parser.add_argument('-s',
dest='excludelt',
type=str,
default='0',
help='exclude files smaller than <int>[KMGT] bytes')
parser.add_argument('-S',
dest='excludegt',
type=str,
default='0',
help='exclude files larger than <int>[KMGT] bytes')
return parser
def main():
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,
encoding='utf8',
errors="backslashreplace",
line_buffering=True)
sys.stderr = io.TextIOWrapper(sys.stderr.buffer,
encoding='utf8',
errors="backslashreplace",
line_buffering=True)
parser = buildargparser()
args = parser.parse_args()
args.dir = os.path.realpath(args.dir)
ctrlfile = mergerfs_control_file(args.dir)
if not ismergerfs(ctrlfile):
print("%s is not a mergerfs mount" % args.dir)
sys.exit(1)
relpath = ''
mntpoint = os.path.dirname(ctrlfile)
if args.dir != mntpoint:
relpath = os.path.relpath(args.dir,mntpoint)
file_includes = ['*'] if not args.include else args.include
file_excludes = args.exclude
path_includes = ['*'] if not args.includepath else args.includepath
path_excludes = args.excludepath
exclude_lt = human_to_bytes(args.excludelt)
exclude_gt = human_to_bytes(args.excludegt)
srcmounts = mergerfs_srcmounts(ctrlfile)
percentage = args.percentage / 100
try:
l = freespace_percentage(srcmounts)
while not all_within_range(l,percentage):
todrive = l[-1][0]
relfilepath = None
while not relfilepath and len(l):
fromdrive = l[0][0]
del l[0]
relfilepath = find_a_file(fromdrive,
relpath,
file_includes,file_excludes,
path_includes,path_excludes,
exclude_lt,exclude_gt)
if len(l) == 0:
print('Could not find file to transfer: exiting...')
break
if fromdrive == todrive:
print('Source drive == target drive: exiting...')
break
args = build_move_file(fromdrive,todrive,relfilepath)
print('file: {}\nfrom: {}\nto: {}'.format(relfilepath,fromdrive,todrive))
print_args(args)
rv = execute(args)
if rv:
print('ERROR - exited with exit code: {}'.format(rv))
break
l = freespace_percentage(srcmounts)
print('Branches within {:.1%} range: '.format(percentage))
for (branch,percentage) in l:
print(' * {}: {:.2%} free'.format(branch,percentage))
except KeyboardInterrupt:
print("exiting: CTRL-C pressed")
sys.exit(0)
if __name__ == "__main__":
main()
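
The vendored script above rebalances by repeatedly rsync-ing a file from the branch with the least free space to the branch with the most, stopping once every branch's free-space fraction lies within the `-p` window (default 2.0 percent). A toy illustration of that stopping condition, with assumed figures:

# Assumed free-space fractions, sorted ascending as freespace_percentage() returns.
branches = [("/mnt/data/device0", 0.12), ("/mnt/data/device1", 0.31)]
within_range = abs(branches[0][1] - branches[-1][1]) <= 2.0 / 100  # all_within_range
print(within_range)  # False: a file moves from device0 (fullest) to device1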

@@ -34,3 +34,27 @@
name: sprat.mergerfs
vars:
mergerfs_mounts: '{{ mounts }}'
- name: Download mergerfs balance tool.
ansible.builtin.get_url:
url: https://raw.githubusercontent.com/trapexit/mergerfs-tools/master/src/mergerfs.balance
dest: /usr/local/bin/mergerfs.balance
mode: 0755
# Leave this as an option in case the url above goes down.
#- name: Add local mergerfs balance tool.
# ansible.builtin.copy:
# src: mergerfs.balance
# dest: /usr/local/bin/mergerfs.balance
# mode: 0755
- name: Add a cron job to balance mergerfs.
ansible.builtin.cron:
name: Balance mergerfs media.
user: "{{ homelab_user }}"
minute: 0
hour: 5
weekday: 0
job: /usr/local/bin/mergerfs.balance /mnt/mergerfs
state: present
cron_file: balance_mergerfs

@@ -8,9 +8,6 @@
ansible.builtin.include_role:
name: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_backup_restore_force: false
docker_backup_restore_latest_s3_key: true
docker_backup_fail_on_no_s3_backups: false
docker_backup_s3_volume:
name: portainer_portainer_data
@@ -21,12 +18,10 @@
version: '3.2'
services:
portainer:
labels:
ie.cianhatton.backup.enabled: 'true'
ie.cianhatton.backup.schedule: nightly
labels: "{{ backup_labels }}"
image: "portainer/portainer-ce:{{ portainer_version }}"
container_name: portainer
restart: unless-stopped
restart: "{{ restart_policy }}"
ports:
- 9000:9000
volumes:

@@ -3,7 +3,9 @@
ansible.builtin.apt:
pkg:
- sudo
# TODO move these somewhere else
- ufw
- htop
state: latest
update_cache: true
