Add bootstrap, Makefile, imports from ansible-docker-backup (#20)

pull/27/head
Cian Hatton 3 years ago committed by GitHub
parent 0e0c6886d0
commit 8a274a186d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -0,0 +1,9 @@
---
# ansible-lint configuration: rules intentionally skipped for now.
skip_list:
- yaml[indentation]
# TODO I probably want these to be correct
- meta-no-info
- meta-incorrect
- schema
- fqcn-builtins

@ -1,12 +1,11 @@
---
name: CI name: CI
on: on:
workflow_dispatch: workflow_dispatch:
pull_request: pull_request:
env: env:
VAULT_PASSWORD: "${{ secrets.VAULT_PASSWORD }}" VAULT_PASSWORD: ${{ secrets.VAULT_PASSWORD }}
SSH_PUB: "${{ secrets.SSH_PUB }}" SSH_PUB: ${{ secrets.SSH_PUB }}
ANSIBLE_FORCE_COLOR: '1' ANSIBLE_FORCE_COLOR: '1'
jobs: jobs:
@ -14,39 +13,41 @@ jobs:
name: Test Ansible Playbooks name: Test Ansible Playbooks
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Create SSH pub - name: Create SSH pub
run: | run: |
echo "creating ~/.ssh/id_rsa.pub file" echo "creating ~/.ssh/id_rsa.pub file"
mkdir -p ~/.ssh/ mkdir -p ~/.ssh/
echo "$SSH_PUB" > ~/.ssh/id_rsa.pub echo "$SSH_PUB" > ~/.ssh/id_rsa.pub
- name: Install SSH key - name: Install SSH key
uses: shimataro/ssh-key-action@v2 uses: shimataro/ssh-key-action@v2
with: with:
key: ${{ secrets.SSH_KEY }} key: ${{ secrets.SSH_KEY }}
known_hosts: ${{ secrets.KNOWN_HOSTS }} known_hosts: ${{ secrets.KNOWN_HOSTS }}
- name: Check out the codebase. - name: Check out the codebase.
uses: actions/checkout@v3 uses: actions/checkout@v3
- name: Prepare Test Files - name: Prepare Test Files
run: | run: |
mv tests/.vault-pass.sh ./.vault-pass.sh mv tests/.vault-pass.sh ./.vault-pass.sh
mv tests/ansible.cfg ./ansible.cfg mv tests/ansible.cfg ./ansible.cfg
mv tests/hosts.ini ./hosts.ini mv tests/hosts.ini ./hosts.ini
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: 3.8.9 python-version: 3.8.9
- name: Install test dependencies. - name: Install test dependencies.
run: pip install -r requirements.txt run: pip install -r requirements.txt
- name: Syntax Check - name: Install ansible dependencies.
run: | run: ansible-galaxy install -r requirements.yml
ansible-playbook setup-linode.yml --syntax-check - name: Syntax Check
ansible-playbook setup-homelab.yml --syntax-check run: |
ansible-playbook verify-homelab.yml --syntax-check ansible-playbook setup-linode.yml --syntax-check
- name: Setup Linode Instance ansible-playbook setup-homelab.yml --syntax-check
run: ansible-playbook setup-linode.yml ansible-playbook verify-homelab.yml --syntax-check
- name: Setup Home Lab - name: Setup Linode Instance
run: ansible-playbook setup-homelab.yml run: ansible-playbook setup-linode.yml
- name: Verify Home Lab - name: Setup Home Lab
run: ansible-playbook verify-homelab.yml run: ansible-playbook setup-homelab.yml
- name: Delete Linode Instance - name: Verify Home Lab
run: ansible-playbook setup-linode.yml --extra-vars "state=absent" run: ansible-playbook verify-homelab.yml
- name: Delete Linode Instance
run: ansible-playbook setup-linode.yml --extra-vars "state=absent"

@ -1,12 +1,11 @@
---
name: Delete test linode instance name: Delete test linode instance
on: on:
schedule: schedule:
- cron: "0 0 * * *" # delete at midnight every day - cron: 0 0 * * * # delete at midnight every day
env: env:
VAULT_PASSWORD: "${{ secrets.VAULT_PASSWORD }}" VAULT_PASSWORD: ${{ secrets.VAULT_PASSWORD }}
SSH_PUB: "${{ secrets.SSH_PUB }}" SSH_PUB: ${{ secrets.SSH_PUB }}
ANSIBLE_FORCE_COLOR: '1' ANSIBLE_FORCE_COLOR: '1'
jobs: jobs:
@ -14,28 +13,28 @@ jobs:
name: Delete Linode Instance name: Delete Linode Instance
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Create SSH pub - name: Create SSH pub
run: | run: |
echo "creating ~/.ssh/id_rsa.pub file" echo "creating ~/.ssh/id_rsa.pub file"
mkdir -p ~/.ssh/ mkdir -p ~/.ssh/
echo "$SSH_PUB" > ~/.ssh/id_rsa.pub echo "$SSH_PUB" > ~/.ssh/id_rsa.pub
- name: Install SSH key - name: Install SSH key
uses: shimataro/ssh-key-action@v2 uses: shimataro/ssh-key-action@v2
with: with:
key: ${{ secrets.SSH_KEY }} key: ${{ secrets.SSH_KEY }}
known_hosts: ${{ secrets.KNOWN_HOSTS }} known_hosts: ${{ secrets.KNOWN_HOSTS }}
- name: Check out the codebase. - name: Check out the codebase.
uses: actions/checkout@v3 uses: actions/checkout@v3
- name: Prepare Test Files - name: Prepare Test Files
run: | run: |
mv tests/.vault-pass.sh ./.vault-pass.sh mv tests/.vault-pass.sh ./.vault-pass.sh
mv tests/ansible.cfg ./ansible.cfg mv tests/ansible.cfg ./ansible.cfg
mv tests/hosts.ini ./hosts.ini mv tests/hosts.ini ./hosts.ini
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: 3.8.9 python-version: 3.8.9
- name: Install test dependencies. - name: Install test dependencies.
run: pip install -r requirements.txt run: pip install -r requirements.txt
- name: Delete Linode Instance - name: Delete Linode Instance
run: ansible-playbook setup-linode.yml --extra-vars "state=absent" run: ansible-playbook setup-linode.yml --extra-vars "state=absent"

@ -0,0 +1,40 @@
---
name: Code Health
on:
  workflow_dispatch:
  push:
jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      # run every directory's lint even if one fails
      fail-fast: false
      matrix:
        directory:
          - roles
          - playbooks
          - group_vars
          - host_vars
    steps:
      - name: Checkout the codebase
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          # quoted so YAML tooling never retypes the version
          python-version: "3.8.9"
      - name: Install dependencies
        run: make deps
      - name: Test Directory
        run: ansible-lint ${{ matrix.directory }}
  format:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout the codebase
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.8.9"
      - name: Install dependencies
        run: make deps
      - name: Format code
        run: make format

@ -0,0 +1,35 @@
#!/usr/bin/make -f
SHELL := /bin/bash

# Every target here is an action, not a file on disk: declare them phony so
# a stray file named e.g. "deps" or "lint" can never mask the recipe.
.PHONY: all bootstrap qnap homelab verify deps format lint backup restore

all: deps bootstrap homelab

bootstrap:
	ansible-playbook playbooks/bootstrap.yml -K -e ansible_ssh_user=cianhatton -e ansible_ssh_private_key_file=~/.ssh/id_rsa

qnap:
	ansible-playbook playbooks/setup-homelab.yml --limit qnap

homelab: bootstrap
	ansible-playbook playbooks/setup-homelab.yml

verify:
	ansible-playbook playbooks/verify-homelab.yml

deps:
	pip install --upgrade pip
	pip install -r requirements.txt
	ansible-galaxy install -r requirements.yml

format:
	scripts/format_all_yaml.sh

lint:
	ansible-lint host_vars
	ansible-lint group_vars
	ansible-lint roles
	ansible-lint playbooks

backup: deps
	ansible-playbook playbooks/backup-docker-volumes.yml

restore: deps
	ansible-playbook playbooks/restore-docker-volumes.yml

@ -1 +1,18 @@
# HomeLab # HomeLab
An ansible specific private key is used in these playbooks.
This key can be generated by running
### Generate the keyfile
```bash
ssh-keygen -t ed25519 -C "ansible"
```
### Add to authorized host
```bash
# replace the user/hostname with whatever you need to connect to.
ssh-copy-id -i ~/.ssh/ansible.pub cianhatton@qnap
```

@ -2,5 +2,8 @@
default_module_path=library default_module_path=library
inventory=hosts.ini inventory=hosts.ini
vault_password_file=.vault-pass.sh vault_password_file=.vault-pass.sh
host_key_checking = False host_key_checking = True
enable_plugins = linode enable_plugins = linode
private_key_file=~/.ssh/ansible
remote_user=ansible
roles_path = roles

@ -1,28 +0,0 @@
---
# all encrypted variables should go in the linked file.
vault_file: vault_vars/linode-vault.yml
# any linode specific variables go here
configure_mergefs: false
services:
- name: gitea
- name: mealie
- name: linkding
- name: overseerr
- name: nextcloud
# - name: dashboards
- name: nginx-proxy-manager
- name: uptime-kuma
# - name: vpn-stack
- name: docker-volume-backup
- name: mariadb
- name: photoprism
- name: olivetin
# any additional docker networks that should be created
docker_networks:
- mariadb_net
# use raw docker compose instead of portainer
use_docker_compose: true
use_portainer: false
restore_from_s3: false

@ -0,0 +1,24 @@
# all encrypted variables should go in the linked file.
vault_file: vault_vars/linode-vault.yml
# any linode specific variables go here
services:
- name: gitea
- name: mealie
- name: linkding
- name: overseerr
- name: nextcloud
- name: nginx-proxy-manager
- name: uptime-kuma
- name: docker-volume-backup
- name: mariadb
- name: photoprism
- name: olivetin
# any additional docker networks that should be created
docker_networks:
- mariadb_net
# use docker compose
container_deployment_mode: compose
restore_from_s3: false

@ -1,48 +0,0 @@
---
# all encrypted variables should go in the linked file.
vault_file: vault_vars/qnap-vault.yml
# any qnap specific variables go here
configure_mergefs: true
mounts:
- path: /mnt/mergerfs
state: mounted
branches:
- "/mnt/data/device0"
- "/mnt/data/device1"
- "/mnt/data/device2"
options: allow_other,use_ino
devices:
- uuid: "a54c1bde-1400-4975-bf24-08c603ca3a11" # /dev/sdc1
path: "/mnt/data/device0"
- uuid: "727dddaa-f7a1-439a-995f-5f4d35322e08" # /dev/sdd1
path: "/mnt/data/device1"
- uuid: "f3cff115-9adc-4761-b1e9-e81055f3e0af" # /dev/sda1
path: "/mnt/data/device2"
# SSD for downloads / transcoding
- uuid: "c528bf82-61ab-4f3d-87e0-d1e6e02ef7ec" # /dev/sdf
path: "/mnt/ssd0/"
services:
- name: gitea
- name: mealie
- name: linkding
- name: overseerr
- name: nextcloud
- name: dashboards
- name: nginx-proxy-manager
- name: plex
- name: uptime-kuma
- name: vpn-stack
- name: docker-volume-backup
- name: mariadb
- name: photoprism
- name: olivetin
# any additional docker networks that should be created
docker_networks:
- mariadb_net
use_portainer: true
use_docker_compose: false
restore_from_s3: true

@ -1,10 +1,7 @@
---
backups: backups:
schedule_keys: schedule_keys:
monthly: "monthly" monthly: monthly
nightly: "nightly" nightly: nightly
docker_compose_directory: /etc/docker-compose
# TODO: docker_volume_backup exists in vault. Clean this up. # TODO: docker_volume_backup exists in vault. Clean this up.
dockervolumebackup: dockervolumebackup:
@ -26,17 +23,38 @@ olivetin:
docker_networks: [] docker_networks: []
homelab_group: cianhatton
homelab_user: cianhatton homelab_user: cianhatton
configure_samba: true configure_samba: true
samba_group: smbgroup samba_group: smbgroup
samba_user: smbuser samba_user: smbuser
users: users:
- name: cianhatton - name: cianhatton
shares: group: cianhatton
- /share passwordless_sudo: true
- /share/public_files
- /share/private_files
- /share/cian_files
shares:
- /share
- /share/public_files
- /share/private_files
- /share/cian_files
directories:
# path on qnap where downloads go
downloads_dir: /mnt/ssd0/downloads
# path on qnap where plex transcoding happens
transcoding_dir: /mnt/ssd0/transcoding
# path on qnap where movies are stored
movies_dir: /mnt/mergerfs/media/movies
# path on qnap where tv shows are stored
tv_dir: /mnt/mergerfs/media/tv
# path on qnap where docker compose files are stored
docker_compose_directory: /etc/docker-compose
# path on qnap where backups are stored
backups_dir: /mnt/mergerfs/backups
# path where photoprism photos are stored
photoprism_dir: /mnt/mergerfs/photoprism
desired_docker_images:
- ubuntu:latest
- busybox:latest

@ -0,0 +1,60 @@
# all encrypted variables should go in the linked file.
vault_file: vault_vars/qnap-vault.yml
# any qnap specific variables go here
mounts:
- path: /mnt/mergerfs
state: mounted
branches:
- /mnt/data/device0
- /mnt/data/device1
- /mnt/data/device2
options: allow_other,use_ino
devices:
- uuid: a54c1bde-1400-4975-bf24-08c603ca3a11 # /dev/sdc1
path: /mnt/data/device0
- uuid: 727dddaa-f7a1-439a-995f-5f4d35322e08 # /dev/sdd1
path: /mnt/data/device1
- uuid: f3cff115-9adc-4761-b1e9-e81055f3e0af # /dev/sda1
path: /mnt/data/device2
# SSD for downloads / transcoding
- uuid: c528bf82-61ab-4f3d-87e0-d1e6e02ef7ec # /dev/sdf
path: /mnt/ssd0/
services:
- name: gitea
endpoint_id: 2
- name: mealie
endpoint_id: 2
- name: linkding
endpoint_id: 2
- name: overseerr
endpoint_id: 2
- name: nextcloud
endpoint_id: 2
- name: dashboards
endpoint_id: 2
- name: nginx-proxy-manager
endpoint_id: 2
- name: plex
endpoint_id: 2
- name: uptime-kuma
endpoint_id: 2
- name: vpn-stack
endpoint_id: 2
- name: docker-volume-backup
endpoint_id: 2
- name: mariadb
endpoint_id: 2
- name: photoprism
endpoint_id: 2
- name: olivetin
endpoint_id: 2
# any additional docker networks that should be created
docker_networks:
- mariadb_net
# use portainer
container_deployment_mode: portainer
restore_from_s3: true

@ -1,13 +1,16 @@
# group qnap and linode to run operations on. # group qnap and linode to run operations on.
# this ensures we don't run everything on localhost as well. # this ensures we don't run everything on localhost as well.
[servers:children] [servers:children]
qnaps
linodes
[qnaps]
qnap qnap
linode
[qnap]
cianhatton@qnap [dockerhosts]
qnap
# BEGIN ANSIBLE MANAGED BLOCK # BEGIN ANSIBLE MANAGED BLOCK
[linode] [linodes]
root@172.104.8.72
# END ANSIBLE MANAGED BLOCK # END ANSIBLE MANAGED BLOCK

@ -1,295 +0,0 @@
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import requests
# Module docs rewritten to describe the actual module: the previous text was
# unedited "my_test" boilerplate whose options did not match argument_spec.
DOCUMENTATION = r'''
---
module: portainer_stack
short_description: Manage docker-compose stacks through the Portainer API
version_added: "1.0.0"
description: Create, update and delete docker-compose stacks via a Portainer instance.
options:
    stack_name:
        description: Name of the Portainer stack to manage.
        required: true
        type: str
    docker_compose_file_path:
        description: Path to the docker-compose file used as the stack definition.
        required: true
        type: str
    username:
        description: Portainer username used to obtain a JWT token.
        required: false
        type: str
        default: admin
    password:
        description: Portainer password used to obtain a JWT token.
        required: true
        type: str
    base_url:
        description: Base URL of the Portainer instance.
        required: false
        type: str
        default: http://localhost:9000
    state:
        description: Whether the stack should exist.
        required: false
        type: str
        choices: [present, absent]
        default: present
author:
    - Cian Hatton (@chatton)
'''

EXAMPLES = r'''
# Deploy (or update) a stack
- name: Deploy gitea stack
  portainer_stack:
    stack_name: gitea
    docker_compose_file_path: /etc/docker-compose/gitea/docker-compose.yml
    password: "{{ portainer_password }}"

# Remove a stack
- name: Remove gitea stack
  portainer_stack:
    stack_name: gitea
    docker_compose_file_path: /etc/docker-compose/gitea/docker-compose.yml
    password: "{{ portainer_password }}"
    state: absent
'''

RETURN = r'''
stack_name:
    description: Name of the managed stack.
    type: str
    returned: always
stack_id:
    description: Portainer ID of the stack.
    type: int
    returned: when the stack exists or was created
changed:
    description: Whether any change was made.
    type: bool
    returned: always
'''
from ansible.module_utils.basic import AnsibleModule
def _extract_creds(module):
return {
"username": module.params["username"],
"password": module.params["password"],
"base_url": module.params["base_url"],
}
def _get_jwt_token(creds):
    """Authenticate against the Portainer API and return the JWT token string."""
    auth_url = "{}/api/auth".format(creds["base_url"])
    body = {
        "Username": creds["username"],
        "Password": creds["password"],
    }
    resp = requests.post(auth_url, json=body)
    resp.raise_for_status()
    return resp.json()["jwt"]
COMPOSE_STACK = 2
STRING_METHOD = "string"
def _query_params_to_string(params):
s = "?"
for k, v in params.items():
s += f"&{k}={v}"
return s
def _load_envs_from_file(filepath):
envs = []
with open(filepath) as f:
file_contents = f.read()
lines = file_contents.splitlines()
for line in lines:
name, value = line.split("=")
envs.append({
"name": name,
"value": value
})
return envs
class PortainerClient:
    """Thin wrapper around the Portainer HTTP API.

    Authenticates once at construction time and sends the resulting JWT
    as a Bearer token on every request.
    """

    def __init__(self, creds):
        self.base_url = creds["base_url"]
        self.token = _get_jwt_token(creds)
        self.headers = {
            "Authorization": f"Bearer {self.token}"
        }

    def get(self, endpoint, query_params=None):
        """GET /api/<endpoint> and return the decoded JSON body."""
        url = f"{self.base_url}/api/{endpoint}"
        if query_params:
            url = url + _query_params_to_string(query_params)
        res = requests.get(url, headers=self.headers)
        res.raise_for_status()
        return res.json()

    def delete(self, endpoint):
        """DELETE /api/<endpoint>, best-effort; always returns {}.

        NOTE(review): per the original TODO, deletion succeeds server-side
        but the request itself errors, so failures are deliberately swallowed.
        """
        url = f"{self.base_url}/api/{endpoint}"
        try:
            res = requests.delete(url, headers=self.headers)
            res.raise_for_status()
        except Exception:
            pass
        return {}

    def put(self, endpoint, body):
        """PUT a JSON body to /api/<endpoint> and return the decoded response."""
        url = f"{self.base_url}/api/{endpoint}"
        res = requests.put(url, json=body, headers=self.headers)
        res.raise_for_status()
        return res.json()

    def post(self, endpoint, body, query_params=None):
        """POST a JSON body to /api/<endpoint> and return the decoded response.

        Bug fix: the query string is only appended when query_params is
        provided — the old code unconditionally called
        _query_params_to_string(query_params) and raised AttributeError
        whenever the default None was used.
        """
        url = f"{self.base_url}/api/{endpoint}"
        if query_params:
            url = url + _query_params_to_string(query_params)
        res = requests.post(url, json=body, headers=self.headers)
        res.raise_for_status()
        return res.json()
def _create_stack(client, module, file_contents, envs=None):
    """Create a new compose stack in Portainer and return the API response."""
    env_list = envs or []
    body = {
        "env": env_list,
        "name": module.params["stack_name"],
        "stackFileContent": file_contents,
    }
    # NOTE(review): endpointId is hard-coded to 2 — confirm it matches the
    # target Portainer endpoint.
    query = {
        "type": COMPOSE_STACK,
        "method": STRING_METHOD,
        "endpointId": 2,
    }
    return client.post("stacks", body=body, query_params=query)
def _update_stack(client, module, stack_id, envs=None):
    """Replace an existing stack's definition with the current compose file.

    NOTE(review): endpointId is hard-coded to 2 via the '?&endpointId=2'
    suffix — confirm it matches the target Portainer endpoint.
    """
    if not envs:
        envs = []
    target_stack_name = module.params["stack_name"]
    # Re-read the compose file from disk; callers do not pass its contents in.
    with open(module.params["docker_compose_file_path"]) as f:
        file_contents = f.read()
    return client.put(f"stacks/{stack_id}?&endpointId=2", body={
        "env": envs,
        "name": target_stack_name,
        "stackFileContent": file_contents,
    })
def handle_state_present(client, module):
    """Ensure the stack named in module params exists with the current compose file.

    Creates the stack when absent; when present, updates it only if the
    stored file content differs. Always terminates via module.exit_json.
    """
    result = dict(
        changed=False,
        stack_name=module.params["stack_name"]
    )
    already_exists = False
    stacks = client.get("stacks")
    result["stacks"] = stacks
    with open(module.params["docker_compose_file_path"]) as f:
        file_contents = f.read()
    target_stack_name = module.params["stack_name"]
    # Look up the stack by name to find its Portainer ID.
    for stack in stacks:
        if stack["Name"] == target_stack_name:
            already_exists = True
            result["stack_id"] = stack["Id"]
            break
    if not already_exists:
        stack = _create_stack(client, module, file_contents)
        result["changed"] = True
        result["stack_id"] = stack["Id"]
        module.exit_json(**result)
        return
    stack_id = result["stack_id"]
    # Fetch the currently deployed file to decide whether an update is needed.
    current_file_contents_resp = client.get(f"stacks/{stack_id}/file", query_params={
        "endpointId": 2
    })
    result["are_equal"] = current_file_contents_resp["StackFileContent"] == file_contents
    if result["are_equal"]:
        # Nothing to do: deployed definition already matches the local file.
        module.exit_json(**result)
        return
    # the stack exists and we have a new config.
    _update_stack(client, module, stack_id)
    result["changed"] = True
    module.exit_json(**result)
def handle_state_absent(client, module):
    """Ensure the stack named in module params does not exist.

    No-op (changed=False) when the stack is already absent. Always
    terminates via module.exit_json.
    """
    result = dict(
        changed=False,
        stack_name=module.params["stack_name"]
    )
    already_exists = False
    target_stack_name = module.params["stack_name"]
    stacks = client.get("stacks")
    # Look up the stack by name to find its Portainer ID.
    for stack in stacks:
        if stack["Name"] == target_stack_name:
            already_exists = True
            result["stack_id"] = stack["Id"]
            break
    if not already_exists:
        module.exit_json(**result)
        return
    stack_id = result['stack_id']
    client.delete(f"stacks/{stack_id}" + _query_params_to_string({"endpointId": 2}))
    result["changed"] = True
    module.exit_json(**result)
def run_module():
    """Parse module arguments and dispatch to the handler for the requested state."""
    # define available arguments/parameters a user can pass to the module
    module_args = dict(
        stack_name=dict(type='str', required=True),
        docker_compose_file_path=dict(type='str', required=True),
        username=dict(type='str', default='admin'),
        password=dict(type='str', required=True, no_log=True),
        base_url=dict(type='str', default="http://localhost:9000"),
        state=dict(type='str', default="present", choices=['present', 'absent'])
    )
    # Dispatch table: one handler per supported 'state' choice.
    state_fns = {
        "present": handle_state_present,
        "absent": handle_state_absent
    }
    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False
    )
    client = PortainerClient(creds=_extract_creds(module))
    state_fns[module.params["state"]](client, module)
def main():
    """Module entry point."""
    run_module()

if __name__ == '__main__':
    main()

@ -0,0 +1,32 @@
---
- hosts: qnap
become: true
pre_tasks:
- name: Include vault variables.
include_vars: '../{{vault_file}}'
tags: [always]
tasks:
- name: Find Containers With Backup Label
register: docker_info
docker_host_info:
containers: true
containers_filters:
label:
- ie.cianhatton.backup.enabled=true
register: filter_output
- debug: msg="{{filter_output}}"
- name: Get Container Names
set_fact: container_names="{{ filter_output.containers | map(attribute="Names") | flatten }}"
- debug: msg="{{container_names}}"
- name: Backup Containers with backup label
include_role:
name: chatton.docker_backup.docker_s3_backup
vars:
container_backup: "{{ container_item | regex_replace('^\\/', '') }}"
with_items: "{{ container_names }}"
loop_control:
loop_var: container_item

@ -0,0 +1,18 @@
# needs to be run with a different user with access to create ansible key and user.
# e.g.
# ansible-playbook bootstrap.yml -e ansible_ssh_user=cianhatton
# might additionally require -K if your user requires password for sudo.
- name: Generate SSH Key for ansible
hosts: localhost
connection: local
tasks:
- name: Generate an OpenSSH rsa keypair for ansible
community.crypto.openssh_keypair:
path: ~/.ssh/ansible
passphrase: ''
- hosts: all
become: true
roles:
- role: bootstrap

@ -0,0 +1,15 @@
---
- hosts: qnap
become: true
pre_tasks:
- name: Include vault variables.
include_vars: '../{{vault_file}}'
tags: [always]
roles:
- role: chatton.docker_backup.docker_s3_volume_restore
vars:
docker_s3_volume_restore_force: true
docker_s3_volume_restore_latest_s3_key: true
docker_volume_s3_restores:
- volume_name: "linkding_data"
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"

@ -0,0 +1,54 @@
---
- name: Update packages and ensure users on all hosts
tags: [always]
hosts: all
become: true
pre_tasks:
- name: Update Packages
apt:
upgrade: dist
update_cache: true
roles:
- role: setup_users
- name: Install docker on docker hosts
hosts: dockerhosts
become: true
roles:
- role: setup_docker
tags: [setup, docker]
- name: Setup and deploy services on the QNAP
hosts: qnap
become: true
pre_tasks:
- name: Include vault variables.
include_vars: '../{{vault_file}}'
tags: [always]
roles:
- role: setup_mergerfs
tags: [mergerfs]
- role: setup_samba
tags: [samba]
- role: setup_portainer
tags: [services, portainer]
- role: setup_hosted_services
tags: [services]
- hosts: linodes
become: true
pre_tasks:
- name: Include vault variables.
include_vars: '../{{vault_file}}'
tags: [always]
roles:
- role: setup_samba
tags: [samba]
- role: setup_docker
tags: [docker]
- role: setup_hosted_services
tags: [services]

@ -0,0 +1,6 @@
- hosts: localhost
become: true
roles:
- role: roles/setup_linode
vars:
state: present

@ -0,0 +1,47 @@
- hosts: all
become: true
pre_tasks:
- name: Include vault variables.
include_vars: ../{{vault_file}}
tags:
- always
tasks:
- name: Docker Compose Files Exist
command: stat {{directories.docker_compose_directory}}/{{ item.name }}/docker-compose.yml
with_items: '{{services}}'
changed_when: false
register: docker_compose_stat
- name: Assert all Docker Compose files were created
assert:
that: item.rc == 0
with_items: '{{docker_compose_stat.results}}'
- name: Populate service facts
service_facts: {}
- name: Assert docker is installed and started
assert:
that:
- ansible_facts.services.docker.state == "running"
- ansible_facts.services['docker.service'].status == "enabled"
- name: Inspect all images
docker_image_info: {}
register: image_details
- name: Assert desired images exist
assert:
that: {{item in image_details.images | map(attribute='RepoTags') | flatten: !!null ''}: !!null ''}
with_items: '{{desired_docker_images}}'
- name: Fetch Sudoers Files
command: stat /etc/sudoers.d/{{item.name}}
changed_when: false
register: sudoers_stat
with_items: '{{users}}'
when: item.passwordless_sudo == true
- name: Assert sudoers files are created
assert:
that: item.rc == 0
with_items: '{{sudoers_stat.results}}'

@ -1,44 +1,33 @@
ansible==6.3.0 ansible==6.3.0
ansible-compat==2.2.0 ansible-compat==2.2.0
ansible-core==2.13.3 ansible-core==2.13.3
arrow==1.2.2 ansible-lint==6.5.1
attrs==22.1.0 attrs==22.1.0
binaryornot==0.4.4 bracex==2.3.post1
Cerberus==1.3.2
certifi==2022.6.15
cffi==1.15.1 cffi==1.15.1
chardet==5.0.0
charset-normalizer==2.1.1
click==8.1.3
click-help-colors==0.9.1
commonmark==0.9.1 commonmark==0.9.1
cookiecutter==2.1.1
cryptography==37.0.4 cryptography==37.0.4
enrich==1.2.7 enrich==1.2.7
idna==3.3 filelock==3.8.0
importlib-resources==5.9.0 importlib-resources==5.9.0
Jinja2==3.1.2 Jinja2==3.1.2
jinja2-time==0.2.0
jsonschema==4.14.0 jsonschema==4.14.0
linode-api4==5.2.1
MarkupSafe==2.1.1 MarkupSafe==2.1.1
molecule==4.0.1
packaging==21.3 packaging==21.3
pathspec==0.9.0
pkgutil_resolve_name==1.3.10 pkgutil_resolve_name==1.3.10
pluggy==1.0.0
pycparser==2.21 pycparser==2.21
Pygments==2.13.0 Pygments==2.13.0
pyparsing==3.0.9 pyparsing==3.0.9
pyrsistent==0.18.1 pyrsistent==0.18.1
python-dateutil==2.8.2
python-slugify==6.1.2
PyYAML==6.0 PyYAML==6.0
requests==2.28.1
resolvelib==0.8.1 resolvelib==0.8.1
rich==12.5.1 rich==12.5.1
six==1.16.0 ruamel.yaml==0.15.100
ruamel.yaml.clib==0.2.6
subprocess-tee==0.3.5 subprocess-tee==0.3.5
text-unidecode==1.3
typing_extensions==4.3.0 typing_extensions==4.3.0
urllib3==1.26.12 wcmatch==8.4
yamlfmt==1.1.0
yamllint==1.27.1
zipp==3.8.1 zipp==3.8.1

@ -0,0 +1,11 @@
---
roles:
- src: sprat.mergerfs
collections:
- name: https://github.com/chatton/ansible-portainer.git
type: git
version: master
- name: https://github.com/chatton/ansible-docker-backup.git,dev
type: git
version: master

@ -0,0 +1,25 @@
# bootstrap
Role to bootstrap a target host. This role will add an ansible user and allow passwordless sudo.
## Table of content
- [Dependencies](#dependencies)
- [License](#license)
- [Author](#author)
---
## Dependencies
None.
## License
MIT
## Author
Cian Hatton

@ -0,0 +1 @@
ansible ALL=(ALL) NOPASSWD: ALL

@ -0,0 +1,53 @@
galaxy_info:
author: Cian Hatton
namespace: chatton
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: MIT
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

@ -0,0 +1,24 @@
- name: Update Packages
apt:
upgrade: dist
update_cache: true
- name: Create ansible user
user:
name: ansible
groups: root
system: true
- name: Add ssh key for ansible
authorized_key:
user: ansible
state: present
key: "{{ lookup('file', '~/.ssh/ansible.pub') }}"
- name: Add sudoers file for ansible
copy:
src: sudoer_ansible
dest: /etc/sudoers.d/ansible
owner: root
group: root
mode: 0440

@ -0,0 +1,2 @@
---
# defaults file for chatton.docker_restore

@ -0,0 +1,2 @@
---
# handlers file for chatton.docker_restore

@ -0,0 +1,52 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

@ -0,0 +1,121 @@
---
# tasks file for chatton.docker_backup
# https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#ansible-collections-community-docker-docker-container-module
# https://docs.docker.com/storage/volumes/#backup-restore-or-migrate-data-volumes
- name: Get container details
docker_container_info:
name: "{{ container_restore }}"
register: result
- name: Fail if container is not present
fail:
msg: Cannot restore volumes for a container when it does not exist. Ensure the container exists and try again.
when: result.exists == false
- debug: msg="{{ result }}"
- name: Extract only the volume mounts (not bind mounts)
set_fact: volume_mounts="{{ result.container.Mounts | selectattr("Type", "equalto", "volume")}}"
- debug: msg="{{ volume_mounts }}"
- name: Find relevant volume(s) in S3
amazon.aws.aws_s3:
bucket: "{{ aws_s3.bucket }}"
mode: list
region: "{{ aws_s3.region }}"
s3_url: "https://{{ aws_s3.s3_url }}"
prefix: "{{ item.Name }}/{{ item.Name }}"
aws_access_key: "{{ aws_s3.aws_access_key }}"
aws_secret_key: "{{ aws_s3.aws_secret_key }}"
register: s3_list_output
with_items: "{{ volume_mounts }}"
- debug: msg="{{ s3_list_output }}"
- name: Extract s3 keys for container
set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
with_items: "{{ s3_list_output.results }}"
- debug: msg="{{ container_s3_keys }}"
- name: Create a directory for temporary backups if they do not exist
ansible.builtin.file:
path: "/tmp/{{ item.Name }}"
state: directory
mode: '0755'
with_items: "{{ volume_mounts }}"
- name: Download archives from S3
amazon.aws.aws_s3:
bucket: "{{ aws_s3.bucket }}"
object: "{{ item }}"
aws_access_key: "{{ aws_s3.aws_access_key }}"
aws_secret_key: "{{ aws_s3.aws_secret_key }}"
region: "{{ aws_s3.region }}"
s3_url: "https://{{ aws_s3.s3_url }}"
mode: get
dest: "/tmp/{{ item }}"
with_items: "{{ container_s3_keys }}"
register: get_out
- debug: msg="{{ get_out }}"
- set_fact:
volume_details: "{{ volume_details |default([]) + [ {'mount': item.0, 's3_key': item.1} ] }}"
with_together:
- "{{ volume_mounts }}"
- "{{ container_s3_keys }}"`
- debug: msg="{{ volume_details }}"
- name: Stop a container
community.docker.docker_container:
name: "{{ container_restore }}"
state: stopped
- name: Ensure Volume
docker_volume:
name: "{{ item.mount.Name }}"
state: present
with_items: "{{ volume_details }}"
- name: Remove contents of volumes
community.docker.docker_container:
name: "restore-container-{{ item.mount.Name }}-{{ 10 | random }}"
image: ubuntu
command: "rm -rf ./* "
auto_remove: true
detach: false # block until this container exists.
state: started
# start inside the directory we want to wipe
working_dir: "{{ item.mount.Destination }}"
volumes:
- /tmp:/tmp
volumes_from:
- "{{ container_restore }}"
with_items: "{{ volume_details }}"
- name: Restore contents of volumes
community.docker.docker_container:
name: "restore-container-{{ item.mount.Name }}-{{ 10 | random }}"
image: ubuntu
# extract the tar into the volume.
command: "tar xvf /tmp/{{ item.s3_key }}"
auto_remove: true
detach: false # block until this container exists.
state: started
# the compressed volume contains the directories, so we start from the root
working_dir: "/"
volumes:
- /tmp:/tmp
volumes_from:
- "{{ container_restore }}"
with_items: "{{ volume_details }}"
- name: Start a container
community.docker.docker_container:
name: "{{ container_restore }}"
state: started

@ -0,0 +1,2 @@
---
# vars file for chatton.docker_restore

@ -0,0 +1,25 @@
# setup_docker
Role to install docker. It also installs the docker and docker compose python modules.
## Table of content
- [Dependencies](#dependencies)
- [License](#license)
- [Author](#author)
---
## Dependencies
None.
## License
license (GPL-2.0-or-later, MIT, etc)
## Author
Cian Hatton

@ -1,53 +1,10 @@
galaxy_info: galaxy_info:
author: your name author: Cian Hatton
namespace: chatton namespace: chatton
description: your role description description: Bootstrap ansible
company: your company (optional) company: your company (optional)
license: MIT
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1 min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: [] galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: [] dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

@ -1,14 +1,21 @@
# @meta author: Cian Hatton
# @meta description: >
# Role to install docker. It also installs the docker and docker compose python
# modules.
# @end
# following official instructions here: https://docs.docker.com/engine/install/debian/ # following official instructions here: https://docs.docker.com/engine/install/debian/
- name: Install packages using apt - name: Install packages using apt
apt: apt:
name: name:
- ca-certificates - ca-certificates
- curl - curl
- gnupg2 - gnupg2
- lsb-release - lsb-release
- pip - pip
state: present state: present
update_cache: yes update_cache: true
- name: Add Docker GPG apt Key - name: Add Docker GPG apt Key
apt_key: apt_key:
@ -24,12 +31,12 @@
- name: Install docker packages using apt - name: Install docker packages using apt
apt: apt:
name: name:
- docker-ce - docker-ce
- docker-ce-cli - docker-ce-cli
- containerd.io - containerd.io
- docker-compose-plugin - docker-compose-plugin
state: present state: present
update_cache: yes update_cache: true
- name: Install Docker Module for Python - name: Install Docker Module for Python
pip: pip:

@ -1,19 +1 @@
--- ---
qnap:
# path on qnap where downloads go
downloads_dir: /mnt/ssd0/downloads
# path on qnap where plex transcoding happens
transcoding_dir: /mnt/ssd0/transcoding
# path on qnap where movies are stored
movies_dir: /mnt/mergerfs/media/movies
# path on qnap where tv shows are stored
tv_dir: /mnt/mergerfs/media/tv
# path on qnap where docker compose files are stored
docker_compose_directory: /etc/docker-compose
# path on qnap where backups are stored
backups_dir: /mnt/mergerfs/backups
# path where photoprism photos are stored
photoprism_dir: /mnt/mergerfs/photoprism
use_portainer: true
use_docker_compose: false

@ -1,101 +1,96 @@
# https://github.com/Lissy93/dashy/blob/master/docs/showcase.md
# Details about config.
# https://github.com/Lissy93/dashy/blob/master/docs/configuring.md
# Widgets: https://github.com/Lissy93/dashy/blob/master/docs/widgets.md
---
pageInfo: pageInfo:
title: Home Lab title: Home Lab
sections: sections:
- name: Dashboards - name: Dashboards
widgets: widgets:
- type: gl-disk-space - type: gl-disk-space
options: options:
hostname: http://qnap:8083 hostname: http://qnap:8083
- type: crypto-watch-list - type: crypto-watch-list
options: options:
currency: GBP currency: GBP
sortBy: marketCap sortBy: marketCap
assets: assets:
- bitcoin - bitcoin
- ethereum - ethereum
- cosmos - cosmos
- polkadot - polkadot
- dogecoin - dogecoin
items: items:
- title: Dash Dot - title: Dash Dot
icon: hl-dashdot icon: hl-dashdot
url: "http://qnap:3010" url: http://qnap:3010
- title: Uptime Kuma - title: Uptime Kuma
icon: hl-uptime-kuma icon: hl-uptime-kuma
url: "http://qnap:3001" url: http://qnap:3001
- title: Tautulli - title: Tautulli
icon: hl-tautulli icon: hl-tautulli
url: "http://qnap:8182" url: http://qnap:8182
- title: Glances - title: Glances
icon: hl-glances icon: hl-glances
url: "http://qnap:8083" url: http://qnap:8083
- name: Media Stack - name: Media Stack
items: items:
- title: Plex - title: Plex
icon: hl-plex icon: hl-plex
url: "http://qnap:32400" url: http://qnap:32400
statusCheck: false statusCheck: false
- title: Sonarr - title: Sonarr
icon: hl-sonarr icon: hl-sonarr
url: "http://qnap:8989" url: http://qnap:8989
- title: Radarr - title: Radarr
icon: hl-radarr icon: hl-radarr
url: "http://qnap:7878" url: http://qnap:7878
- title: Overseerr - title: Overseerr
icon: hl-overseerr icon: hl-overseerr
url: "http://qnap:5055" url: http://qnap:5055
- title: Jackett - title: Jackett
icon: hl-jackett icon: hl-jackett
url: "http://qnap:9117" url: http://qnap:9117
statusCheckUrl: "http://qnap:9117/health" statusCheckUrl: http://qnap:9117/health
- title: Qbittorrent - title: Qbittorrent
icon: hl-qbittorrent icon: hl-qbittorrent
url: "http://qnap:15000" url: http://qnap:15000
- name: Tools - name: Tools
items: items:
- title: Photoprism - title: Photoprism
description: Manager photos description: Manager photos
icon: hl-photoprism icon: hl-photoprism
url: "http://qnap:2342" url: http://qnap:2342
- title: Olivetin - title: Olivetin
description: Run pre-defined shell commands description: Run pre-defined shell commands
icon: hl-olivetin icon: hl-olivetin
url: "http://qnap:1337" url: http://qnap:1337
- title: Linkding - title: Linkding
description: Manager photos description: Manager photos
icon: hl-linkding icon: hl-linkding
url: "http://qnap:9090" url: http://qnap:9090
- title: Nextcloud - title: Nextcloud
icon: hl-nextcloud icon: hl-nextcloud
url: "http://qnap:8081" url: http://qnap:8081
- title: Mealie - title: Mealie
icon: hl-mealie icon: hl-mealie
url: "https://mealie.cianhatton.ie" url: https://mealie.cianhatton.ie
- title: Gitea - title: Gitea
icon: hl-gitea icon: hl-gitea
url: "https://git.cianhatton.ie" url: https://git.cianhatton.ie
- name: System Admin - name: System Admin
items: items:
- title: Portainer - title: Portainer
description: Manage docker apps using Portainer description: Manage docker apps using Portainer
icon: hl-portainer icon: hl-portainer
url: "http://qnap:9000" url: http://qnap:9000
- title: Webmin - title: Webmin
icon: hl-webmin icon: hl-webmin
url: "http://qnap:10000" url: http://qnap:10000
- title: Adminer - title: Adminer
description: Manage MariaDB description: Manage MariaDB
icon: hl-adminer icon: hl-adminer
url: "http://qnap:3307" url: http://qnap:3307
- title: Nginx Proxy Manager - title: Nginx Proxy Manager
description: Manage reverse proxies description: Manage reverse proxies
icon: hl-nginx icon: hl-nginx
url: "http://qnap:8181" url: http://qnap:8181
appConfig: appConfig:
statusCheck: true statusCheck: true
showSplashScreen: false showSplashScreen: false

@ -1,23 +1,23 @@
# Listen on all addresses available, port 1337 # Listen on all addresses available, port 1337
listenAddressSingleHTTPFrontend: 0.0.0.0:1337 listenAddressSingleHTTPFrontend: 0.0.0.0:1337
# Choose from INFO (default), WARN and DEBUG # Choose from INFO (default), WARN and DEBUG
logLevel: "INFO" logLevel: INFO
# Actions (buttons) to show up on the WebUI: # Actions (buttons) to show up on the WebUI:
actions: actions:
# Docs: https://docs.olivetin.app/action-container-control.html # Docs: https://docs.olivetin.app/action-container-control.html
- title: Restart Plex - title: Restart Plex
icon: plex icon: plex
shell: docker restart plex shell: docker restart plex
timeout: 30 timeout: 30
- title: Restart Dashy - title: Restart Dashy
icon: restart icon: restart
shell: docker restart dashy shell: docker restart dashy
timeout: 30 timeout: 30
- title: Restart Dashy 2 - title: Restart Dashy 2
icon: restart icon: restart
shell: docker restart dashy shell: docker restart dashy
timeout: 30 timeout: 30
- title: Restart Olivetin - title: Restart Olivetin
icon: restart icon: restart
shell: docker restart olivetin shell: docker restart olivetin
timeout: 30 timeout: 30

@ -1,11 +1,10 @@
---
# handlers file for setup_hosted_services # handlers file for setup_hosted_services
- name: restart-dashy - name: restart-dashy
shell: docker restart dashy command: docker restart dashy
ignore_errors: True ignore_errors: true
- name: restart-olivetin - name: restart-olivetin
shell: docker restart olivetin command: docker restart olivetin
- name: restart-docker-volume-backup - name: restart-docker-volume-backup
shell: docker restart docker-volume-backup command: docker restart docker-volume-backup

@ -1,45 +1,10 @@
galaxy_info: galaxy_info:
author: your name author: Cian Hatton
namespace: chatton namespace: chatton
description: your role description description: your role description
company: your company (optional) company: your company (optional)
license: MIT
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1 min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: [] galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes # List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to # and categorizes the role. Users find roles by searching for tags. Be sure to

@ -1,119 +1,114 @@
---
- name: Docker | Pull images - name: Docker | Pull images
docker_image: docker_image:
name: "{{item}}" name: '{{item}}'
source: pull source: pull
with_items: with_items:
- ubuntu - ubuntu
- busybox - busybox
- name: Docker Compose | Create a directory if it does not exist - name: Docker Compose | Create a directory if it does not exist
file: file:
path: "{{qnap.docker_compose_directory}}/{{item.name}}" path: '{{directories.docker_compose_directory}}/{{item.name}}'
state: directory state: directory
mode: '0755' mode: '0755'
with_items: "{{services}}" with_items: '{{services}}'
- name: Docker Compose | Template Docker Compose Files - name: Docker Compose | Template Docker Compose Files
template: template:
src: "{{item.name}}.j2" src: '{{item.name}}.j2'
dest: "{{qnap.docker_compose_directory}}/{{item.name}}/docker-compose.yml" dest: '{{directories.docker_compose_directory}}/{{item.name}}/docker-compose.yml'
with_items: "{{services}}" with_items: '{{services}}'
- name: Config Files | Create a directory if it does not exist - name: Directories | Ensure required directories
file: file:
path: "{{item}}" path: '{{item}}'
state: directory state: directory
mode: '0755' mode: '0755'
with_items: with_items:
- "{{dashy.config_directory}}" - '{{dashy.config_directory}}'
- "{{olivetin.config_directory}}" - '{{olivetin.config_directory}}'
- "{{dockervolumebackup.config_directory}}" - '{{dockervolumebackup.config_directory}}'
- '{{directories.backups_dir}}/.s3tmp'
- name: Dashy | Config Files - name: Dashy | Config Files
copy: copy:
src: "{{dashy.source_file}}" src: '{{dashy.source_file}}'
dest: "{{dashy.config_directory}}/{{dashy.config_file}}" dest: '{{dashy.config_directory}}/{{dashy.config_file}}'
notify: notify:
- restart-dashy - restart-dashy
- name: Olivetin | Config Files - name: Olivetin | Config Files
copy: copy:
src: "{{olivetin.source_file}}" src: '{{olivetin.source_file}}'
dest: "{{olivetin.config_directory}}/{{olivetin.config_file}}" dest: '{{olivetin.config_directory}}/{{olivetin.config_file}}'
notify: notify:
- restart-olivetin - restart-olivetin
- name: Docker Volume Backup | Config Files - name: Docker Volume Backup | Config Files
template: template:
src: "docker-volume-backup/config.j2" src: docker-volume-backup/config.j2
dest: "{{dockervolumebackup.config_directory}}/config.yml" dest: '{{dockervolumebackup.config_directory}}/config.yml'
notify: notify:
- restart-docker-volume-backup - restart-docker-volume-backup
- name: Install python dependencies (requests) - name: Install python dependencies (requests)
pip: pip:
name: requests name: requests
- name: Docker | Find docker volumes - name: Docker | Find docker volumes
shell: docker volume ls -f name={{item.name}} --format '{{ '{{' }} .Name {{ '}}' }}' shell: docker volume ls -f name={{item.name}} --format '{{ '{{' }} .Name {{ '}}'}}'
with_items: "{{services}}" with_items: '{{services}}'
register: find_volumes register: find_volumes
changed_when: False changed_when: false
- debug: msg="{{find_volumes.results | map(attribute='stdout_lines') | list | flatten }}" - debug: msg="{{ find_volumes.results | map(attribute='stdout_lines') | list | flatten }}"
- name: Docker | Find volumes that need to be restored - name: Docker | Find volumes that need to be restored
script: scripts/find-volumes-to-restore.py script: scripts/find-volumes-to-restore.py
environment: environment:
EXISTING_VOLUMES: "{{ find_volumes.results | map(attribute='stdout_lines') | list | flatten }}" EXISTING_VOLUMES: "{{ find_volumes.results | map(attribute='stdout_lines') | list | flatten }}"
SERVICES: "{{ services }}" SERVICES: '{{ services }}'
DOCKER_COMPOSE_DIR: "{{qnap.docker_compose_directory}}" DOCKER_COMPOSE_DIR: '{{ directories.docker_compose_directory }}'
args: args:
executable: python3 executable: python3
register: python_output register: python_output
changed_when: False changed_when: false
- debug: msg="{{python_output.stdout_lines | list }}" - debug: msg="{{ python_output.stdout_lines | list }}"
- name: Docker Volume Backup | Restore any missing backups from S3 - set_fact:
when: restore_from_s3 restore_volumes: "{{ restore_volumes | default([]) + [{ 'volume_name': item}] }}"
docker_container: with_items: "{{ python_output.stdout_lines | list }}"
command: "restore-volume --s3 --volume {{item}}"
image: "ghcr.io/chatton/docker-volume-backup:v0.3.0" - name: Restore any missing volumes from S3
name: "s3-restore-{{item}}" include_role:
cleanup: true # delete container after it's done. name: chatton.docker_backup.docker_s3_volume_restore
state: started # container should execute. when: restore_volumes is defined
detach: no # task fails if container exits. vars:
volumes: docker_s3_volume_restore_force: false
- /var/run/docker.sock:/var/run/docker.sock docker_s3_volume_restore_latest_s3_key: true
- /tmp:/tmp # temp s3 archive goes here docker_volume_s3_restores: "{{ restore_volumes }}"
env:
AWS_ACCESS_KEY_ID: "{{aws_s3.aws_access_key}}"
AWS_SECRET_ACCESS_KEY: "{{aws_s3.aws_secret_key}}"
AWS_DEFAULT_REGION: "{{aws_s3.region}}"
AWS_BUCKET: "{{aws_s3.bucket}}"
AWS_ENDPOINT: "{{aws_s3.s3_url}}"
with_items: "{{ python_output.stdout_lines }}"
- name: Docker | Create required docker networks - name: Docker | Create required docker networks
docker_network: docker_network:
name: "{{item}}" name: '{{item}}'
with_items: "{{ docker_networks }}" with_items: '{{ docker_networks }}'
- name: Portainer | Update Stack - name: Portainer | Update Stack
when: use_portainer when: container_deployment_mode == "portainer"
portainer: chatton.portainer.portainer_stack:
username: admin username: admin
password: "{{portainer.password}}" password: '{{portainer.password}}'
docker_compose_file_path: "{{qnap.docker_compose_directory}}/{{ item.name }}/docker-compose.yml" docker_compose_file_path: '{{ directories.docker_compose_directory }}/{{ item.name }}/docker-compose.yml'
stack_name: "{{ item.name }}" stack_name: '{{ item.name }}'
with_items: "{{services}}" endpoint_id: '{{ item.endpoint_id }}'
state: present
with_items: '{{services}}'
- name: Docker compose | Update Stack - name: Docker compose | Update Stack
when: use_docker_compose when: container_deployment_mode == "compose"
docker_compose: docker_compose:
project_src: "{{qnap.docker_compose_directory}}/{{ item.name }}" project_src: '{{directories.docker_compose_directory}}/{{ item.name }}'
state: present state: present
with_items: "{{services}}" with_items: '{{services}}'

@ -16,6 +16,6 @@ services:
volumes: volumes:
- /var/run/docker.sock:/var/run/docker.sock - /var/run/docker.sock:/var/run/docker.sock
- {{qnap.backups_dir}}:/backups - {{directories.backups_dir}}:/backups
- /tmp:/tmp - /tmp:/tmp
- {{dockervolumebackup.config_directory}}/config.yml:{{dockervolumebackup.config_directory}}/config.yml - {{dockervolumebackup.config_directory}}/config.yml:{{dockervolumebackup.config_directory}}/config.yml

@ -7,11 +7,11 @@ periodic_backups:
- name: "Nightly backup to local filesystem" - name: "Nightly backup to local filesystem"
type: "filesystem" type: "filesystem"
filesystem_options: filesystem_options:
host_path: {{ qnap.backups_dir }} host_path: {{ directories.backups_dir }}
- name: "Backup to iDrive E2" - name: "Backup to iDrive E2"
type: "s3" type: "s3"
s3_options: s3_options:
host_path: {{ qnap.backups_dir }} host_path: {{ directories.backups_dir }}
aws_access_key_id: {{ docker_volume_backup.aws_access_key_id }} aws_access_key_id: {{ docker_volume_backup.aws_access_key_id }}
aws_secret_access_key: {{ docker_volume_backup.aws_secret_access_key }} aws_secret_access_key: {{ docker_volume_backup.aws_secret_access_key }}
aws_default_region: {{ docker_volume_backup.aws_default_region }} aws_default_region: {{ docker_volume_backup.aws_default_region }}
@ -24,11 +24,11 @@ periodic_backups:
- name: "Monthly backup to local filesystem" - name: "Monthly backup to local filesystem"
type: "filesystem" type: "filesystem"
filesystem_options: filesystem_options:
host_path: {{ qnap.backups_dir }} host_path: {{ directories.backups_dir }}
- name: "Backup to iDrive E2" - name: "Backup to iDrive E2"
type: "s3" type: "s3"
s3_options: s3_options:
host_path: {{ qnap.backups_dir }} host_path: {{ directories.backups_dir }}
aws_access_key_id: {{ docker_volume_backup.aws_access_key_id }} aws_access_key_id: {{ docker_volume_backup.aws_access_key_id }}
aws_secret_access_key: {{ docker_volume_backup.aws_secret_access_key }} aws_secret_access_key: {{ docker_volume_backup.aws_secret_access_key }}
aws_default_region: {{ docker_volume_backup.aws_default_region }} aws_default_region: {{ docker_volume_backup.aws_default_region }}

@ -43,9 +43,9 @@ services:
- "/dev/dri:/dev/dri" # Intel QSV - "/dev/dri:/dev/dri" # Intel QSV
working_dir: "/photoprism" # do not change or remove working_dir: "/photoprism" # do not change or remove
volumes: volumes:
- {{ qnap.photoprism_dir}}/originals:/photoprism/originals # Original media files (DO NOT REMOVE) - {{ directories.photoprism_dir}}/originals:/photoprism/originals # Original media files (DO NOT REMOVE)
- {{ qnap.photoprism_dir}}/import:/photoprism/import # *Optional* base folder from which files can be imported to originals - {{ directories.photoprism_dir}}/import:/photoprism/import # *Optional* base folder from which files can be imported to originals
- {{ qnap.photoprism_dir}}/storage:/photoprism/storage # *Writable* storage folder for cache, database, and sidecar files (DO NOT REMOVE) - {{ directories.photoprism_dir}}/storage:/photoprism/storage # *Writable* storage folder for cache, database, and sidecar files (DO NOT REMOVE)
networks: networks:

@ -16,9 +16,9 @@ services:
- VERSION=docker - VERSION=docker
volumes: volumes:
- config:/config - config:/config
- {{ qnap.tv_dir }}:/tv - {{ directories.tv_dir }}:/tv
- {{ qnap.movies_dir }}:/movies - {{ directories.movies_dir }}:/movies
- {{ qnap.transcoding_dir }}:/transcoding - {{ directories.transcoding_dir }}:/transcoding
restart: unless-stopped restart: unless-stopped
devices: devices:
- /dev/dri:/dev/dri - /dev/dri:/dev/dri

@ -48,7 +48,7 @@ services:
- WEBUI_PORT=15000 - WEBUI_PORT=15000
volumes: volumes:
- qbittorrent_config:/config - qbittorrent_config:/config
- {{ qnap.downloads_dir }}:/downloads - {{ directories.downloads_dir }}:/downloads
restart: unless-stopped restart: unless-stopped
radarr: radarr:
@ -66,8 +66,8 @@ services:
- TZ=Europe/London - TZ=Europe/London
volumes: volumes:
- radarr_config:/config - radarr_config:/config
- {{ qnap.movies_dir }}:/movies - {{ directories.movies_dir }}:/movies
- {{ qnap.downloads_dir }}:/downloads - {{ directories.downloads_dir }}:/downloads
restart: unless-stopped restart: unless-stopped
sonarr: sonarr:
@ -85,8 +85,8 @@ services:
- TZ=Europe/London - TZ=Europe/London
volumes: volumes:
- sonarr_config:/config - sonarr_config:/config
- {{ qnap.tv_dir }}:/tv - {{ directories.tv_dir }}:/tv
- {{ qnap.downloads_dir }}:/downloads - {{ directories.downloads_dir }}:/downloads
restart: unless-stopped restart: unless-stopped
jackett: jackett:
@ -105,7 +105,7 @@ services:
- AUTO_UPDATE=true - AUTO_UPDATE=true
volumes: volumes:
- jackett_config:/config - jackett_config:/config
- {{ qnap.downloads_dir }}:/downloads - {{ directories.downloads_dir }}:/downloads
restart: unless-stopped restart: unless-stopped
volumes: volumes:

@ -1,12 +1,7 @@
$ANSIBLE_VAULT;1.1;AES256 $ANSIBLE_VAULT;1.1;AES256 36356161663039666634393933383830653035633438343866643730303434353632383031303965
36356161663039666634393933383830653035633438343866643730303434353632383031303965 3465343332353437616663643662343262373333366162300a373662386533363234636234633339 35383439373430656130353938653531636563663066646265643838323230356231333836326262
3465343332353437616663643662343262373333366162300a373662386533363234636234633339 6439626538643837390a383039373366626637333738386332356361306131323133383534323462 32376564376363663666383639313064316365353037356633363035373632313561643030643933
35383439373430656130353938653531636563663066646265643838323230356231333836326262 34663533313231636162306465656433663634643038343938396462643261656238626432633136 39356562353463353034373534386537313634326534623830616362336638396337386631663538
6439626538643837390a383039373366626637333738386332356361306131323133383534323462 30663236653532316461306636333536373836626537363430613961346137626261333238366234 30633438653936316539326436393634366236616664383835396434373966333166366265636661
32376564376363663666383639313064316365353037356633363035373632313561643030643933 39666335653265323565313831303264336331363339333532353939653330383362363965353032 383434386133323961373833303262336439
34663533313231636162306465656433663634643038343938396462643261656238626432633136 ...
39356562353463353034373534386537313634326534623830616362336638396337386631663538
30663236653532316461306636333536373836626537363430613961346137626261333238366234
30633438653936316539326436393634366236616664383835396434373966333166366265636661
39666335653265323565313831303264336331363339333532353939653330383362363965353032
383434386133323961373833303262336439

@ -1,13 +1,12 @@
---
# defaults file for setup_linode # defaults file for setup_linode
ssh_keys: ssh_keys:
- "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" - "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
label: simple-linode label: simple-linode
# hosts that are added to the generated .ini file. # hosts that are added to the generated .ini file.
hosts: hosts:
- user: root - user: root
ip: "{{my_linode.instance.ipv4[0]}}" ip: '{{my_linode.instance.ipv4[0]}}'
# https://www.linode.com/community/questions/17190/obtain-a-list-of-image-and-plan-types-using-linode-apicli # https://www.linode.com/community/questions/17190/obtain-a-list-of-image-and-plan-types-using-linode-apicli
type: g6-standard-2 type: g6-standard-2

@ -1,5 +1,5 @@
galaxy_info: galaxy_info:
author: your name author: Cian Hatton
namespace: chatton namespace: chatton
description: your role description description: your role description
company: your company (optional) company: your company (optional)
@ -15,7 +15,7 @@ galaxy_info:
# - GPL-3.0-only # - GPL-3.0-only
# - Apache-2.0 # - Apache-2.0
# - CC-BY-4.0 # - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc) license: MIT
min_ansible_version: 2.1 min_ansible_version: 2.1

@ -1,31 +1,27 @@
---
- name: Include vault variables. - name: Include vault variables.
include_vars: defaults/main-vault.yml include_vars: defaults/main-vault.yml
- name: Create a new Linode. - name: Create a new Linode.
linode_v4: linode_v4:
label: "{{ label }}" label: '{{ label }}'
access_token: "{{ token }}" access_token: '{{ token }}'
type: "{{ type }}" type: '{{ type }}'
region: "{{ region }}" region: '{{ region }}'
image: linode/debian11 image: linode/debian11
root_pass: "{{ password }}" root_pass: '{{ password }}'
authorized_keys: "{{ ssh_keys }}" authorized_keys: '{{ ssh_keys }}'
group: servers group: servers
state: "{{ state }}" state: '{{ state }}'
register: my_linode register: my_linode
- name: Wait for SSH to come up - name: Wait for SSH to come up
local_action: local_action:
module: wait_for module: wait_for host={{ my_linode.instance.ipv4[0] }} port=22 search_regex=OpenSSH
host={{ my_linode.instance.ipv4[0] }}
port=22
search_regex=OpenSSH
timeout=320 timeout=320
- name: Instance Details - name: Instance Details
debug: debug:
msg: "{{my_linode}}" msg: '{{my_linode}}'
- name: Update hosts.ini - name: Update hosts.ini
blockinfile: blockinfile:

@ -1,2 +1,2 @@
--- null
# vars file for setup_linode ...

@ -1,5 +1,5 @@
galaxy_info: galaxy_info:
author: your name author: Cian Hatton
namespace: chatton namespace: chatton
description: your role description description: your role description
company: your company (optional) company: your company (optional)
@ -15,7 +15,7 @@ galaxy_info:
# - GPL-3.0-only # - GPL-3.0-only
# - Apache-2.0 # - Apache-2.0
# - CC-BY-4.0 # - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc) license: MIT
min_ansible_version: 2.1 min_ansible_version: 2.1

@ -1,4 +1,3 @@
---
# dangerous to use this as the device name changes when there are new drives. # dangerous to use this as the device name changes when there are new drives.
# can this be done with an ID? # can this be done with an ID?
#- name: Create a ext4 filesystem on /dev/sdb1 #- name: Create a ext4 filesystem on /dev/sdb1
@ -9,12 +8,12 @@
- name: Mount Volumes - name: Mount Volumes
mount: mount:
path: "{{item.path}}" path: '{{item.path}}'
src: "UUID={{item.uuid}}" src: UUID={{item.uuid}}
fstype: ext4 fstype: ext4
state: mounted state: mounted
opts: defaults,nofail opts: defaults,nofail
loop: "{{ devices }}" loop: '{{ devices }}'
register: volume_out register: volume_out
- debug: msg="{{volume_out}}" - debug: msg="{{volume_out}}"
@ -38,4 +37,4 @@
include_role: include_role:
name: sprat.mergerfs name: sprat.mergerfs
vars: vars:
mergerfs_mounts: "{{mounts}}" mergerfs_mounts: '{{mounts}}'

@ -0,0 +1,25 @@
# setup_portainer
Role to configure Portainer. This role copies the docker-compose.yml onto the host and starts portainer with docker compose. If there is a backup of the portainer volume stored in s3, it will be restored before portainer is started.
## Table of content
- [Dependencies](#dependencies)
- [License](#license)
- [Author](#author)
---
## Dependencies
None.
## License
MIT
## Author
Cian Hatton

@ -1,17 +1,16 @@
---
version: '3.2' version: '3.2'
services: services:
portainer: portainer:
labels: labels:
ie.cianhatton.backup.enabled: "true" ie.cianhatton.backup.enabled: 'true'
ie.cianhatton.backup.key: "nightly" ie.cianhatton.backup.key: nightly
image: portainer/portainer-ce image: portainer/portainer-ce
container_name: "portainer" container_name: portainer
ports: ports:
- "9000:9000" - 9000:9000
volumes: volumes:
- portainer_data:/data - portainer_data:/data
- /var/run/docker.sock:/var/run/docker.sock - /var/run/docker.sock:/var/run/docker.sock
volumes: volumes:
portainer_data: portainer_data:

@ -1,5 +1,5 @@
galaxy_info: galaxy_info:
author: your name author: Cian Hatton
namespace: chatton namespace: chatton
description: your role description description: your role description
company: your company (optional) company: your company (optional)
@ -15,7 +15,7 @@ galaxy_info:
# - GPL-3.0-only # - GPL-3.0-only
# - Apache-2.0 # - Apache-2.0
# - CC-BY-4.0 # - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc) license: MIT
min_ansible_version: 2.1 min_ansible_version: 2.1

@ -1,51 +1,51 @@
---
- name: Portainer | Create directory if it does not exist - name: Portainer | Create directory if it does not exist
ansible.builtin.file: ansible.builtin.file:
path: "{{docker_compose_directory}}" path: '{{directories.docker_compose_directory}}'
state: directory state: directory
mode: '0755' mode: '0755'
- name: Portainer | Copy docker compose file - name: Portainer | Copy docker compose file
copy: copy:
src: docker-compose.yml src: docker-compose.yml
dest: "{{docker_compose_directory}}/docker-compose.yml" dest: '{{directories.docker_compose_directory}}/docker-compose.yml'
- name: Portainer | Check if volume exists - name: Portainer | Check if volume exists
shell: docker volume ls -f name=portainer_portainer_data --format '{{ '{{' }} .Name {{ '}}' }}' shell: docker volume ls -f name=portainer_portainer_data --format '{{ '{{' }} .Name
{{ '}}' }}'
register: portainer_volume register: portainer_volume
changed_when: False changed_when: false
- name: Portainer | Pull images - name: Portainer | Pull images
docker_image: docker_image:
name: "{{item}}" name: '{{item}}'
source: pull source: pull
with_items: with_items:
- ubuntu - ubuntu
- busybox - busybox
- name: Docker Volume Backup | Restore Portainer volume from S3 - name: Docker Volume Backup | Restore Portainer volume from S3
when: (portainer_volume.stdout_lines | length) == 0 when: (portainer_volume.stdout_lines | length) == 0
docker_container: docker_container:
command: "restore-volume --s3 --volume portainer_portainer_data" command: restore-volume --s3 --volume portainer_portainer_data
image: "ghcr.io/chatton/docker-volume-backup:v0.3.0" image: ghcr.io/chatton/docker-volume-backup:v0.3.0
name: "s3-restore-portainer" name: s3-restore-portainer
cleanup: true # delete container after it's done. cleanup: true # delete container after it's done.
state: started # container should execute. state: started # container should execute.
detach: no # task fails if container exits. detach: no # task fails if container exits.
volumes: volumes:
- /var/run/docker.sock:/var/run/docker.sock - /var/run/docker.sock:/var/run/docker.sock
- /tmp:/tmp # temp s3 archive goes here - /tmp:/tmp # temp s3 archive goes here
env: env:
AWS_ACCESS_KEY_ID: "{{aws_s3.aws_access_key}}" AWS_ACCESS_KEY_ID: '{{aws_s3.aws_access_key}}'
AWS_SECRET_ACCESS_KEY: "{{aws_s3.aws_secret_key}}" AWS_SECRET_ACCESS_KEY: '{{aws_s3.aws_secret_key}}'
AWS_DEFAULT_REGION: "{{aws_s3.region}}" AWS_DEFAULT_REGION: '{{aws_s3.region}}'
AWS_BUCKET: "{{aws_s3.bucket}}" AWS_BUCKET: '{{aws_s3.bucket}}'
AWS_ENDPOINT: "{{aws_s3.s3_url}}" AWS_ENDPOINT: '{{aws_s3.s3_url}}'
- name: Portainer | Docker compose up - name: Portainer | Docker compose up
community.docker.docker_compose: community.docker.docker_compose:
project_src: "{{docker_compose_directory}}/portainer" project_src: '{{directories.docker_compose_directory}}/portainer'
# Don't really need this as long as there is an S3 backup. # Don't really need this as long as there is an S3 backup.
#- name: Portainer | Register Admin User #- name: Portainer | Register Admin User

@ -1,10 +1,9 @@
---
samba_group: smbgroup samba_group: smbgroup
samba_user: smbuser samba_user: smbuser
users: users:
- name: cianhatton - name: cianhatton
shares: shares:
- /share - /share
- /share/public_files - /share/public_files
- /share/private_files - /share/private_files
- /share/cian_files - /share/cian_files

@ -1,4 +1,3 @@
---
- name: restart-samba - name: restart-samba
ansible.builtin.service: ansible.builtin.service:
name: smbd name: smbd

@ -1,5 +1,5 @@
galaxy_info: galaxy_info:
author: your name author: Cian Hatton
namespace: chatton namespace: chatton
description: your role description description: your role description
company: your company (optional) company: your company (optional)
@ -15,7 +15,7 @@ galaxy_info:
# - GPL-3.0-only # - GPL-3.0-only
# - Apache-2.0 # - Apache-2.0
# - CC-BY-4.0 # - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc) license: MIT
min_ansible_version: 2.1 min_ansible_version: 2.1

@ -1,54 +1,53 @@
---
- name: Install samba packages using apt - name: Install samba packages using apt
apt: apt:
name: name:
- samba - samba
state: present state: present
update_cache: yes update_cache: true
- name: Ensure samba group exists - name: Ensure samba group exists
ansible.builtin.group: ansible.builtin.group:
name: "{{samba_group}}" name: '{{samba_group}}'
state: present state: present
system: true system: true
notify: notify:
- reload-samba - reload-samba
- name: Ensure samba user - name: Ensure samba user
ansible.builtin.user: ansible.builtin.user:
name: "{{samba_user}}" name: '{{samba_user}}'
comment: Samba user comment: Samba user
group: "{{samba_group}}" group: '{{samba_group}}'
shell: /bin/false shell: /bin/false
create_home: false create_home: false
notify: notify:
- reload-samba - reload-samba
- name: Add the User(s) to Samba group - name: Add the User(s) to Samba group
user: user:
name: "{{ item.name }}" name: '{{ item.name }}'
groups: "{{ samba_group }}" groups: '{{ samba_group }}'
append: yes append: true
with_items: "{{users}}" with_items: '{{users}}'
- name: Ensure Share directories - name: Ensure Share directories
ansible.builtin.file: ansible.builtin.file:
path: "{{item}}" path: '{{item}}'
recurse: yes recurse: true
state: directory state: directory
owner: "{{samba_user}}" owner: '{{samba_user}}'
group: "{{samba_group}}" group: '{{samba_group}}'
mode: 'u+rw,g+rw' mode: u+rw,g+rw
with_items: "{{shares}}" with_items: '{{shares}}'
notify: notify:
- reload-samba - reload-samba
- name: Copy smb conf files - name: Copy smb conf files
copy: copy:
src: "{{item}}" src: '{{item}}'
dest: "/etc/samba/{{item}}" dest: /etc/samba/{{item}}
with_items: with_items:
- smb.conf - smb.conf
- shares.conf - shares.conf
notify: notify:
- reload-samba - reload-samba

@ -1,5 +1,5 @@
galaxy_info: galaxy_info:
author: your name author: Cian Hatton
namespace: chatton namespace: chatton
description: your role description description: your role description
company: your company (optional) company: your company (optional)
@ -15,7 +15,7 @@ galaxy_info:
# - GPL-3.0-only # - GPL-3.0-only
# - Apache-2.0 # - Apache-2.0
# - CC-BY-4.0 # - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc) license: MIT
min_ansible_version: 2.1 min_ansible_version: 2.1

@ -2,31 +2,33 @@
- name: Install Sudo - name: Install Sudo
apt: apt:
pkg: pkg:
- sudo - sudo
state: latest state: latest
update_cache: true update_cache: true
- name: Make sure we have a '{{homelab_group}}' group - name: Make sure we have a groups
group: group:
name: "{{homelab_group}}" name: '{{ item.group }}'
state: present state: present
- name: Allow '{{homelab_user}}' group to have passwordless sudo with_items: '{{ users }}'
lineinfile:
dest: /etc/sudoers
state: present
regexp: '^%{{homelab_user}}'
line: '%{{homelab_user}} ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s'
- name: Add User - name: Add Users
ansible.builtin.user: ansible.builtin.user:
name: "{{homelab_user}}" name: '{{ item.name }}'
comment: "{{homelab_user}} user" comment: '{{ item.name }} user'
uid: 1000 group: '{{ item.group }}'
group: "{{homelab_group}}" with_items: '{{ users }}'
- name: Add sudoers
template:
src: sudoers.j2
dest: /etc/sudoers.d/{{item.name}}
mode: 0440
with_items: '{{ users }}'
when: item.passwordless_sudo == true
- name: Set authorized key - name: Set authorized key
authorized_key: authorized_key:
user: "{{homelab_user}}" user: '{{ homelab_user }}'
state: present state: present
key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"

@ -0,0 +1 @@
{{item.name}} ALL=(ALL) NOPASSWD: ALL

@ -0,0 +1,2 @@
[flake8]
exclude = .svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg,.*env

@ -0,0 +1,68 @@
---
name: CI
on: # yamllint disable-line rule:truthy
push:
schedule:
- cron: "0 5 * * 1"
jobs:
# test the role
test:
runs-on: ubuntu-latest
strategy:
matrix:
config:
- image: geerlingguy/docker-centos8-ansible
mode: github_releases
- image: geerlingguy/docker-centos7-ansible
mode: github_releases
- image: geerlingguy/docker-fedora32-ansible
mode: github_releases
- image: geerlingguy/docker-fedora31-ansible
mode: github_releases
- image: geerlingguy/docker-fedora30-ansible
mode: github_releases
- image: geerlingguy/docker-ubuntu2004-ansible
mode: github_releases
- image: geerlingguy/docker-ubuntu2004-ansible
mode: package_manager
- image: geerlingguy/docker-ubuntu1804-ansible
mode: github_releases
- image: geerlingguy/docker-ubuntu1604-ansible
mode: github_releases
- image: geerlingguy/docker-debian10-ansible
mode: package_manager
- image: geerlingguy/docker-debian10-ansible
mode: github_releases
- image: geerlingguy/docker-debian9-ansible
mode: github_releases
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Python 3
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.in
- name: Run molecule tests
env:
IMAGE: ${{ matrix.config.image }}
INSTALL_MODE: ${{ matrix.config.mode }}
run: molecule -v test
# publish the role on ansible galaxy
publish:
needs: test
runs-on: ubuntu-latest
steps:
- name: Publish
uses: robertdebock/galaxy-action@1.1.0
with:
galaxy_api_key: ${{ secrets.GALAXY_API_KEY }}

@ -0,0 +1,5 @@
*.retry
*.pyc
__pycache__/
*env/
.cache/

@ -0,0 +1,9 @@
---
extends: default
ignore: |
.*env/
rules:
line-length:
max: 120

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2020 Sylvain Prat
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -0,0 +1,54 @@
Ansible Role: mergerfs
======================
[![Build Status][build_badge]][build_link]
[![Ansible Galaxy][galaxy_badge]][galaxy_link]
Install and configure Mergerfs — A featureful union filesystem.
Requirements
------------
None.
Role Variables
--------------
See [defaults/main.yml](defaults/main.yml).
Dependencies
------------
None.
Example Playbook
----------------
```yaml
- hosts: server
roles:
- role: sprat.mergerfs
vars:
mergerfs_mounts:
- path: /mnt/data
branches:
- /mnt/data1
- /mnt/data2
options: allow_other,use_ino
```
License
-------
MIT
Author Information
------------------
This role was created in 2020 by [Sylvain Prat](https://github.com/sprat).
[build_badge]: https://img.shields.io/github/workflow/status/sprat/ansible-role-mergerfs/CI
[build_link]: https://github.com/sprat/ansible-role-mergerfs/actions?query=workflow:CI
[galaxy_badge]: https://img.shields.io/ansible/role/47517
[galaxy_link]: https://galaxy.ansible.com/sprat/mergerfs

@ -0,0 +1,23 @@
---
# Install mode: defines where to download and install the package from:
# - "github_releases": install from Mergerfs' GitHub releases
# - "package_manager": install from the Linux distribution package manager.
# Note that the mergerfs package does not exists in all distributions, so it
# may not work for you.
mergerfs_install_mode: github_releases
# Version to install: "latest" version or a specific version number, e.g. "2.28.2"
# This setting only applies in "github_releases" mode
mergerfs_version: latest
# Mergerfs mountpoints to create. For example:
# mergerfs_mounts:
# - path: /mnt/storage
# branches:
# - /mnt/data*
# - /mnt/other
# options: allow_other,use_ino
mergerfs_mounts: []
# Url of the mergerfs GitHub releases page
mergerfs_github_releases_url: https://github.com/trapexit/mergerfs/releases

@ -0,0 +1,2 @@
install_date: Thu 1 Sep 15:42:59 2022
version: master

@ -0,0 +1,30 @@
---
galaxy_info:
author: Sylvain Prat
role_name: mergerfs
namespace: sprat
description: Install and configure Mergerfs — A featureful union filesystem
license: MIT
company: none
min_ansible_version: 2.3
platforms:
- name: Ubuntu
versions:
- all
- name: Debian
versions:
- all
- name: Fedora
versions:
- all
- name: EL
versions:
- all
galaxy_tags:
- mergerfs
- union
- filesystem
- disk
- mount
dependencies: []

@ -0,0 +1,11 @@
---
- name: Converge
hosts: all
vars:
mergerfs_mounts:
- path: /mnt/storage
branches:
- /mnt/data*
options: allow_other,use_ino
roles:
- role: ansible-role-mergerfs

@ -0,0 +1,21 @@
---
dependency:
name: galaxy
driver:
name: docker
lint: yamllint -s . && ansible-lint . && flake8
platforms:
- name: instance
image: ${IMAGE:-geerlingguy/docker-ubuntu2004-ansible}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
privileged: true
pre_build_image: true
provisioner:
name: ansible
inventory:
group_vars:
all:
mergerfs_install_mode: ${INSTALL_MODE:-github_releases}
verifier:
name: testinfra

@ -0,0 +1,25 @@
---
- name: Prepare
hosts: all
tasks:
- name: Create directories
become: true
file:
path: "{{ item }}"
state: directory
loop:
- /mnt/data1
- /mnt/data2
- name: Create data files
become: true
copy:
content: "{{ item.content }}\n"
dest: "{{ item.path }}"
loop:
- path: /mnt/data1/file1.txt
content: file1
- path: /mnt/data2/file2.txt
content: file2
- path: /mnt/data2/file3.txt
content: file3

@ -0,0 +1,21 @@
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
def test_mount_point(host):
mount_point = host.mount_point('/mnt/storage')
assert mount_point.exists
assert mount_point.filesystem == 'fuse.mergerfs'
assert 'allow_other' in mount_point.options
# assert 'use_ino' in mount_point.options
def test_data_files(host):
assert host.file('/mnt/storage/file1.txt').exists
assert host.file('/mnt/storage/file2.txt').exists
assert host.file('/mnt/storage/file3.txt').exists

@ -0,0 +1 @@
molecule[ansible,docker,test,lint]

@ -0,0 +1,249 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile
#
ansi2html==1.6.0
# via molecule
ansible-base==2.10.7
# via ansible
ansible-lint==5.0.7
# via molecule
ansible==3.2.0
# via molecule
apipkg==1.5
# via execnet
appdirs==1.4.4
# via virtualenv
arrow==1.0.3
# via jinja2-time
attrs==20.3.0
# via pytest
bcrypt==3.2.0
# via paramiko
binaryornot==0.4.4
# via cookiecutter
bracex==2.1.1
# via wcmatch
cerberus==1.3.2
# via molecule
certifi==2020.12.5
# via requests
cffi==1.14.5
# via
# bcrypt
# cryptography
# pynacl
cfgv==3.2.0
# via pre-commit
chardet==4.0.0
# via
# binaryornot
# requests
click-completion==0.5.2
# via molecule
click-help-colors==0.9
# via molecule
click==7.1.2
# via
# click-completion
# click-help-colors
# cookiecutter
# molecule
colorama==0.4.4
# via rich
commonmark==0.9.1
# via rich
cookiecutter==1.7.2
# via molecule
coverage==5.5
# via pytest-cov
cryptography==3.4.7
# via
# ansible-base
# paramiko
distlib==0.3.1
# via virtualenv
distro==1.5.0
# via selinux
docker==5.0.0
# via molecule-docker
enrich==1.2.6
# via
# ansible-lint
# molecule
execnet==1.8.0
# via pytest-xdist
filelock==3.0.12
# via virtualenv
flake8==3.9.0
# via molecule
identify==2.2.3
# via pre-commit
idna==2.10
# via requests
iniconfig==1.1.1
# via pytest
jinja2-time==0.2.0
# via cookiecutter
jinja2==2.11.3
# via
# ansible-base
# click-completion
# cookiecutter
# jinja2-time
# molecule
markupsafe==1.1.1
# via
# cookiecutter
# jinja2
mccabe==0.6.1
# via flake8
molecule-docker==0.2.4
# via molecule
molecule[ansible,docker,lint,test]==3.3.0
# via
# -r requirements.in
# molecule-docker
more-itertools==8.7.0
# via pytest-plus
nodeenv==1.6.0
# via pre-commit
packaging==20.9
# via
# ansible-base
# ansible-lint
# molecule
# pytest
paramiko==2.7.2
# via molecule
pathspec==0.8.1
# via yamllint
pexpect==4.8.0
# via molecule
pluggy==0.13.1
# via
# molecule
# pytest
poyo==0.5.0
# via cookiecutter
pre-commit==2.12.0
# via molecule
ptyprocess==0.7.0
# via pexpect
py==1.10.0
# via
# pytest
# pytest-forked
pycodestyle==2.7.0
# via flake8
pycparser==2.20
# via cffi
pyflakes==2.3.1
# via flake8
pygments==2.8.1
# via rich
pynacl==1.4.0
# via paramiko
pyparsing==2.4.7
# via packaging
pytest-cov==2.11.1
# via molecule
pytest-forked==1.3.0
# via pytest-xdist
pytest-helpers-namespace==2021.3.24
# via molecule
pytest-html==3.1.1
# via molecule
pytest-metadata==1.11.0
# via pytest-html
pytest-mock==3.5.1
# via molecule
pytest-plus==0.2
# via molecule
pytest-testinfra==6.2.0
# via molecule
pytest-verbose-parametrize==1.7.0
# via molecule
pytest-xdist==2.2.1
# via molecule
pytest==6.2.3
# via
# molecule
# pytest-cov
# pytest-forked
# pytest-helpers-namespace
# pytest-html
# pytest-metadata
# pytest-mock
# pytest-plus
# pytest-testinfra
# pytest-verbose-parametrize
# pytest-xdist
python-dateutil==2.8.1
# via arrow
python-slugify==4.0.1
# via cookiecutter
pyyaml==5.4.1
# via
# ansible-base
# ansible-lint
# molecule
# pre-commit
# yamllint
requests==2.25.1
# via
# cookiecutter
# docker
rich==10.1.0
# via
# ansible-lint
# enrich
# molecule
ruamel.yaml.clib==0.2.2
# via ruamel.yaml
ruamel.yaml==0.17.4
# via ansible-lint
selinux==0.2.1
# via
# molecule
# molecule-docker
shellingham==1.4.0
# via click-completion
six==1.15.0
# via
# bcrypt
# click-completion
# cookiecutter
# pynacl
# pytest-verbose-parametrize
# python-dateutil
# tenacity
# virtualenv
# websocket-client
subprocess-tee==0.2.0
# via molecule
tenacity==7.0.0
# via ansible-lint
text-unidecode==1.3
# via python-slugify
toml==0.10.2
# via
# pre-commit
# pytest
typing-extensions==3.7.4.3
# via rich
urllib3==1.26.4
# via requests
virtualenv==20.4.3
# via pre-commit
wcmatch==8.1.2
# via ansible-lint
websocket-client==0.58.0
# via docker
yamllint==1.26.1
# via molecule
# The following packages are considered to be unsafe in a requirements file:
# setuptools

@ -0,0 +1,54 @@
---
# Note: we don't use the GitHub API to retrieve the latest version because
# it has rate limits which are hard to avoid in CI (we need a token, authenticate
# with the API, etc.). Instead, we browse the latest release url which redirects
# to the release page, where we can find the version number in the URL.
- become: false
delegate_to: localhost
run_once: true
block:
- name: Get latest release information from GitHub
uri:
url: "{{ mergerfs_github_releases_url }}/latest"
register: mergerfs_github_release_page
- name: Set latest mergerfs version fact
set_fact:
mergerfs_version: "{{ mergerfs_github_release_page['url'].split('/')[-1] }}"
when: mergerfs_version == "latest"
- name: Determine package download url
set_fact:
mergerfs_package_url: "{{ mergerfs_github_releases_url }}/download/{{ mergerfs_version }}/\
{{ mergerfs_pkg_prefix }}{{ mergerfs_version }}{{ mergerfs_pkg_suffix }}"
- name: Install xz-utils package for .deb package installation
become: true
apt:
name: xz-utils
state: present
update_cache: true
when: ansible_pkg_mgr == 'apt'
- name: Install mergerfs package with apt
become: true
apt:
deb: "{{ mergerfs_package_url }}"
state: present
update_cache: true
when: ansible_pkg_mgr == 'apt'
- name: Install mergerfs package with yum
become: true
yum:
name: "{{ mergerfs_package_url }}"
state: present
disable_gpg_check: true # the package is not signed
when: ansible_pkg_mgr == 'yum'
- name: Install mergerfs package with dnf
become: true
dnf:
name: "{{ mergerfs_package_url }}"
state: present
disable_gpg_check: true # the package is not signed
when: ansible_pkg_mgr == 'dnf'

@ -0,0 +1,7 @@
---
- name: Install mergerfs package with package manager
become: true
package:
name: mergerfs
state: present
update_cache: true

@ -0,0 +1,34 @@
---
- name: Include OS-specific variables
include_vars: "{{ ansible_os_family }}.yml"
tags:
- mergerfs
- name: Install mergerfs prerequisites
become: true
package:
name: "{{ mergerfs_prerequisites }}"
state: present
update_cache: true
tags:
- mergerfs
- mergerfs_install
- name: Include install tasks
import_tasks: install_from_{{ mergerfs_install_mode }}.yml
tags:
- mergerfs
- mergerfs_install
- name: Mount mergerfs filesystems
become: true
mount:
fstype: fuse.mergerfs
src: "{{ ':'.join(item.branches | mandatory) }}"
path: "{{ item.path | mandatory }}"
opts: "{{ item.options | default('defaults') }}"
state: "{{ item.state | default('mounted') }}"
loop: "{{ mergerfs_mounts }}"
tags:
- mergerfs
- mergerfs_mount

@ -0,0 +1,12 @@
---
mergerfs_prerequisites:
- fuse
mergerfs_dist: "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}"
mergerfs_arch_map:
x86_64: amd64
i386: i386
aarch64: arm64
armv7l: armhf
mergerfs_arch: "{{ mergerfs_arch_map[ansible_userspace_architecture | default(ansible_architecture) ] }}"
mergerfs_pkg_prefix: "mergerfs_"
mergerfs_pkg_suffix: ".{{ mergerfs_dist }}_{{ mergerfs_arch }}.deb"

@ -0,0 +1,7 @@
---
mergerfs_prerequisites:
- fuse
mergerfs_dist: "{{ 'fc' if ansible_distribution == 'Fedora' else 'el' }}{{ ansible_distribution_major_version }}"
mergerfs_arch: "{{ ansible_userspace_architecture }}"
mergerfs_pkg_prefix: "mergerfs-"
mergerfs_pkg_suffix: "-1.{{ mergerfs_dist }}.{{ mergerfs_arch }}.rpm"

@ -0,0 +1,16 @@
#!/bin/bash
function format_dir(){
dir="${1}"
yaml_files="$(find ${dir} -type f -name "*.yml")"
for f in $yaml_files
do
yamlfmt $f -w
done
}
format_dir roles
format_dir playbooks
format_dir host_vars
format_dir group_vars
format_dir .github/workflows

@ -1,25 +0,0 @@
---
- hosts: servers
become: true
pre_tasks:
- name: Include vault variables.
include_vars: "{{vault_file}}"
tags: ["always"]
roles:
- role: 'roles/setup_mergerfs'
tags: ["mergerfs"]
when: configure_mergefs
- role: 'roles/setup_users'
tags: ["users"]
- role: 'roles/setup_samba'
tags: ["samba"]
when: configure_samba
- role: 'roles/setup_docker'
tags: ["docker"]
- role: 'roles/setup_portainer'
tags: ["portainer"]
when: use_portainer
- role: 'roles/setup_hosted_services'
tags: ["services"]

@ -1,7 +0,0 @@
---
- hosts: localhost
become: true
roles:
- role: 'roles/setup_linode'
vars:
state: present

@ -4,3 +4,4 @@ inventory=hosts.ini
vault_password_file=.vault-pass.sh vault_password_file=.vault-pass.sh
host_key_checking = False host_key_checking = False
enable_plugins = linode enable_plugins = linode
roles_path = roles

@ -1,43 +1,74 @@
$ANSIBLE_VAULT;1.1;AES256 $ANSIBLE_VAULT;1.1;AES256
37666165636561303539306466393465653238336365663731616363323164313361633830353730 31643662633861333962303536636166333533363233366265626661636532383530633336633633
3531623965653935303664383061386164383038656439330a323265306137613231313837383335 3137656636386638643461653165666130303132313232640a336262333830663666353762656138
31373763633930333536313533356333336235633265326265366337303035333464646462326163 30386564666435333532616665303131623964386434396363653039353761636364313865323063
6632656239626631380a333365653563313139343631306330643638396661393736376239613061 3133306161646262610a303331353633643733633164613264393665633236636164613936336632
36616234346663373236666633623231333137316561336362323830643531323934363735323837 62383332383165303063663331646236653433643862383139326662666234373065616163636532
39353363616462343065303538333637623837653633366437646436313963616333653834306334 34623633633134333036366635386263333538376636666131393330313439396432366264326636
30663839616335363162346135393037646330616331323464623631663931623935323563313437 31383731633365636335346231346337313634633861636437356632653764343532356637343131
62323462306463376433663636663033623633343562653834663066323932656230623232666136 36333737643236363530663537633765646164313937303233376464323136373061393430376664
66636264633631393937613535363366626135303939656364623937653763383865303461646536 38396237333363616461316433346632346639613236376363386536373832613634356536656261
65656665643866623363623464656632323261656433663030316333613935336662303763303062 30663235616439383761393239343763383938396431653964643961363961623432333033363130
30623937303738616138633933343438363935616334353837616465643930306435326462313961 63653631346366326639353138393765336635363339383230383363633233323034356631383238
65633131303732343135663635303933313935636533333935633830336439636236363330306337 64643133663537336331343064653263656366383761633032343636383666386263356437396630
36323932613339666631333038323538656334393565343666346133393864643664383662316166 36316439363331626536303630626361666636633465383634363138643939623533653963376264
32636462636165383230353231653261333564646562663035316266653466623565623661376637 35363439663238623961383061356634373931613439623765663737323136386162616237326463
36653935383539313864656165636234636562323664653435633732383737353039323565643537 64326638333630323861613534353736323362303733333337643136613665616539303064326430
34383739616639343831306565323930373961656235383666366139346662626166646462356139 38643731333139386138323361383931376332653737656334636530333334313030633865613563
31336334653864373037353135646538633039623332336635386132396664623737626436303530 65663964623766636165313937336662306439626466313062646333323736656238373962333934
63316532366639643861663433383335626530613437363831623232656339613235383734646466 37333936653835336562363432396636633932356336313030623036393437623065653863343364
39653838326566656439626663363731356134383362333535643736663231323030366565653461 65383961336237653334643365313439326630616637366666663761356532393033633835323337
62343166343762646533653065303531656564363866356366663364363438373731333861316163 37396432303166316433353062336464396633363762656532393364303762353465393531376639
35323430616164323962356635323436306265623335343736623132363138343663613163313862 66346466333533376530343565306133323632383665376233656431396562623637316261653063
37383066393335616666343336643131656431386264386230366434623362663733383334343037 37613765353035313162313539386337656534353439363930303362383862636562346432323261
36616237613233333630323233643630353330343730386464316634633938383030353765366436 30653335633363373863306538353562626163356265343039313563633161663832343934633665
62303962353838643461326565313236336265346234316637626338623031303430623039366562 63633539353331656564623134303061313434633336313664643831383737393237653230666635
64653062333963383865343263343232356366643238393636383139656536613639376135353163 33316337636538316461666531393231353833633461336263643831373032343735353034306230
32306233373533356365393233393165666132336637613862653038373839613036353463306233 66303563396466383765393938366534313633633562643535636332336237396338316239333836
63643564656364353836303665613862316165393263633536623731343137366162663335623066 61653437333263353834336664646430303839633463613036613137316438343162666530353863
66613835306135303563323036313336313632306131643730643931363438666364313864353766 37326161353362636163303131393738393763626134623631353832316161333633623261613164
36643431343265343036313362653262393636653134343339666361383263623936353564393266 30393461613336376634313831343633353836646432643138306637313066643161346530653434
36303836313437656436656634363462386362613361653536373038313263623562653833333735 35353037313162366465653632666531656461636532343236376564393131666165616134346161
62333339626437336464326263393838336135326566346436336637313035333062643662393463 65386539613238343164643535333330666432646432313730646530356537333733383564613930
38633964656361346530326437663233356139373537643130656266653236666437663730356561 65646231633737346364613665666563383237623765313064303863393264653435626363616365
66356662373961623537343063343162303833363130653439613965393363386532633966623537 30363237383236353436396531653133623830353566666439633038326533363464636639346264
66653730613866323933363733633734646437376530396234303161373365376235653132363262 32653963336666396562383032353830613436653339386439373763326531623130303034653939
36643965393164393165363231303336656238326530373531356631313532663864656261653936 38663064333430633435396164633437343034396230623437616539376666396438636430393463
37333365313036363330336133376431313839626633623732316163363632363033306338633030 65376638653134393232353136623762636564626562306562366465376464363363383630656538
34396530313764613465633435323435653161646634336562303064373563373938356132653638 63323364383534353135396535303266636530386664633931613366343537613033306665656363
63643361363763326532363836643433613166636266623933653065633631366234643366383464 32376532666165633238633761333165356563333636333431653863343530383130623032653333
39313465653432663833623030333862363430363036646334366261316161656633646339366231 63623937633134623030616461613433333766373262623165343961313534666235656531353537
37323533633139636363666664353965333637353735633039616337636439636266613962323138 31366161303565643564646162353466306436646132373965326535383930303366363336653633
39333962646233343361363461643936616661323237303030663534336634636239336164653939 33613233333364616135326165336164356532613531376532623037663830633736306464666533
6263396338326466363861343332376264376635653362643764 65646536356466356164336330653463303831323835356336646335333837616163316264336130
61663066303134373439336139376665356463633337666638313463646535663063343863373162
64313761353435333562373166336163636438353938663239326466396663613739636334666566
35643735323565323064346136633037383038633530363831636164306238656438636236366362
30343431303935316164343239626639666130663734643166363330306264616363333439353838
30313466343562643730393230303565646137616465336439636264653862323231363633623234
34333863376638313036353731393832313833346531313431386534656262613439646137666230
31613365363236656430323135643463636531663062376438626635626536653461343931636364
32316233376635616639623737636338346365323039343332613962653734346266623163333061
30323565653065346333353637653061353033313339636435336166623833376237656236336561
34376161303338663532616530343535323964623665303931366462303832396532653635346463
66333632326261373732313233396561316562613638643063663065393838376436313635316230
37643832623562386331393461663535353937636430386437663931363534373163373166366231
62313162643831643965303163306563626564353635343465663136396161623561666263363364
36336532633664633739346166303132393733373832636565663463353163343565653462643163
37373566323630656437643137646465626336623239656661373661663363343432663065373237
34306535323738343139626663323339363538376262333038373634313130623838363535616131
31393166653262656539643466633331316463306536323634353136663530386666313765326336
39666266613836363235613164363661626534393837313239663538613734353338636533626266
38366131326634666561613662313638616433636431633930303634633862316263653562643865
30326336346165643930383632646461303131336163343137346134663531633064663932303937
36616162353436623336336361343631326136303938316266383263303034633762303231666439
36336332663237643437326265623131653463326237373166623364356633336234316336366137
63363834336566646365316434326433336632373638633366616633336138323962366231383066
33663235363532626433323639323938386333646339376632333064343861613130666365333665
61373136396663363538363963656263633636663639633163373463346431303631366439656333
61393137363338363133336463306132396433646532663130386233626633363239336561633335
33343332316531633461326265623663653631663765343164363139306362633531643964653432
66353866376130363132343436386237326639326236386539383064646230636264376432663564
37313636353935616666333438653835646632333930323630393464636239363534636637343033
62363830366165323836313761643061316137353463393132306431316539333133393939613331
6465

@ -1,19 +0,0 @@
---
- hosts: linode
tasks:
# TODO
# Verify sudoers
# Verify ssh authorized key
# Verify docker installed
# Verify docker images pulled
# Verify app config files
- name: Docker Compose Files Exist
command: "stat {{docker_compose_directory}}/{{ item.name }}/docker-compose.yml"
with_items: "{{services}}"
changed_when: false
register: stat
- name: Assert all Docker Compose files were created
assert:
that: item.rc == 0
with_items: "{{stat.results}}"
Loading…
Cancel
Save