add deps to git

pull/27/head
Cian Hatton 3 years ago
parent f70527d570
commit 35afd3d71f

.gitignore

@@ -3,5 +3,5 @@ venv
 stack.env
 linodehosts.ini
 test-playbook.yml
-roles/sprat.mergerfs
-collections
+#roles/sprat.mergerfs
+#collections

@@ -11,3 +11,4 @@ collections_path = collections
 ; TODO: put the filters in a place that makes sense.
 ; I don't think it should be required to specify the path to the collection's filters like this!
 filter_plugins = collections/ansible_collections/chatton/docker_backup/plugins/filter
+nocows=1

@@ -0,0 +1,20 @@
name: E2E
on:
workflow_dispatch:
push:
jobs:
e2e:
runs-on: ubuntu-latest
steps:
- name: Checkout the codebase
uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: 3.8.9
- name: Install dependencies
run: make deps
- name: Test Backup & Restore
run: make e2e
env:
VAULT_KEY: "${{ secrets.VAULT_KEY }}"

@@ -0,0 +1,18 @@
name: Code Health
on:
workflow_dispatch:
push:
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout the codebase
uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: 3.8.9
- name: Install dependencies
run: pip install -r requirements.txt
- name: Lint roles directory
run: ansible-lint roles

@@ -0,0 +1,292 @@
{
"files": [
{
"name": ".",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "LICENSE",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "eb1e623301bf97851c37dc8127635aba0407c91b8583725472905407facc3fb1",
"format": 1
},
{
"name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "49b26f38de2b9738843174e6e259cba40e33e50581dd8e2fdf3f881f88e8ace5",
"format": 1
},
{
"name": "plugins",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/filter",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/filter/filters.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "dcfb6a2c0e2437210beffc1fb8857fa80dc7fda8dde987bc3f627bd14363776e",
"format": 1
},
{
"name": "Makefile",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8e1aa2e50b6c38e0b2fcb86f3f8fd574603ad6d6ad0e25f717670e63a9f1ab33",
"format": 1
},
{
"name": "tests",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/ansible.cfg",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "efb91542404527c3081e347a8833c54a0c6f6f7b975e400dd5c350aef9c2c822",
"format": 1
},
{
"name": "tests/playbook.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a52f9f26b558f05a29a0447f10a1ea7ca96df1b7bc44a31b2727c624c267e3fb",
"format": 1
},
{
"name": "tests/docker-compose.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "0a293c4b8f09df4855a04529029205634389596a4d25b249500eac9d1a4cc24c",
"format": 1
},
{
"name": "tests/host_vars",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/host_vars/localhost.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "388a868ea1dd4cded00276063b18f221bb3ed43040378fdcd6443dc2240d3db5",
"format": 1
},
{
"name": "tests/vault_key.sh",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "48884297731a934b4f0ad2cdb104f53c12341370d2413fa61a62fa3c4024eab4",
"format": 1
},
{
"name": "roles",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_backup",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_backup/vars",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_backup/vars/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "6c98c057bc941803da7172ff80160a669e987360bad34af5ea53593985f1443a",
"format": 1
},
{
"name": "roles/docker_s3_backup/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_backup/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "0fcf1fbf3fafdaf129a33876344c1ea07d54ca562c253cc81c5eebc806ff51a9",
"format": 1
},
{
"name": "roles/docker_s3_backup/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_backup/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8e707c10e6614fae3baf7f6199e74ff23dedf7128dd170b8ad9924ed2cd607bc",
"format": 1
},
{
"name": "roles/docker_s3_backup/defaults",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_backup/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "cd31b1ba5dd374feac22fce70bb91bd948a391c27691f8cc49190427dfd360aa",
"format": 1
},
{
"name": "roles/docker_s3_volume_restore",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/vars",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/vars/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8cbb7ae1ada7a17063d49063a2ded3b3f784a9acc5b0b42a35d417134713a3d2",
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "912b5afae6e9183741dd29242e1810ceb32deabb3624f2a2ccc2b1ea8984a45f",
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8495f3dccdbd037f3ba05f8c1f00d1de836e751b5f76504fa57d6e5c8032ccb0",
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/defaults",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b0ecdc12d3f7814ebfbded52d4db758462c2c69b34635e670ee7ee2322d6c44f",
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/handlers",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "roles/docker_s3_volume_restore/handlers/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "bda514ba267156bf4b25e378da315e16c28e110fbc4a4033c91e71e8d417bb1b",
"format": 1
},
{
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "678fe27fb5f532e6c89a7538da7931d64f7d6522a4c857a2de5495afabaeaab3",
"format": 1
},
{
"name": ".gitignore",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "54c27deac980434757ea7dc177f753710633b9464576e23804c2d5f5bc549288",
"format": 1
},
{
"name": ".github",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": ".github/workflows",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": ".github/workflows/lint.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "aa67d9ff8c695a9c47d18c540186647ef12160ed924608a4adabf4d0bfb5510a",
"format": 1
},
{
"name": ".github/workflows/e2e.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "c78a395feeca4a7f3751cedd0666ad01c7d4e206ffecaf6928deefd0f8d9b384",
"format": 1
}
],
"format": 1
}

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 Cian Hatton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,30 @@
{
"collection_info": {
"namespace": "chatton",
"name": "docker_backup",
"version": "1.0.0",
"authors": [
"Cian Hatton cianhatton@protonmail.com"
],
"readme": "README.md",
"tags": [],
"description": "A collection of roles which allow backup and restore of docker volumes.",
"license": [
"MIT"
],
"license_file": "LICENSE",
"dependencies": {},
"repository": "https://github.com/chatton/ansible-docker-backup",
"documentation": "https://github.com/chatton/ansible-docker-backup#readme",
"homepage": "https://github.com/chatton/ansible-docker-backup",
"issues": "https://github.com/chatton/ansible-docker-backup/issues"
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4f37593e42d3f2b5b2225b4aade83be41bee2a507b88546d3bbd79e061017173",
"format": 1
},
"format": 1
}

@@ -0,0 +1,17 @@
e2e: test clean

test:
	cp -r tests/host_vars .
	cp tests/ansible.cfg .
	cp tests/docker-compose.yml .
	cp tests/playbook.yml .
	ansible-playbook playbook.yml

clean:
	rm -r host_vars
	rm ansible.cfg
	rm docker-compose.yml
	rm playbook.yml

deps:
	pip install -r requirements.txt

@@ -0,0 +1,3 @@
# Ansible Collection - chatton.docker_backup
Documentation for the collection.
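With the collection installed on the configured `collections_path`, the roles can be addressed by their fully qualified names. A minimal sketch of a backup play (the host group and container name are illustrative):

```yaml
- hosts: docker_hosts
  become: true
  tasks:
    - name: Back up the volumes of a running container
      ansible.builtin.include_role:
        name: chatton.docker_backup.docker_s3_backup
      vars:
        container_backup: portainer
```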

@@ -0,0 +1,15 @@
#!/usr/bin/python
class FilterModule(object):
    def filters(self):
        return {
            'keep_last': self._keep_last,
        }

    def _keep_last(self, list_of_lists, num_to_keep):
        """Trim each inner list down to everything *except* its last
        num_to_keep items; the last num_to_keep items are the ones the
        caller keeps, and everything returned here is deletable."""
        lists_to_return = []
        for list_items in list_of_lists:
            lists_to_return.append(list_items[0:-num_to_keep])
        return lists_to_return
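The backup role pipes lists of S3 keys (sorted oldest to newest) through this filter to decide what to prune. A minimal sketch of its behaviour, with hypothetical keys:

```yaml
- name: Demo keep_last
  ansible.builtin.set_fact:
    # keep the newest two backups; everything returned is deletable
    to_delete: "{{ [['vol-1.tar.gz', 'vol-2.tar.gz', 'vol-3.tar.gz']] | keep_last(2) | flatten }}"
  # to_delete == ['vol-1.tar.gz']
```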

@@ -0,0 +1,37 @@
ansible==6.3.0
ansible-compat==2.2.0
ansible-core==2.13.3
ansible-lint==6.5.2
attrs==22.1.0
black==22.8.0
bracex==2.3.post1
cffi==1.15.1
click==8.1.3
commonmark==0.9.1
cryptography==37.0.4
enrich==1.2.7
filelock==3.8.0
importlib-resources==5.9.0
Jinja2==3.1.2
jsonschema==4.15.0
MarkupSafe==2.1.1
mypy-extensions==0.4.3
packaging==21.3
pathspec==0.10.1
pkgutil_resolve_name==1.3.10
platformdirs==2.5.2
pycparser==2.21
Pygments==2.13.0
pyparsing==3.0.9
pyrsistent==0.18.1
PyYAML==6.0
resolvelib==0.8.1
rich==12.5.1
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.6
subprocess-tee==0.3.5
tomli==2.0.1
typing_extensions==4.3.0
wcmatch==8.4
yamllint==1.27.1
zipp==3.8.1

@@ -0,0 +1,15 @@
---
# defaults file for chatton.docker_s3_backup
# the number of backups of the same volume which should be kept.
# Any excess will be deleted.
docker_backup_retain_count: 3
docker_backup_aws_s3_region: "us-east-1"
docker_backup_aws_s3_bucket: "backups"
# the backup directory where backups are stored on the host machine.
# these will be uploaded to S3.
docker_backup_host_backup_directory: ""
docker_backup_aws_s3_url: ""
docker_backup_aws_s3_aws_access_key: ""
docker_backup_aws_s3_aws_secret_key: ""
docker_backup_aws_s3_permissions: []
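The empty defaults must be supplied by the caller. A sketch of host vars pointing the role at an S3-compatible endpoint (all values are illustrative, and the vaulted variable names are assumptions):

```yaml
docker_backup_host_backup_directory: /opt/backups
docker_backup_aws_s3_url: "https://minio.example.com"
docker_backup_aws_s3_bucket: "backups"
docker_backup_aws_s3_aws_access_key: "{{ vault_s3_access_key }}"
docker_backup_aws_s3_aws_secret_key: "{{ vault_s3_secret_key }}"
```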

@@ -0,0 +1,11 @@
galaxy_info:
author: Cian Hatton
description: Role which backs up a docker volume to S3.
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
platforms:
- name: Debian
versions:
- all
dependencies: []

@@ -0,0 +1,85 @@
---
# https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#ansible-collections-community-docker-docker-container-module
# https://docs.docker.com/storage/volumes/#backup-restore-or-migrate-data-volumes
- name: Determine backup timestamp.
ansible.builtin.set_fact: backup_time="{{ ansible_date_time.iso8601 }}"
- name: Install Python dependencies
ansible.builtin.pip:
name:
- docker
- boto3
- name: Stop a container
community.docker.docker_container:
name: "{{ container_backup }}"
state: stopped
- name: Get container details
docker_container_info:
name: "{{ container_backup }}"
register: result
- name: Extract only the volume mounts (not bind mounts)
ansible.builtin.set_fact: volume_mounts="{{ result.container.Mounts | selectattr("Type", "equalto", "volume") }}"
- name: Create Backup of Container Volumes
community.docker.docker_container:
name: "backup-container-{{ item.Name }}-{{ 10 | random }}"
image: ubuntu
command: "tar -czvf /backups/{{ item.Name }}-{{ backup_time }}.tar.gz /data"
auto_remove: true
detach: false # block until this container exits.
state: started
volumes:
- "{{ item.Name }}:/data"
- "{{ docker_backup_host_backup_directory }}:/backups"
with_items: "{{ volume_mounts }}"
- name: Start the container
community.docker.docker_container:
name: "{{ container_backup }}"
state: started
- name: Upload backups to S3
register: upload_result
amazon.aws.aws_s3:
s3_url: "{{ docker_backup_aws_s3_url }}"
bucket: "{{ docker_backup_aws_s3_bucket }}"
object: "{{ item.Name }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
src: "{{ docker_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
mode: put
permission: "{{ docker_backup_aws_s3_permissions }}"
with_items: "{{ volume_mounts }}"
# list the existing backups for each volume so old ones can be pruned.
- name: Fetch Volumes From S3.
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
mode: list
prefix: "{{ item.Name }}/{{ item.Name }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
register: s3_list_outputs
with_items: "{{ volume_mounts }}"
- name: Find keys to delete.
ansible.builtin.set_fact:
s3_keys_to_delete: "{{ s3_list_outputs.results | map(attribute='s3_keys') | keep_last(docker_backup_retain_count) | flatten }}"
- name: Delete old backups.
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
object: "{{ item }}"
mode: delobj
with_items: "{{ s3_keys_to_delete }}"
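The upload task writes one object per volume under a per-volume prefix, which is the same prefix the restore role later lists. An illustrative layout with the default bucket:

```
backups/
  portainer_data/portainer_data-2022-09-01T21:32:54Z.tar.gz
  portainer_data/portainer_data-2022-09-02T21:32:54Z.tar.gz
```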

@@ -0,0 +1,25 @@
---
# defaults file for docker_s3_volume_restore
# forces a restore even if the volume already exists.
docker_backup_restore_force: false
# set docker_backup_restore_latest_s3_key to true to automatically determine the latest
# backup in the S3 bucket. The expected keys have the prefix volume_name/volume_name-*,
# which is the format the "docker_s3_backup" role uploads them with.
docker_backup_restore_latest_s3_key: false
docker_backup_s3_restores: []
# docker_backup_s3_restores:
# - volume_name: "linkding_data"
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
# dynamically find the latest linkding_data backup.
# docker_backup_restore_latest_s3_key: true
# docker_backup_s3_restores:
# - volume_name: "linkding_data"
docker_backup_aws_s3_region: "us-east-1"
docker_backup_aws_s3_bucket: "backups"
docker_backup_aws_s3_url: ""
docker_backup_aws_s3_aws_access_key: ""
docker_backup_aws_s3_aws_secret_key: ""
docker_backup_aws_s3_permissions: []
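A sketch of a play that restores the newest backup of a single volume, mirroring how the e2e test drives this role (the host group and volume name are illustrative):

```yaml
- hosts: docker_hosts
  become: true
  tasks:
    - name: Restore volume from the latest S3 backup
      ansible.builtin.include_role:
        name: docker_s3_volume_restore
      vars:
        docker_backup_restore_latest_s3_key: true
        docker_backup_s3_restores:
          - volume_name: linkding_data
```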

@@ -0,0 +1,11 @@
galaxy_info:
author: Cian Hatton
description: Role which restores a docker volume from S3.
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
platforms:
- name: Debian
versions:
- all
dependencies: []

@@ -0,0 +1,107 @@
---
# https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#ansible-collections-community-docker-docker-container-module
# https://docs.docker.com/storage/volumes/#backup-restore-or-migrate-data-volumes
- name: Ensure Volume.
docker_volume:
name: "{{ item.volume_name }}"
state: present
register: volume_out
with_items: "{{ docker_backup_s3_restores }}"
- name: Determine if restore is needed.
  ansible.builtin.set_fact: should_perform_restore="{{ docker_backup_restore_force == true or volume_out.changed == true }}"
- name: End play as no restore is needed.
  ansible.builtin.meta: end_play
  when: not should_perform_restore
# try and find latest volume based on the name.
- name: Find latest s3 version.
when: docker_backup_restore_latest_s3_key
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
mode: list
prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
register: s3_list_output
with_items: "{{ docker_backup_s3_restores }}"
- name: Extract S3 keys for container.
when: docker_backup_restore_latest_s3_key
ansible.builtin.set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
with_items: "{{ s3_list_output.results }}"
- name: Extract volume names.
ansible.builtin.set_fact: volume_names="{{ docker_backup_s3_restores | map(attribute='volume_name') }}"
when: docker_backup_restore_latest_s3_key
- name: Merge volume names and S3 keys.
ansible.builtin.set_fact:
docker_backup_s3_restores_new: "{{ docker_backup_s3_restores_new | default([]) + [ {'volume_name': item.0, 's3_key': item.1} ] }}"
when: docker_backup_restore_latest_s3_key
with_together:
- "{{ volume_names }}"
- "{{ container_s3_keys }}"
- name: Set volumes to restore.
ansible.builtin.set_fact:
docker_backup_s3_restores_new: "{{ docker_backup_s3_restores }}"
when: not docker_backup_restore_latest_s3_key
- name: Create directories under /tmp for the downloaded archives.
ansible.builtin.file:
path: '/tmp/{{ item.s3_key | dirname }}'
state: directory
mode: '0755'
with_items: '{{ docker_backup_s3_restores_new }}'
- name: Download archive from S3.
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
object: "{{ item.s3_key }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
mode: get
dest: "/tmp/{{ item.s3_key }}"
register: get_out
with_items: "{{ docker_backup_s3_restores_new }}"
- name: Remove contents of volumes.
community.docker.docker_container:
name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
image: ubuntu
command: "rm -rf ./*"
auto_remove: true
detach: false # block until this container exits.
state: started
# start inside the directory we want to wipe
working_dir: "/data"
volumes:
- "{{ item.volume_name }}:/data"
with_items: "{{ docker_backup_s3_restores_new }}"
- name: Restore contents of volumes
community.docker.docker_container:
name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
image: ubuntu
# extract the tar into the volume.
command: "tar xvf /tmp/{{ item.s3_key }} -C /data --strip-components 1"
auto_remove: true
detach: false # block until this container exits.
state: started
volumes:
- "{{ item.volume_name }}:/data"
- /tmp:/tmp
with_items: "{{ docker_backup_s3_restores_new }}"
- name: Remove uploaded files from /tmp
ansible.builtin.file:
path: '/tmp/{{ item.s3_key }}'
state: absent
with_items: '{{ docker_backup_s3_restores_new }}'

@@ -0,0 +1,4 @@
[defaults]
roles_path = roles
vault_password_file=tests/vault_key.sh
filter_plugins = plugins/filter

@@ -0,0 +1,18 @@
---
version: '3.2'
services:
portainer:
image: portainer/portainer-ce
container_name: portainer
ports:
- 9000:9000
volumes:
- portainer_data:/data
- some_volume:/some_dir
- /var/run/docker.sock:/var/run/docker.sock
volumes:
some_volume:
portainer_data:
external: true
name: portainer_data

@@ -0,0 +1,23 @@
$ANSIBLE_VAULT;1.1;AES256
31343237643365393335643262646139363037616365623238316536313238386237353436643738
3332376365393138633962373132633562363035396161650a323039646238373162333366353533
37333238643437623538343630393065313463313038616538613838366638636433313637313630
3733623838373864320a373636303030373737336265353165373463306233333033393266343838
31323634313531343262323561636438326166343461373133393166303630303865316661356165
64366435343933396131313665336437363234393239663831363033356433383330323964613833
36346436383739623163343130376465333465316139303437666333386633313132336234383936
65353437336637353739373837343766333266396263396562366463626332363061383435353132
34616134396136333266323930343866663332373864623537623765303435366331613466376137
63346337303461623036306362306235366365646137316165376634316230396239623132363337
35363932663361313533663436633532313732646564663463393233316231623361336332336135
39356338363336336231643661313062303734316539653031313630363866303464643438653035
37393039623961386539303235636562653130343237336332643639346631326633363366373466
62356536356664353466383131306664653132393837663635366466613665626535323930366637
38646263326264313363386634363834626638383563346361386165323430383266646631626362
64303263383138353739656534623734623638653438353666623464656461316636626564326536
34616163626539383265353963333734313363343162663434356337393266313637323732346231
33373835373465666637663330653337373130373732303632326530336132333236313466653239
30383632363337333833666132363563363361623865616134613538373439353836346366353065
34356134633038653839333430313738613531653634333430373635373239653362393461306330
35326532303432316636666134353534626139316331333538356165313965613739653665616237
3165353731626130666639346263333865316362623134373463

@@ -0,0 +1,106 @@
---
- hosts: localhost
connection: local
become: true
tasks:
- name: Install Docker Module for Python
pip:
name:
- docker
- docker-compose
- boto3
- name: Remove Portainer
docker_compose:
project_src: 'tests'
state: absent
- name: Remove portainer volume
docker_volume:
name: portainer_data
state: absent
- name: Create portainer volume
docker_volume:
name: portainer_data
state: present
- name: Deploy Portainer
docker_compose:
project_src: 'tests'
state: present
- name: Portainer | Wait for ready
uri:
url: http://localhost:9000
method: GET
status_code: 200
register: result
until: result.status == 200
retries: 60
delay: 1
- debug: msg="{{result}}"
- name: Register Portainer Admin User
uri:
url: http://localhost:9000/api/users/admin/init
method: POST
body:
Username: admin
Password: "adminadminadmin"
status_code: 200
body_format: json
register: result
until: result.status == 200
retries: 60
delay: 1
- name: Backup Portainer
include_role:
name: docker_s3_backup
vars:
container_backup: portainer
- name: Remove Portainer
docker_compose:
project_src: 'tests'
state: absent
- name: Remove portainer volume
docker_volume:
name: portainer_data
state: absent
- name: Restore Portainer Volume
include_role:
name: docker_s3_volume_restore
vars:
docker_backup_restore_latest_s3_key: true
docker_backup_s3_restores:
- volume_name: portainer_data
- name: Deploy Portainer
docker_compose:
project_src: 'tests'
state: present
- name: Auth as old user
uri:
url: http://localhost:9000/api/auth
method: POST
body:
Username: admin
Password: "adminadminadmin"
status_code: 200
body_format: json
register: result
until: result.status == 200
retries: 60
delay: 1
- assert:
that:
- result.status == 200

@@ -0,0 +1,117 @@
{
"files": [
{
"name": ".",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "LICENSE",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "eb1e623301bf97851c37dc8127635aba0407c91b8583725472905407facc3fb1",
"format": 1
},
{
"name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "15f22e3e2654ae2de7af40c232ecef6b73718c595f4d7632820feb31bcbb51e7",
"format": 1
},
{
"name": "plugins",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "23e9939164cad964c2338b8059e4d3def72eef9523e32594503efd50960fcae4",
"format": 1
},
{
"name": "plugins/module_utils",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/module_utils/portainer.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "95616af2c5bde69ee194b26473cc55a2c0dddf51c62d8886b9285cc4e21117b9",
"format": 1
},
{
"name": "plugins/modules",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "plugins/modules/portainer_stack.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "8d7c6e609e3d6d1764768c479a07375b8b43d65a9fc30e7f8cefe1bc48fe7d2b",
"format": 1
},
{
"name": "tests",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit/plugins",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit/plugins/modules",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_portainer_stack.py",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "557f9717bcb5a666c0b93fdc97525d6adae84bc84e3d307cbb33af1bc1bb066d",
"format": 1
},
{
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "294bfca64fb7c1480fb6b4f0fb6cfc1e73ce3ea13f37e34ddeb403a3fd87aabd",
"format": 1
},
{
"name": ".gitignore",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "f6a51f14c4c681963ae1e30806d6868fe6677d47f0a490473521df7497d713dc",
"format": 1
}
],
"format": 1
}

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 Cian Hatton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,30 @@
{
"collection_info": {
"namespace": "chatton",
"name": "portainer",
"version": "1.0.0",
"authors": [
"Cian Hatton"
],
"readme": "README.md",
"tags": [],
"description": "your collection description",
"license": [
"GPL-2.0-or-later"
],
"license_file": null,
"dependencies": {},
"repository": "https://github.com/chatton/ansible-portainer",
"documentation": "http://docs.example.com",
"homepage": "http://example.com",
"issues": "https://github.com/chatton/ansible-portainer/issues"
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "af38f629ca55ff7f83f84e54ee9496e2fdcd93eb6afce03d79f12dd7d68a5153",
"format": 1
},
"format": 1
}

@@ -0,0 +1,31 @@
# Collections Plugins Directory
This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that
would contain module utils and modules respectively.
Here is an example directory of the majority of plugins currently supported by Ansible:
```
└── plugins
├── action
├── become
├── cache
├── callback
├── cliconf
├── connection
├── filter
├── httpapi
├── inventory
├── lookup
├── module_utils
├── modules
├── netconf
├── shell
├── strategy
├── terminal
├── test
└── vars
```
A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible-core/2.13/plugins/plugins.html).

@@ -0,0 +1,59 @@
import requests
def _query_params_to_string(params):
s = "?"
for k, v in params.items():
s += f"&{k}={v}"
return s
class PortainerClient:
def __init__(self, base_url, endpoint):
self.endpoint = endpoint
self.base_url = base_url
self.token = ""
self.headers = {}
def login(self, username, password):
payload = {
"Username": username,
"Password": password,
}
auth_url = f"{self.base_url}/api/auth"
resp = requests.post(auth_url, json=payload)
resp.raise_for_status()
self.token = resp.json()["jwt"]
self.headers = {"Authorization": f"Bearer {self.token}"}
def get(self, get_endpoint, query_params=None):
url = f"{self.base_url}/api/{get_endpoint}"
if query_params:
url = url + _query_params_to_string(query_params)
res = requests.get(url, headers=self.headers)
res.raise_for_status()
return res.json()
def delete(self, endpoint):
url = f"{self.base_url}/api/{endpoint}"
try:
# TODO: deletion works, but the request fails?
res = requests.delete(url, headers=self.headers)
res.raise_for_status()
except Exception:
pass
return {}
def put(self, endpoint, body):
url = f"{self.base_url}/api/{endpoint}"
res = requests.put(url, json=body, headers=self.headers)
res.raise_for_status()
return res.json()
    def post(self, endpoint, body, query_params=None):
        url = f"{self.base_url}/api/{endpoint}"
        # guard against the default of None, which would crash _query_params_to_string
        if query_params:
            url += _query_params_to_string(query_params)
        res = requests.post(url, json=body, headers=self.headers)
        res.raise_for_status()
        return res.json()

@@ -0,0 +1,239 @@
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
try:
# FIXME: Hack to make imports work with IDE. The ansible import path is not valid for a regular python
# project.
from plugins.module_utils.portainer import *
except ImportError:
from ansible_collections.chatton.portainer.plugins.module_utils.portainer import (
PortainerClient,
_query_params_to_string,
)
DOCUMENTATION = r"""
---
module: portainer_stack
short_description: Manage Portainer stacks
version_added: "1.0.0"
description: Create, update or delete a Portainer stack from a docker-compose file.
options:
    stack_name:
        description: Name of the stack to manage.
        required: true
        type: str
    docker_compose_file_path:
        description: Path to the docker-compose file used to create or update the stack. Required when state is present.
        required: false
        type: str
    username:
        description: Username used to authenticate with Portainer.
        required: false
        default: admin
        type: str
    password:
        description: Password used to authenticate with Portainer.
        required: true
        type: str
    endpoint_id:
        description: ID of the Portainer endpoint (environment) the stack belongs to.
        required: true
        type: int
    base_url:
        description: Base URL of the Portainer instance.
        required: false
        default: http://localhost:9000
        type: str
    state:
        description: Whether the stack should be present or absent.
        required: false
        default: present
        choices: [present, absent]
        type: str
author:
    - Cian Hatton (@chatton)
"""
EXAMPLES = r"""
# Deploy Gitea, Plex and Mealie stacks to portainer provided the files exist.
- name: Portainer | Update Stack
chatton.portainer.portainer_stack:
username: admin
password: "{{portainer.password}}"
docker_compose_file_path: "/etc/docker-compose/{{ item.name }}/docker-compose.yml"
stack_name: "{{ item.name }}"
endpoint_id: "{{ item.endpoint_id }}"
state: present
with_items:
- name: gitea
endpoint_id: 1
- name: plex
endpoint_id: 2
- name: mealie
endpoint_id: 3
# Delete plex stack
- name: Portainer | Delete Plex Stack
chatton.portainer.portainer_stack:
username: admin
password: "{{portainer.password}}"
stack_name: "plex"
endpoint_id: "2"
state: absent
"""
RETURN = r"""
changed:
    description: Whether the stack was created, updated or deleted.
    type: bool
    returned: always
    sample: true
stack_name:
    description: The name of the stack that was managed.
    type: str
    returned: always
    sample: 'plex'
stack_id:
    description: The Portainer ID of the stack, if it exists.
    type: int
    returned: when the stack exists
    sample: 3
"""
COMPOSE_STACK = 2
STRING_METHOD = "string"
def _create_stack(client, module, file_contents):
target_stack_name = module.params["stack_name"]
body = {
"name": target_stack_name,
"stackFileContent": file_contents,
}
query_params = {
"type": COMPOSE_STACK,
"method": STRING_METHOD,
"endpointId": client.endpoint,
}
return client.post("stacks", body=body, query_params=query_params)
def _update_stack(client, module, stack_id):
target_stack_name = module.params["stack_name"]
with open(module.params["docker_compose_file_path"]) as f:
file_contents = f.read()
return client.put(
f"stacks/{stack_id}?&endpointId={client.endpoint}",
body={
"name": target_stack_name,
"stackFileContent": file_contents,
},
)
def handle_state_present(client, module):
result = dict(changed=False, stack_name=module.params["stack_name"])
already_exists = False
stacks = client.get("stacks")
result["stacks"] = stacks
with open(module.params["docker_compose_file_path"]) as f:
file_contents = f.read()
target_stack_name = module.params["stack_name"]
for stack in stacks:
if stack["Name"] == target_stack_name:
already_exists = True
result["stack_id"] = stack["Id"]
break
if not already_exists:
stack = _create_stack(client, module, file_contents)
result["changed"] = True
result["stack_id"] = stack["Id"]
module.exit_json(**result)
return
stack_id = result["stack_id"]
current_file_contents_resp = client.get(
f"stacks/{stack_id}/file", query_params={"endpointId": client.endpoint}
)
result["are_equal"] = (
current_file_contents_resp["StackFileContent"] == file_contents
)
if result["are_equal"]:
module.exit_json(**result)
return
# the stack exists and we have a new config.
_update_stack(client, module, stack_id)
result["changed"] = True
module.exit_json(**result)
def handle_state_absent(client, module):
result = dict(changed=False, stack_name=module.params["stack_name"])
already_exists = False
target_stack_name = module.params["stack_name"]
stacks = client.get("stacks")
for stack in stacks:
if stack["Name"] == target_stack_name:
already_exists = True
result["stack_id"] = stack["Id"]
break
if not already_exists:
module.exit_json(**result)
return
stack_id = result["stack_id"]
client.delete(
f"stacks/{stack_id}" + _query_params_to_string({"endpointId": client.endpoint})
)
result["changed"] = True
module.exit_json(**result)
def run_module():
# define available arguments/parameters a user can pass to the module
module_args = dict(
stack_name=dict(type="str", required=True),
docker_compose_file_path=dict(type="str"),
username=dict(type="str", default="admin"),
password=dict(type="str", required=True, no_log=True),
endpoint_id=dict(type="int", required=True),
base_url=dict(type="str", default="http://localhost:9000"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
required_if = [
# docker compose file is only required if we are ensuring the stack is present.
["state", "present", ("docker_compose_file_path",)],
]
state_fns = {"present": handle_state_present, "absent": handle_state_absent}
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=module_args,
required_if=required_if,
# TODO: support check mode
supports_check_mode=False,
)
client = PortainerClient(
base_url=module.params["base_url"], endpoint=module.params["endpoint_id"]
)
client.login(module.params["username"], module.params["password"])
state_fns[module.params["state"]](client, module)
def main():
run_module()
if __name__ == "__main__":
main()

@@ -0,0 +1,19 @@
ansible==6.3.0
ansible-core==2.13.3
certifi==2022.6.15
cffi==1.15.1
charset-normalizer==2.1.1
click==8.1.3
colorama==0.4.5
cryptography==37.0.4
idna==3.3
Jinja2==3.1.2
MarkupSafe==2.1.1
packaging==21.3
portainer-py==0.7.6
pycparser==2.21
pyparsing==3.0.9
PyYAML==6.0
requests==2.28.1
resolvelib==0.8.1
urllib3==1.26.12

@@ -0,0 +1,9 @@
import unittest
class TestMyModule(unittest.TestCase):
def test_foo(self):
assert True
def test_foo2(self):
assert False

@@ -9,7 +9,7 @@ services:
 - name: nextcloud
 - name: nginx-proxy-manager
 - name: uptime-kuma
-- name: docker-volume-backup
+#- name: docker-volume-backup
 - name: mariadb
 - name: photoprism
 - name: olivetin

@@ -4,10 +4,10 @@ backups:
   nightly: nightly
 # TODO: docker_volume_backup exists in vault. Clean this up.
-dockervolumebackup:
-  image: ghcr.io/chatton/docker-volume-backup
-  tag: pr-20
-  config_directory: /etc/docker-volume-backup
+#dockervolumebackup:
+#  image: ghcr.io/chatton/docker-volume-backup
+#  tag: pr-20
+#  config_directory: /etc/docker-volume-backup
 # dashy related config
 dashy:

@@ -42,8 +42,8 @@ services:
   endpoint_id: 2
 - name: vpn-stack
   endpoint_id: 2
-- name: docker-volume-backup
-  endpoint_id: 2
+#- name: docker-volume-backup
+#  endpoint_id: 2
 - name: mariadb
   endpoint_id: 2
 - name: photoprism

@@ -0,0 +1,2 @@
[flake8]
exclude = .svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg,.*env

@@ -0,0 +1,68 @@
---
name: CI
on: # yamllint disable-line rule:truthy
push:
schedule:
- cron: "0 5 * * 1"
jobs:
# test the role
test:
runs-on: ubuntu-latest
strategy:
matrix:
config:
- image: geerlingguy/docker-centos8-ansible
mode: github_releases
- image: geerlingguy/docker-centos7-ansible
mode: github_releases
- image: geerlingguy/docker-fedora32-ansible
mode: github_releases
- image: geerlingguy/docker-fedora31-ansible
mode: github_releases
- image: geerlingguy/docker-fedora30-ansible
mode: github_releases
- image: geerlingguy/docker-ubuntu2004-ansible
mode: github_releases
- image: geerlingguy/docker-ubuntu2004-ansible
mode: package_manager
- image: geerlingguy/docker-ubuntu1804-ansible
mode: github_releases
- image: geerlingguy/docker-ubuntu1604-ansible
mode: github_releases
- image: geerlingguy/docker-debian10-ansible
mode: package_manager
- image: geerlingguy/docker-debian10-ansible
mode: github_releases
- image: geerlingguy/docker-debian9-ansible
mode: github_releases
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Python 3
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.in
- name: Run molecule tests
env:
IMAGE: ${{ matrix.config.image }}
INSTALL_MODE: ${{ matrix.config.mode }}
run: molecule -v test
# publish the role on ansible galaxy
publish:
needs: test
runs-on: ubuntu-latest
steps:
- name: Publish
uses: robertdebock/galaxy-action@1.1.0
with:
galaxy_api_key: ${{ secrets.GALAXY_API_KEY }}

@@ -0,0 +1,5 @@
*.retry
*.pyc
__pycache__/
*env/
.cache/

@@ -0,0 +1,9 @@
---
extends: default
ignore: |
.*env/
rules:
line-length:
max: 120

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2020 Sylvain Prat
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,54 @@
Ansible Role: mergerfs
======================
[![Build Status][build_badge]][build_link]
[![Ansible Galaxy][galaxy_badge]][galaxy_link]
Install and configure Mergerfs — A featureful union filesystem.
Requirements
------------
None.
Role Variables
--------------
See [defaults/main.yml](defaults/main.yml).
Dependencies
------------
None.
Example Playbook
----------------
```yaml
- hosts: server
roles:
- role: sprat.mergerfs
vars:
mergerfs_mounts:
- path: /mnt/data
branches:
- /mnt/data1
- /mnt/data2
options: allow_other,use_ino
```
License
-------
MIT
Author Information
------------------
This role was created in 2020 by [Sylvain Prat](https://github.com/sprat).
[build_badge]: https://img.shields.io/github/workflow/status/sprat/ansible-role-mergerfs/CI
[build_link]: https://github.com/sprat/ansible-role-mergerfs/actions?query=workflow:CI
[galaxy_badge]: https://img.shields.io/ansible/role/47517
[galaxy_link]: https://galaxy.ansible.com/sprat/mergerfs

@@ -0,0 +1,23 @@
---
# Install mode: defines where to download and install the package from:
# - "github_releases": install from Mergerfs' GitHub releases
# - "package_manager": install from the Linux distribution package manager.
# Note that the mergerfs package does not exist in all distributions, so it
# may not work for you.
mergerfs_install_mode: github_releases
# Version to install: "latest" version or a specific version number, e.g. "2.28.2"
# This setting only applies in "github_releases" mode
mergerfs_version: latest
# Mergerfs mountpoints to create. For example:
# mergerfs_mounts:
# - path: /mnt/storage
# branches:
# - /mnt/data*
# - /mnt/other
# options: allow_other,use_ino
mergerfs_mounts: []
# Url of the mergerfs GitHub releases page
mergerfs_github_releases_url: https://github.com/trapexit/mergerfs/releases
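Pinning a release avoids the extra round-trip to GitHub that resolving "latest" requires. A sketch of the relevant group vars (the version number is illustrative):

```yaml
mergerfs_install_mode: github_releases
mergerfs_version: "2.28.2"
```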

@@ -0,0 +1,2 @@
install_date: Fri 2 Sep 21:20:46 2022
version: master

@@ -0,0 +1,30 @@
---
galaxy_info:
author: Sylvain Prat
role_name: mergerfs
namespace: sprat
description: Install and configure Mergerfs — A featureful union filesystem
license: MIT
company: none
min_ansible_version: 2.3
platforms:
- name: Ubuntu
versions:
- all
- name: Debian
versions:
- all
- name: Fedora
versions:
- all
- name: EL
versions:
- all
galaxy_tags:
- mergerfs
- union
- filesystem
- disk
- mount
dependencies: []

@@ -0,0 +1,11 @@
---
- name: Converge
hosts: all
vars:
mergerfs_mounts:
- path: /mnt/storage
branches:
- /mnt/data*
options: allow_other,use_ino
roles:
- role: ansible-role-mergerfs

@@ -0,0 +1,21 @@
---
dependency:
name: galaxy
driver:
name: docker
lint: yamllint -s . && ansible-lint . && flake8
platforms:
- name: instance
image: ${IMAGE:-geerlingguy/docker-ubuntu2004-ansible}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
privileged: true
pre_build_image: true
provisioner:
name: ansible
inventory:
group_vars:
all:
mergerfs_install_mode: ${INSTALL_MODE:-github_releases}
verifier:
name: testinfra

@@ -0,0 +1,25 @@
---
- name: Prepare
hosts: all
tasks:
- name: Create directories
become: true
file:
path: "{{ item }}"
state: directory
loop:
- /mnt/data1
- /mnt/data2
- name: Create data files
become: true
copy:
content: "{{ item.content }}\n"
dest: "{{ item.path }}"
loop:
- path: /mnt/data1/file1.txt
content: file1
- path: /mnt/data2/file2.txt
content: file2
- path: /mnt/data2/file3.txt
content: file3

@@ -0,0 +1,21 @@
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
def test_mount_point(host):
mount_point = host.mount_point('/mnt/storage')
assert mount_point.exists
assert mount_point.filesystem == 'fuse.mergerfs'
assert 'allow_other' in mount_point.options
# assert 'use_ino' in mount_point.options
def test_data_files(host):
assert host.file('/mnt/storage/file1.txt').exists
assert host.file('/mnt/storage/file2.txt').exists
assert host.file('/mnt/storage/file3.txt').exists

@@ -0,0 +1 @@
molecule[ansible,docker,test,lint]

@@ -0,0 +1,249 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile
#
ansi2html==1.6.0
# via molecule
ansible-base==2.10.7
# via ansible
ansible-lint==5.0.7
# via molecule
ansible==3.2.0
# via molecule
apipkg==1.5
# via execnet
appdirs==1.4.4
# via virtualenv
arrow==1.0.3
# via jinja2-time
attrs==20.3.0
# via pytest
bcrypt==3.2.0
# via paramiko
binaryornot==0.4.4
# via cookiecutter
bracex==2.1.1
# via wcmatch
cerberus==1.3.2
# via molecule
certifi==2020.12.5
# via requests
cffi==1.14.5
# via
# bcrypt
# cryptography
# pynacl
cfgv==3.2.0
# via pre-commit
chardet==4.0.0
# via
# binaryornot
# requests
click-completion==0.5.2
# via molecule
click-help-colors==0.9
# via molecule
click==7.1.2
# via
# click-completion
# click-help-colors
# cookiecutter
# molecule
colorama==0.4.4
# via rich
commonmark==0.9.1
# via rich
cookiecutter==1.7.2
# via molecule
coverage==5.5
# via pytest-cov
cryptography==3.4.7
# via
# ansible-base
# paramiko
distlib==0.3.1
# via virtualenv
distro==1.5.0
# via selinux
docker==5.0.0
# via molecule-docker
enrich==1.2.6
# via
# ansible-lint
# molecule
execnet==1.8.0
# via pytest-xdist
filelock==3.0.12
# via virtualenv
flake8==3.9.0
# via molecule
identify==2.2.3
# via pre-commit
idna==2.10
# via requests
iniconfig==1.1.1
# via pytest
jinja2-time==0.2.0
# via cookiecutter
jinja2==2.11.3
# via
# ansible-base
# click-completion
# cookiecutter
# jinja2-time
# molecule
markupsafe==1.1.1
# via
# cookiecutter
# jinja2
mccabe==0.6.1
# via flake8
molecule-docker==0.2.4
# via molecule
molecule[ansible,docker,lint,test]==3.3.0
# via
# -r requirements.in
# molecule-docker
more-itertools==8.7.0
# via pytest-plus
nodeenv==1.6.0
# via pre-commit
packaging==20.9
# via
# ansible-base
# ansible-lint
# molecule
# pytest
paramiko==2.7.2
# via molecule
pathspec==0.8.1
# via yamllint
pexpect==4.8.0
# via molecule
pluggy==0.13.1
# via
# molecule
# pytest
poyo==0.5.0
# via cookiecutter
pre-commit==2.12.0
# via molecule
ptyprocess==0.7.0
# via pexpect
py==1.10.0
# via
# pytest
# pytest-forked
pycodestyle==2.7.0
# via flake8
pycparser==2.20
# via cffi
pyflakes==2.3.1
# via flake8
pygments==2.8.1
# via rich
pynacl==1.4.0
# via paramiko
pyparsing==2.4.7
# via packaging
pytest-cov==2.11.1
# via molecule
pytest-forked==1.3.0
# via pytest-xdist
pytest-helpers-namespace==2021.3.24
# via molecule
pytest-html==3.1.1
# via molecule
pytest-metadata==1.11.0
# via pytest-html
pytest-mock==3.5.1
# via molecule
pytest-plus==0.2
# via molecule
pytest-testinfra==6.2.0
# via molecule
pytest-verbose-parametrize==1.7.0
# via molecule
pytest-xdist==2.2.1
# via molecule
pytest==6.2.3
# via
# molecule
# pytest-cov
# pytest-forked
# pytest-helpers-namespace
# pytest-html
# pytest-metadata
# pytest-mock
# pytest-plus
# pytest-testinfra
# pytest-verbose-parametrize
# pytest-xdist
python-dateutil==2.8.1
# via arrow
python-slugify==4.0.1
# via cookiecutter
pyyaml==5.4.1
# via
# ansible-base
# ansible-lint
# molecule
# pre-commit
# yamllint
requests==2.25.1
# via
# cookiecutter
# docker
rich==10.1.0
# via
# ansible-lint
# enrich
# molecule
ruamel.yaml.clib==0.2.2
# via ruamel.yaml
ruamel.yaml==0.17.4
# via ansible-lint
selinux==0.2.1
# via
# molecule
# molecule-docker
shellingham==1.4.0
# via click-completion
six==1.15.0
# via
# bcrypt
# click-completion
# cookiecutter
# pynacl
# pytest-verbose-parametrize
# python-dateutil
# tenacity
# virtualenv
# websocket-client
subprocess-tee==0.2.0
# via molecule
tenacity==7.0.0
# via ansible-lint
text-unidecode==1.3
# via python-slugify
toml==0.10.2
# via
# pre-commit
# pytest
typing-extensions==3.7.4.3
# via rich
urllib3==1.26.4
# via requests
virtualenv==20.4.3
# via pre-commit
wcmatch==8.1.2
# via ansible-lint
websocket-client==0.58.0
# via docker
yamllint==1.26.1
# via molecule
# The following packages are considered to be unsafe in a requirements file:
# setuptools

@@ -0,0 +1,54 @@
---
# Note: we don't use the GitHub API to retrieve the latest version because
# it has rate limits which are hard to avoid in CI (we need a token, authenticate
# with the API, etc.). Instead, we browse the latest release url which redirects
# to the release page, where we can find the version number in the URL.
- become: false
delegate_to: localhost
run_once: true
block:
- name: Get latest release information from GitHub
uri:
url: "{{ mergerfs_github_releases_url }}/latest"
register: mergerfs_github_release_page
- name: Set latest mergerfs version fact
set_fact:
mergerfs_version: "{{ mergerfs_github_release_page['url'].split('/')[-1] }}"
when: mergerfs_version == "latest"
- name: Determine package download url
set_fact:
mergerfs_package_url: "{{ mergerfs_github_releases_url }}/download/{{ mergerfs_version }}/\
{{ mergerfs_pkg_prefix }}{{ mergerfs_version }}{{ mergerfs_pkg_suffix }}"
- name: Install xz-utils package for .deb package installation
become: true
apt:
name: xz-utils
state: present
update_cache: true
when: ansible_pkg_mgr == 'apt'
- name: Install mergerfs package with apt
become: true
apt:
deb: "{{ mergerfs_package_url }}"
state: present
update_cache: true
when: ansible_pkg_mgr == 'apt'
- name: Install mergerfs package with yum
become: true
yum:
name: "{{ mergerfs_package_url }}"
state: present
disable_gpg_check: true # the package is not signed
when: ansible_pkg_mgr == 'yum'
- name: Install mergerfs package with dnf
become: true
dnf:
name: "{{ mergerfs_package_url }}"
state: present
disable_gpg_check: true # the package is not signed
when: ansible_pkg_mgr == 'dnf'

@@ -0,0 +1,7 @@
---
- name: Install mergerfs package with package manager
become: true
package:
name: mergerfs
state: present
update_cache: true

@@ -0,0 +1,34 @@
---
- name: Include OS-specific variables
include_vars: "{{ ansible_os_family }}.yml"
tags:
- mergerfs
- name: Install mergerfs prerequisites
become: true
package:
name: "{{ mergerfs_prerequisites }}"
state: present
update_cache: true
tags:
- mergerfs
- mergerfs_install
- name: Include install tasks
import_tasks: install_from_{{ mergerfs_install_mode }}.yml
tags:
- mergerfs
- mergerfs_install
- name: Mount mergerfs filesystems
become: true
mount:
fstype: fuse.mergerfs
src: "{{ ':'.join(item.branches | mandatory) }}"
path: "{{ item.path | mandatory }}"
opts: "{{ item.options | default('defaults') }}"
state: "{{ item.state | default('mounted') }}"
loop: "{{ mergerfs_mounts }}"
tags:
- mergerfs
- mergerfs_mount

@@ -0,0 +1,12 @@
---
mergerfs_prerequisites:
- fuse
mergerfs_dist: "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}"
mergerfs_arch_map:
x86_64: amd64
i386: i386
aarch64: arm64
armv7l: armhf
mergerfs_arch: "{{ mergerfs_arch_map[ansible_userspace_architecture | default(ansible_architecture) ] }}"
mergerfs_pkg_prefix: "mergerfs_"
mergerfs_pkg_suffix: ".{{ mergerfs_dist }}_{{ mergerfs_arch }}.deb"
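As a worked example of how these variables compose into a package name (hypothetical facts: Ubuntu focal on x86_64, version 2.28.2):

```yaml
# mergerfs_pkg_prefix + mergerfs_version + mergerfs_pkg_suffix
# -> "mergerfs_2.28.2.ubuntu-focal_amd64.deb"
```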

@@ -0,0 +1,7 @@
---
mergerfs_prerequisites:
- fuse
mergerfs_dist: "{{ 'fc' if ansible_distribution == 'Fedora' else 'el' }}{{ ansible_distribution_major_version }}"
mergerfs_arch: "{{ ansible_userspace_architecture }}"
mergerfs_pkg_prefix: "mergerfs-"
mergerfs_pkg_suffix: "-1.{{ mergerfs_dist }}.{{ mergerfs_arch }}.rpm"
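And the equivalent sketch for an EL host (hypothetical facts: CentOS 8 on x86_64, version 2.28.2):

```yaml
# mergerfs_pkg_prefix + mergerfs_version + mergerfs_pkg_suffix
# -> "mergerfs-2.28.2-1.el8.x86_64.rpm"
```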