adding role and playbooks

chatton 3 years ago
parent f3ddff350d
commit 655b9499c0

.gitignore

@@ -0,0 +1,2 @@
playbooks/backups
.idea

@@ -0,0 +1,3 @@
[defaults]
# look in the roles directory to find our defined roles.
roles_path = roles

@@ -0,0 +1,15 @@
---
version: '3'
services:
linkding:
labels:
ie.cianhatton.backup.enabled: "true"
container_name: "linkding"
image: sissbruecker/linkding:latest
ports:
- "9090:9090"
volumes:
- "data:/etc/linkding/data"
restart: unless-stopped
volumes:
data:
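
The ie.cianhatton.backup.enabled label on the linkding service is what the backup playbook below filters on. As a rough sketch of that discovery step, assuming the Docker SDK for Python (one of the dependencies the role installs via pip), the equivalent lookup looks like this:

# Sketch: find containers carrying the backup label, mirroring the
# community.docker.docker_host_info task in the playbook below.
import docker

client = docker.from_env()
# Like docker_host_info with containers: true, this lists running
# containers whose label matches the given key=value pair.
containers = client.containers.list(
    filters={"label": "ie.cianhatton.backup.enabled=true"},
)
for container in containers:
    print(container.name)  # e.g. "linkding"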

@@ -0,0 +1,26 @@
---
- name: Backup Docker Volumes.
hosts: localhost
# become: true
tasks:
- name: Find Containers With Backup Label
community.docker.docker_host_info:
containers: true
containers_filters:
label:
- "ie.cianhatton.backup.enabled=true"
register: filter_output
- name: Get Container Names
ansible.builtin.set_fact:
  container_names: "{{ filter_output.containers | map(attribute='Names') | flatten }}"
- name: Backup Containers with backup label
ansible.builtin.include_role:
name: docker_s3_backup
vars:
container_backup: "{{ container_item | regex_replace('^\\/', '') }}"
docker_backup_retain_count: 7
with_items: "{{ container_names }}"
loop_control:
loop_var: container_item
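
One subtlety in the playbook above: the Docker API reports each container's Names as a list with a leading slash (e.g. ["/linkding"]), which is why the play flattens the per-container lists and the role invocation strips the slash with regex_replace. A small Python sketch of that Jinja pipeline, using an illustrative payload:

import re

# Illustrative shape of filter_output registered from docker_host_info.
filter_output = {"containers": [{"Names": ["/linkding"]}]}

# Equivalent of: map(attribute="Names") | flatten
container_names = [n for c in filter_output["containers"] for n in c["Names"]]

# Equivalent of: regex_replace('^\\/', '') applied to each loop item
print([re.sub(r"^/", "", name) for name in container_names])  # ['linkding']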

@@ -0,0 +1,13 @@
---
# defaults file for docker_s3_backup
docker_backup_aws_s3_region: "us-east-1"
docker_backup_aws_s3_bucket: "backups"
# put backups locally in this directory.
docker_backup_host_backup_directory: "./backups"
docker_backup_aws_s3_url: ""
docker_backup_aws_s3_aws_access_key: ""
docker_backup_aws_s3_aws_secret_key: ""
docker_backup_aws_s3_permissions: []
docker_backup_aws_s3_encrypt: true
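
The empty docker_backup_aws_s3_url default suggests the role is also meant for S3-compatible stores (the value is passed to the aws_s3 module's s3_url parameter). For orientation, the boto3 client those tasks drive is built roughly like this; the placeholder values stand in for the defaults above:

import boto3

# Sketch: the S3 client amazon.aws.aws_s3 constructs from the role defaults.
s3 = boto3.client(
    "s3",
    region_name="us-east-1",         # docker_backup_aws_s3_region
    endpoint_url=None,               # docker_backup_aws_s3_url, when non-empty
    aws_access_key_id="KEY",         # docker_backup_aws_s3_aws_access_key
    aws_secret_access_key="SECRET",  # docker_backup_aws_s3_aws_secret_key
)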

@@ -0,0 +1,11 @@
galaxy_info:
author: Cian Hatton
description: Role which backs up a Docker volume to S3.
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
platforms:
- name: Debian
versions:
- all
dependencies: []

@@ -0,0 +1,90 @@
---
# https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#ansible-collections-community-docker-docker-container-module
# https://docs.docker.com/storage/volumes/#backup-restore-or-migrate-data-volumes
- name: Determine backup timestamp.
ansible.builtin.set_fact:
  backup_time: "{{ ansible_date_time.iso8601 }}"
- name: Install Python dependencies
ansible.builtin.pip:
name:
- docker
- boto3
- name: Stop a container
community.docker.docker_container:
name: "{{ container_backup }}"
state: stopped
- name: Get container details
community.docker.docker_container_info:
name: "{{ container_backup }}"
register: result
- name: Extract only the volume mounts (not bind mounts)
ansible.builtin.set_fact:
  volume_mounts: "{{ result.container.Mounts | selectattr('Type', 'equalto', 'volume') | list }}"
- name: Create Backup of Container Volumes
community.docker.docker_container:
name: "backup-container-{{ item.Name }}-{{ 10 | random }}"
image: ubuntu
command: "tar -czvf /backups/{{ item.Name }}-{{ backup_time }}.tar.gz /data"
cleanup: true
detach: false # block until this container exits.
state: started
volumes:
- "{{ item.Name }}:/data"
- "{{ docker_backup_host_backup_directory }}:/backups"
with_items: "{{ volume_mounts }}"
- name: Start the container
community.docker.docker_container:
name: "{{ container_backup }}"
state: started
- name: Upload backups to S3
register: upload_result
amazon.aws.aws_s3:
s3_url: "{{ docker_backup_aws_s3_url }}"
bucket: "{{ docker_backup_aws_s3_bucket }}"
object: "{{ item.Name }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
src: "{{ docker_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
mode: put
encrypt: "{{ docker_backup_aws_s3_encrypt }}"
permission: "{{ docker_backup_aws_s3_permissions }}"
with_items: "{{ volume_mounts }}"
# list each volume's existing backup keys so the oldest can be pruned.
- name: List Backups In S3.
when: docker_backup_retain_count is defined
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
mode: list
prefix: "{{ item.Name }}/{{ item.Name }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
register: s3_list_outputs
with_items: "{{ volume_mounts }}"
- name: Find keys to delete.
when: docker_backup_retain_count is defined
ansible.builtin.set_fact:
s3_keys_to_delete: "{{ s3_list_outputs.results | map(attribute='s3_keys') | keep_last(docker_backup_retain_count) | flatten }}"
- name: Delete old backups.
when: docker_backup_retain_count is defined
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
object: "{{ item }}"
mode: delobj
with_items: "{{ s3_keys_to_delete }}"
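
Note that keep_last is not defined anywhere in this commit, so it has to be a custom filter plugin shipped with the role. A minimal sketch consistent with its use above (the output feeds straight into the delete task, and keys sort chronologically because they embed ISO 8601 timestamps), assuming a hypothetical file such as filter_plugins/keep_last.py:

# Hypothetical keep_last filter plugin (not part of this commit).
# Input: one list of S3 keys per volume, as produced by
# "s3_list_outputs.results | map(attribute='s3_keys')".
# Output: for each list, every key EXCEPT the newest `count`,
# i.e. the keys the "Delete old backups." task should remove.


def keep_last(lists_of_keys, count):
    return [sorted(keys)[: max(len(keys) - count, 0)] for keys in lists_of_keys]


class FilterModule(object):
    def filters(self):
        return {"keep_last": keep_last}

Under this reading the filter's name is slightly misleading: its return value is the delete set, and the newest docker_backup_retain_count objects per volume are the ones that survive.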

@@ -0,0 +1,2 @@
---
# vars file for chatton.docker_backup