Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 343c1ed083 | 3 years ago |
| | cbbb65a9b7 | 3 years ago |
| | 655b9499c0 | 3 years ago |
| | f3ddff350d | 3 years ago |
@@ -0,0 +1,2 @@
```
playbooks/backups
.idea
```
@@ -0,0 +1,3 @@
```ini
[defaults]
# look in the roles directory to find our defined roles.
roles_path = roles
```
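Since Ansible only auto-loads `ansible.cfg` from the current working directory, the playbooks in this change are presumably run from the repository root so that `roles_path = roles` resolves the two roles added below. A minimal sketch (the playbook path is an assumption):

```sh
# Run from the directory containing ansible.cfg so roles/ is on the role search path.
cd /path/to/repo
ansible-playbook playbooks/backup.yml
```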
@@ -0,0 +1,31 @@
```yaml
---
version: '3'
services:
  linkding:
    labels:
      ie.cianhatton.backup.enabled: "true"
    container_name: linkding
    image: sissbruecker/linkding:latest
    ports:
      - "9090:9090"
    volumes:
      - "data:/etc/linkding/data"
    restart: unless-stopped
  minio:
    image: minio/minio:latest
    container_name: minio
    ports:
      - "9000:9000"
      - "9001:9001"
    volumes:
      - minio_storage:/data
    environment:
      MINIO_ROOT_USER: minio
      MINIO_ROOT_PASSWORD: "*!3^wYe&dJ2H9D9aDC68Gh6!v7ydB^eK5G^"
      MINIO_API_ROOT_ACCESS: "on"
      MINIO_BROWSER_REDIRECT_URL: http://127.0.0.1:9000

    command: server --console-address ":9001" /data
volumes:
  data:
  minio_storage:
```
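To exercise the playbooks locally, the stack can be brought up with Compose; the S3 API is published on 9000 and the MinIO console on 9001. The roles below also assume a bucket named `backups` already exists in MinIO. A quick sketch (the compose file name is an assumption):

```sh
# Start linkding and MinIO from the directory containing the compose file.
docker compose up -d

# Sanity-check that the MinIO S3 API is reachable on the published port.
curl -fsS http://127.0.0.1:9000/minio/health/live
```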
@@ -0,0 +1,24 @@
```yaml
---
- name: Backup Docker Volumes.
  hosts: localhost

  tasks:
    - name: Find Containers With Backup Label
      community.docker.docker_host_info:
        containers: true
        containers_filters:
          label:
            - "ie.cianhatton.backup.enabled=true"
      register: filter_output

    - name: Get Container Names
      ansible.builtin.set_fact: container_names="{{ filter_output.containers | map(attribute='Names') | flatten }}"

    - name: Backup Containers with backup label
      ansible.builtin.include_role:
        name: docker_s3_backup
      vars:
        container_backup: "{{ container_item | regex_replace('^\\/', '') }}"
      with_items: "{{ container_names }}"
      loop_control:
        loop_var: container_item
```
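The play discovers every container labelled `ie.cianhatton.backup.enabled=true`; the Docker API reports container names with a leading `/`, which the `regex_replace` strips before handing each name to the role. A hedged invocation (the playbook path is an assumption):

```sh
# Preview which containers the label filter will match.
docker ps --filter "label=ie.cianhatton.backup.enabled=true" --format '{{.Names}}'

# Run the backup play against the local Docker daemon.
ansible-playbook playbooks/backup.yml
```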
@@ -0,0 +1,11 @@
```yaml
---
- name: Restore a docker volume.
  hosts: localhost
  roles:
    - role: docker_s3_volume_restore
      vars:
        docker_backup_restore_force: true
        docker_backup_restore_latest_s3_key: "{{ (volume_name != '') | bool }}"
        docker_backup_fail_on_no_s3_backups: true
        docker_backup_s3_volume:
          name: "{{ volume_name }}"
```
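The comparison is parenthesised so it runs before the `bool` filter; unparenthesised, Jinja binds the filter first and the expression compares `volume_name` against `False`. The play expects `volume_name` as an extra var and, when it is non-empty, lets the role pick the newest backup for that volume. A hypothetical run (file name assumed, the volume name taken from the example in the role defaults):

```sh
ansible-playbook playbooks/restore.yml -e volume_name=linkding_data
```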
@@ -0,0 +1,12 @@
```yaml
# the aws region. For minio this will always be us-east-1.
docker_backup_aws_s3_region: "us-east-1"
# the name of the bucket in minio or s3
docker_backup_aws_s3_bucket: "backups"
# put backups locally in this directory.
docker_backup_host_backup_directory: "/tmp"
# the url of the minio server.
docker_backup_aws_s3_url: "http://127.0.0.1:9000"
docker_backup_aws_s3_aws_access_key: "83meItmzcEgb1NdasSgl"
docker_backup_aws_s3_aws_secret_key: "lwdAJ60gMkcZxRZCHsC6CsdPw63Xuds6h6mksnSz"
docker_backup_aws_s3_permissions: []
docker_backup_aws_s3_encrypt: off
```
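These are role defaults, so they sit at the lowest variable precedence and can be overridden per host, per group, or per run without editing the role; for example (the override values are assumptions):

```sh
# Point the role at a different bucket and endpoint without editing defaults/main.yml.
ansible-playbook playbooks/backup.yml \
  -e docker_backup_aws_s3_bucket=my-backups \
  -e docker_backup_aws_s3_url=http://minio.internal:9000
```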
@@ -0,0 +1,61 @@
```yaml
---
- name: Determine backup timestamp.
  ansible.builtin.set_fact: backup_time="{{ ansible_date_time.iso8601 }}"

- name: Install Python dependencies
  ansible.builtin.pip:
    name:
      - docker
      - boto3

- name: Stop a container
  community.docker.docker_container:
    name: "{{ container_backup }}"
    state: stopped

- name: Get container details
  community.docker.docker_container_info:
    name: "{{ container_backup }}"
  register: result

- name: Extract only the volume mounts (not bind mounts)
  ansible.builtin.set_fact: volume_mounts="{{ result.container.Mounts | selectattr('Type', 'equalto', 'volume') | list }}"

- name: Create Backup of Container Volumes
  community.docker.docker_container:
    name: "backup-container-{{ item.Name }}-{{ 10 | random }}"
    image: ubuntu
    command: "tar -czvf /backups/{{ item.Name }}-{{ backup_time }}.tar.gz /data"
    cleanup: true
    detach: false # block until this container exits.
    state: started
    volumes:
      - "{{ item.Name }}:/data"
      - "{{ docker_backup_host_backup_directory }}:/backups"
  with_items: "{{ volume_mounts }}"

- name: Start the container
  community.docker.docker_container:
    name: "{{ container_backup }}"
    state: started

- name: Upload backups to S3
  register: upload_result
  amazon.aws.aws_s3:
    s3_url: "{{ docker_backup_aws_s3_url }}"
    bucket: "{{ docker_backup_aws_s3_bucket }}"
    object: "{{ item.Name }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
    src: "{{ docker_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
    aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
    aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
    region: "{{ docker_backup_aws_s3_region }}"
    mode: put
    encrypt: "{{ docker_backup_aws_s3_encrypt }}"
    permission: "{{ docker_backup_aws_s3_permissions }}"
  with_items: "{{ volume_mounts }}"

- name: Remove local files.
  ansible.builtin.file:
    path: "{{ docker_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
    state: absent
  with_items: "{{ volume_mounts }}"
```
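Each volume ends up in the bucket under a `name/name-timestamp.tar.gz` key, which is exactly the layout the restore role's `prefix` lookup depends on. The objects can be inspected with any S3 client pointed at the MinIO endpoint; a sketch using the AWS CLI (credentials elided):

```sh
# List uploaded backups, e.g. linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz
AWS_ACCESS_KEY_ID=<access-key> AWS_SECRET_ACCESS_KEY=<secret-key> \
  aws --endpoint-url http://127.0.0.1:9000 s3 ls s3://backups --recursive
```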
@@ -0,0 +1,37 @@
```yaml
---
# the aws region. For minio this will always be us-east-1.
docker_backup_aws_s3_region: "us-east-1"
# the name of the bucket in minio or s3
docker_backup_aws_s3_bucket: "backups"
# put backups locally in this directory.
docker_backup_host_backup_directory: "/tmp"
# the url of the minio server.
docker_backup_aws_s3_url: "http://127.0.0.1:9000"
docker_backup_aws_s3_aws_access_key: "83meItmzcEgb1NdasSgl"
docker_backup_aws_s3_aws_secret_key: "lwdAJ60gMkcZxRZCHsC6CsdPw63Xuds6h6mksnSz"
docker_backup_aws_s3_permissions: []
docker_backup_aws_s3_encrypt: off


# forces a revert to the volume.
docker_backup_restore_force: false
# specify docker_backup_restore_latest_s3_key true to automatically determine the latest
# backup in the s3 bucket. The expected key format has the prefix volume_name/volume_name-*,
# which is the format that the "docker_s3_backup" role uploads them with.
docker_backup_restore_latest_s3_key: false
docker_backup_s3_volume:
  name: ""
  s3_key: "" # optional

# docker_backup_s3_volume:
#   name: linkding
#   s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"

# dynamically find the latest linkding_data backup.
# docker_backup_restore_latest_s3_key: true
# docker_backup_s3_restores:
#   - volume_name: "linkding_data"

# if true, the restore fails when there are no s3 backups for the volume.
# if false, the restore tasks are simply skipped when no backups exist.
docker_backup_fail_on_no_s3_backups: true
```
@@ -0,0 +1,103 @@
```yaml
---
- name: Ensure Volume.
  community.docker.docker_volume:
    name: "{{ docker_backup_s3_volume.name }}"
    state: present
  register: volume_out

- name: Determine if restore is needed.
  ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force or volume_out.changed }}"

# try to find the latest backup based on the volume name.
- name: Find latest s3 version.
  when:
    - should_perform_backup
    - docker_backup_restore_latest_s3_key
  amazon.aws.aws_s3:
    bucket: "{{ docker_backup_aws_s3_bucket }}"
    mode: list
    prefix: "{{ docker_backup_s3_volume.name }}/{{ docker_backup_s3_volume.name }}"
    aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
    aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
    region: "{{ docker_backup_aws_s3_region }}"
    s3_url: "{{ docker_backup_aws_s3_url }}"
  register: s3_list_output

- name: Fail as no backups were found.
  when:
    - should_perform_backup
    - docker_backup_fail_on_no_s3_backups
    - s3_list_output.s3_keys | length == 0
  ansible.builtin.fail:
    msg: "There were no s3 backups found for {{ docker_backup_s3_volume.name }}"

- name: Extract S3 keys for container.
  when:
    - should_perform_backup
    - docker_backup_restore_latest_s3_key
    - s3_list_output.s3_keys | length > 0
  ansible.builtin.set_fact: container_s3_key="{{ s3_list_output.s3_keys | last }}"

- name: Create directories for /tmp file.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  ansible.builtin.file:
    path: '/tmp/{{ container_s3_key | dirname }}'
    state: directory
    mode: '0755'

- name: Download archive from S3.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  amazon.aws.aws_s3:
    bucket: "{{ docker_backup_aws_s3_bucket }}"
    object: "{{ container_s3_key }}"
    aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
    aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
    region: "{{ docker_backup_aws_s3_region }}"
    s3_url: "{{ docker_backup_aws_s3_url }}"
    mode: get
    dest: "/tmp/{{ container_s3_key }}"
  register: get_out

- name: Remove contents of volumes.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  community.docker.docker_container:
    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
    image: ubuntu
    command: ['sh', '-c', 'rm -rf ./*']
    cleanup: true
    detach: false # block until this container exits.
    state: started
    # start inside the directory we want to wipe
    working_dir: "/data"
    volumes:
      - "{{ docker_backup_s3_volume.name }}:/data"

- name: Restore contents of volumes
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  community.docker.docker_container:
    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
    image: ubuntu
    # extract the tar into the volume.
    command: "tar xvf /tmp/{{ container_s3_key }} -C /data --strip-components 1"
    cleanup: true
    detach: false # block until this container exits.
    state: started
    volumes:
      - "{{ docker_backup_s3_volume.name }}:/data"
      - /tmp:/tmp

- name: Remove uploaded files from /tmp
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  ansible.builtin.file:
    path: '/tmp/{{ container_s3_key }}'
    state: absent
```
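Note that the wipe step runs `rm -rf ./*` through `sh -c`: without a shell, Docker would pass the literal `./*` to `rm` and nothing would be removed. After a restore, the volume can be spot-checked with a throwaway container (the volume name here is an example):

```sh
# List the restored files inside the named volume.
docker run --rm -v linkding_data:/data ubuntu ls -la /data
```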