specify a single volume in the role instead of a list (#15)

master v0.1.0
Cian Hatton authored 3 years ago · committed by GitHub
parent 24d998f0b6
commit 7819ea4dd7
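With this change, callers configure one volume per role invocation instead of passing a docker_backup_s3_restores list. A minimal sketch of the new usage, assuming the role is pulled in with ansible.builtin.include_role (role name and hosts mirror the test playbook further down; the volume name and the omitted S3 bucket/credential vars are illustrative):

- hosts: localhost
  connection: local
  become: true
  tasks:
    - name: Restore a single volume from its newest S3 backup (sketch)
      ansible.builtin.include_role:
        name: docker_s3_volume_restore
      vars:
        docker_backup_restore_latest_s3_key: true
        docker_backup_s3_volume:
          name: linkding_data   # one mapping replaces the old docker_backup_s3_restores list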

.gitignore vendored

@@ -1,2 +1,3 @@
.idea
__pycache__
venv

@@ -7,10 +7,13 @@ docker_backup_restore_force: false
# backup in the s3 backup. The expected format has the prefix volume_name/volume_name_*
# this is the format that the "docker_s3_backup" role uploads them with.
docker_backup_restore_latest_s3_key: false
docker_backup_s3_restores: []
# docker_backup_s3_restores:
# - volume_name: "linkding_data"
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
docker_backup_s3_volume:
name: ""
s3_key: "" # optional
# docker_backup_s3_volume:
# name: linkding
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
# dynamically find the latest linkding_data backup.
# docker_backup_restore_latest_s3_key: true
@@ -23,3 +26,7 @@ docker_backup_aws_s3_url: ""
docker_backup_aws_s3_aws_access_key: ""
docker_backup_aws_s3_aws_secret_key: ""
docker_backup_aws_s3_permissions: []
# fail on no S3 backups causes the restore to fail if there are no S3 backups for the volume.
# setting this to true makes the role fail fast when no backups are found; setting it to false skips the restore steps instead.
docker_backup_fail_on_no_s3_backups: true
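Read together, the new defaults cover the case this change targets: name a single volume, let the role resolve its newest archive, and decide whether an empty bucket is an error. A hedged host-vars sketch (the volume name mirrors the commented example above; everything else is illustrative):

docker_backup_restore_latest_s3_key: true
docker_backup_s3_volume:
  name: linkding_data                       # s3_key left empty; the newest archive under this prefix is used
docker_backup_fail_on_no_s3_backups: false  # skip the restore instead of failing when nothing has been uploaded yet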

@@ -4,77 +4,73 @@
- name: Ensure Volume.
docker_volume:
name: "{{ item.volume_name }}"
name: "{{ docker_backup_s3_volume.name }}"
state: present
register: volume_out
with_items: "{{ docker_backup_s3_restores }}"
- name: Determine if backup is needed.
ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force == true or volume_out.changed == true }}"
- name: End play as no backup is needed.
ansible.builtin.meta: end_play
when: not should_perform_backup
ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force or volume_out.changed }}"
# try to find the latest backup for the volume based on its name.
- name: Find latest s3 version.
when: docker_backup_restore_latest_s3_key
when:
- should_perform_backup
- docker_backup_restore_latest_s3_key
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
mode: list
prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
prefix: "{{ docker_backup_s3_volume.name }}/{{ docker_backup_s3_volume.name }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
register: s3_list_output
with_items: "{{ docker_backup_s3_restores }}"
- name: Extract S3 keys for container.
when: docker_backup_restore_latest_s3_key
ansible.builtin.set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
with_items: "{{ s3_list_output.results }}"
- name: Extract volume names.
ansible.builtin.set_fact: volume_names="{{ docker_backup_s3_restores | map(attribute='volume_name') }}"
when: docker_backup_restore_latest_s3_key
- name: Fail as no backups were found.
when:
- should_perform_backup
- docker_backup_fail_on_no_s3_backups
- s3_list_output.s3_keys | length == 0
ansible.builtin.fail:
msg: "There were no s3 backups found for {{ docker_backup_s3_volume.name }}"
- name: Merge volume names and S3 keys.
ansible.builtin.set_fact:
docker_backup_s3_restores_new: "{{ docker_backup_s3_restores_new | default([]) + [ {'volume_name': item.0, 's3_key': item.1} ] }}"
when: docker_backup_restore_latest_s3_key
with_together:
- "{{ volume_names }}"
- "{{ container_s3_keys }}"
- name: Set volumes to restore.
ansible.builtin.set_fact:
docker_backup_s3_restores_new: "{{ docker_backup_s3_restores }}"
when: not docker_backup_restore_latest_s3_key
- name: Extract S3 keys for container.
when:
- should_perform_backup
- docker_backup_restore_latest_s3_key
- s3_list_output.s3_keys | length > 0
ansible.builtin.set_fact: container_s3_key="{{ s3_list_output.s3_keys | last }}"
- name: Create directories for /tmp file.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
ansible.builtin.file:
path: '/tmp/{{ item.s3_key | dirname }}'
path: '/tmp/{{ container_s3_key | dirname }}'
state: directory
mode: '0755'
with_items: '{{ docker_backup_s3_restores_new }}'
- name: Download archive from S3.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
object: "{{ item.s3_key }}"
object: "{{ container_s3_key }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
mode: get
dest: "/tmp/{{ item.s3_key }}"
dest: "/tmp/{{ container_s3_key }}"
register: get_out
with_items: "{{ docker_backup_s3_restores_new }}"
- name: Remove contents of volumes.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
community.docker.docker_container:
name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
image: ubuntu
command: "rm -rf ./*"
auto_remove: true
@@ -83,25 +79,28 @@
# start inside the directory we want to wipe
working_dir: "/data"
volumes:
- "{{ item.volume_name }}:/data"
with_items: "{{ docker_backup_s3_restores_new }}"
- "{{ docker_backup_s3_volume.name }}:/data"
- name: Restore contents of volumes
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
community.docker.docker_container:
name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
image: ubuntu
# extract the tar into the volume.
command: "tar xvf /tmp/{{ item.s3_key }} -C /data --strip-components 1"
command: "tar xvf /tmp/{{ container_s3_key }} -C /data --strip-components 1"
auto_remove: true
detach: false # block until this container exits.
state: started
volumes:
- "{{ item.volume_name }}:/data"
- "{{ docker_backup_s3_volume.name }}:/data"
- /tmp:/tmp
with_items: "{{ docker_backup_s3_restores_new }}"
- name: Remove uploaded files from /tmp
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
ansible.builtin.file:
path: '/tmp/{{ item.s3_key }}'
path: '/tmp/{{ container_s3_key }}'
state: absent
with_items: '{{ docker_backup_s3_restores_new }}'
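Worth noting how the rewritten tasks decide what "latest" means: the docker_s3_backup role names archives volume_name/volume_name-<ISO-8601 timestamp>.tar.gz, and S3 listings come back in ascending key order, so the last entry of s3_list_output.s3_keys is the most recent upload. A standalone sketch of that selection, with invented sample keys:

- hosts: localhost
  gather_facts: false
  vars:
    sample_s3_keys:
      - "linkding_data/linkding_data-2022-08-01T10:00:00Z.tar.gz"
      - "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
  tasks:
    - name: The newest key is simply the last element of the listing
      ansible.builtin.debug:
        msg: "{{ sample_s3_keys | last }}"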

@@ -2,8 +2,9 @@
- hosts: localhost
connection: local
become: true
tasks:
- set_fact:
portainer_password: "portainer-password-{{ 10000 | random }}"
- name: Install Docker Module for Python
pip:
name:
@@ -50,7 +51,7 @@
method: POST
body:
Username: admin
Password: "adminadminadmin"
Password: "{{ portainer_password }}"
status_code: 200
body_format: json
register: result
@@ -79,8 +80,8 @@
name: docker_s3_volume_restore
vars:
docker_backup_restore_latest_s3_key: true
docker_backup_s3_restores:
- volume_name: portainer_data
docker_backup_s3_volume:
name: portainer_data
- name: Deploy Portainer
docker_compose:
@@ -93,7 +94,7 @@
method: POST
body:
Username: admin
Password: "adminadminadmin"
Password: "{{ portainer_password }}"
status_code: 200
body_format: json
register: result
