adding s3 restore task

test
chatton 3 years ago
parent cbbb65a9b7
commit 343c1ed083

@@ -0,0 +1,11 @@
---
- name: Restore a docker volume.
  hosts: localhost
  roles:
    - role: docker_s3_volume_restore
      vars:
        docker_backup_restore_force: true
        docker_backup_restore_latest_s3_key: "{{ (volume_name != '') | bool }}"
        docker_backup_fail_on_no_s3_backups: true
        docker_backup_s3_volume:
          name: "{{ volume_name }}"

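The playbook above resolves the newest backup automatically whenever a non-empty volume_name is passed in, e.g. ansible-playbook restore.yml -e volume_name=linkding_data (restore.yml is a hypothetical file name for this playbook). A minimal sketch of the other mode, pinning the restore to one specific S3 key instead of resolving the latest one; the volume name and key mirror the commented example in the role defaults below:

---
# sketch: restore from a pinned S3 key rather than the latest backup.
- name: Restore a docker volume from a specific backup.
  hosts: localhost
  roles:
    - role: docker_s3_volume_restore
      vars:
        docker_backup_restore_force: true
        docker_backup_restore_latest_s3_key: false
        docker_backup_s3_volume:
          name: "linkding_data"
          s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
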
@@ -0,0 +1,37 @@
---
# the aws region. For minio this will always be us-east-1.
docker_backup_aws_s3_region: "us-east-1"
# the name of the bucket in minio or s3
docker_backup_aws_s3_bucket: "backups"
# put backups locally in this directory.
docker_backup_host_backup_directory: "/tmp"
# the url of the minio server.
docker_backup_aws_s3_url: "http://127.0.0.1:9000"
docker_backup_aws_s3_aws_access_key: "83meItmzcEgb1NdasSgl"
docker_backup_aws_s3_aws_secret_key: "lwdAJ60gMkcZxRZCHsC6CsdPw63Xuds6h6mksnSz"
docker_backup_aws_s3_permissions: []
docker_backup_aws_s3_encrypt: off
# forces a revert to the volume.
docker_backup_restore_force: false
# specify docker_backup_restore_latest_s3_key: true to automatically determine the latest
# backup in the s3 bucket. Keys are expected to have the prefix volume_name/volume_name-*,
# which is the format that the "docker_s3_backup" role uploads them with.
docker_backup_restore_latest_s3_key: false
docker_backup_s3_volume:
name: ""
s3_key: "" # optional
# docker_backup_s3_volume:
# name: linkding
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
# dynamically find the latest linkding_data backup.
# docker_backup_restore_latest_s3_key: true
# docker_backup_s3_restores:
# - volume_name: "linkding_data"
# fail on no S3 backups causes the restore to fail if there are no s3 backups for the volume.
# setting this to false will instead cause the restore task to end early.
docker_backup_fail_on_no_s3_backups: true
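
For reference, the keys the "docker_s3_backup" role produces (and that docker_backup_restore_latest_s3_key relies on) look roughly like the listing below; the restore picks the last key returned for the volume_name/volume_name prefix, and because the timestamps are ISO-8601 the lexicographically last key is also the newest. The older key here is illustrative only, based on the example key above:

# example bucket contents for a volume named linkding_data
linkding_data/linkding_data-2022-08-31T02:10:07Z.tar.gz
linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz   # <- chosen as "latest"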

@@ -0,0 +1,103 @@
---
- name: Ensure Volume.
  community.docker.docker_volume:
    name: "{{ docker_backup_s3_volume.name }}"
    state: present
  register: volume_out
- name: Determine if restore is needed.
  ansible.builtin.set_fact:
    should_perform_backup: "{{ docker_backup_restore_force or volume_out.changed }}"
# try to find the latest backup based on the volume name.
- name: Find latest s3 version.
  when:
    - should_perform_backup
    - docker_backup_restore_latest_s3_key
  amazon.aws.aws_s3:
    bucket: "{{ docker_backup_aws_s3_bucket }}"
    mode: list
    prefix: "{{ docker_backup_s3_volume.name }}/{{ docker_backup_s3_volume.name }}"
    aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
    aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
    region: "{{ docker_backup_aws_s3_region }}"
    s3_url: "{{ docker_backup_aws_s3_url }}"
  register: s3_list_output
- name: Fail as no backups were found.
  when:
    - should_perform_backup
    - docker_backup_fail_on_no_s3_backups
    - s3_list_output.s3_keys | length == 0
  ansible.builtin.fail:
    msg: "There were no s3 backups found for {{ docker_backup_s3_volume.name }}"
- name: Extract S3 keys for container.
  when:
    - should_perform_backup
    - docker_backup_restore_latest_s3_key
    - s3_list_output.s3_keys | length > 0
  ansible.builtin.set_fact:
    container_s3_key: "{{ s3_list_output.s3_keys | last }}"
- name: Create directories for /tmp file.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  ansible.builtin.file:
    path: '/tmp/{{ container_s3_key | dirname }}'
    state: directory
    mode: '0755'
- name: Download archive from S3.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  amazon.aws.aws_s3:
    bucket: "{{ docker_backup_aws_s3_bucket }}"
    object: "{{ container_s3_key }}"
    aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
    aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
    region: "{{ docker_backup_aws_s3_region }}"
    s3_url: "{{ docker_backup_aws_s3_url }}"
    mode: get
    dest: "/tmp/{{ container_s3_key }}"
  register: get_out
- name: Remove contents of volume.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  community.docker.docker_container:
    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
    image: ubuntu
    # run the glob through a shell so ./* actually expands inside the container.
    command: ["/bin/sh", "-c", "rm -rf ./*"]
    cleanup: true
    detach: false # block until this container exits.
    state: started
    # start inside the directory we want to wipe
    working_dir: "/data"
    volumes:
      - "{{ docker_backup_s3_volume.name }}:/data"
- name: Restore contents of volume.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  community.docker.docker_container:
    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
    image: ubuntu
    # extract the tar into the volume.
    command: "tar xvf /tmp/{{ container_s3_key }} -C /data --strip-components 1"
    cleanup: true
    detach: false # block until this container exits.
    state: started
    volumes:
      - "{{ docker_backup_s3_volume.name }}:/data"
      - /tmp:/tmp
- name: Remove downloaded files from /tmp.
  when:
    - should_perform_backup
    - s3_list_output.s3_keys | length > 0
  ansible.builtin.file:
    path: '/tmp/{{ container_s3_key }}'
    state: absent