Add support for automatically finding the latest S3 backup (#1)

Cian Hatton committed 3 years ago via GitHub
parent 01ae429c44
commit 3d93af7a5e

@@ -42,7 +42,7 @@
     s3_url: "{{ docker_s3_backup_aws_s3_url }}"
     bucket: "{{ docker_s3_backup_aws_s3_bucket }}"
     object: "{{ item.Name }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
-    src: {{ docker_s3_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz
+    src: "{{ docker_s3_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
     aws_access_key: "{{ docker_s3_backup_aws_s3_aws_access_key }}"
     aws_secret_key: "{{ docker_s3_backup_aws_s3_aws_secret_key }}"
     region: "{{ docker_s3_backup_aws_s3_region }}"

@@ -3,11 +3,20 @@
 # forces a revert to the volume.
 docker_s3_volume_restore_force: false
+# Set docker_s3_volume_restore_latest_s3_key to true to automatically determine the latest
+# backup in the S3 bucket. The expected key format has the prefix volume_name/volume_name-*,
+# which is the format the "docker_s3_backup" role uploads archives with.
+docker_s3_volume_restore_latest_s3_key: false

 docker_s3_volume_restores: []
-#docker_s3_volume_restores:
+#docker_volume_s3_restores:
 #  - volume_name: "linkding_data"
 #    s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
+
+# Dynamically find the latest linkding_data backup:
+# docker_s3_volume_restore_latest_s3_key: true
+#docker_volume_s3_restores:
+#  - volume_name: "linkding_data"

 docker_s3_volume_restore_aws_s3_region: "us-east-1"
 docker_s3_volume_restore_aws_s3_bucket: "backups"
 docker_s3_volume_restore_aws_s3_url: ""
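A minimal sketch of how a consuming playbook could opt in to the new behaviour. The host group and the role name docker_s3_volume_restore are assumptions; note that the tasks iterate docker_volume_s3_restores, so that is the list set here, and s3_key can be omitted when the latest key is resolved automatically.

- hosts: docker_hosts                        # hypothetical inventory group
  vars:
    docker_s3_volume_restore_aws_s3_bucket: "backups"
    docker_s3_volume_restore_latest_s3_key: true
    docker_volume_s3_restores:
      # s3_key omitted: the newest object under linkding_data/linkding_data-* is used
      - volume_name: "linkding_data"
  roles:
    - docker_s3_volume_restore               # hypothetical role name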

@@ -10,12 +10,69 @@
   with_items: "{{ docker_volume_s3_restores }}"

 - name: Determine if backup is needed.
-  set_fact: should_perform_backup="{{ docker_volume_s3_force == true or volume_out.changed == true }}"
+  set_fact: should_perform_backup="{{ docker_s3_volume_restore_force == true or volume_out.changed == true }}"

 - name: End play as no backup is needed.
   meta: end_play
   when: not should_perform_backup

+# Try to find the latest backup for each volume based on its name.
+- name: Find latest S3 backup for each volume.
+  when: docker_s3_volume_restore_latest_s3_key
+  amazon.aws.aws_s3:
+    bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
+    mode: list
+    prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
+    aws_access_key: "{{ docker_s3_volume_restore_aws_s3_aws_access_key }}"
+    aws_secret_key: "{{ docker_s3_volume_restore_aws_s3_aws_secret_key }}"
+    region: "{{ docker_s3_volume_restore_aws_s3_region }}"
+    s3_url: "{{ docker_s3_volume_restore_aws_s3_url }}"
+  register: s3_list_output
+  with_items: "{{ docker_volume_s3_restores }}"
+
+- debug: msg="{{ s3_list_output }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+- name: Extract the latest S3 key for each volume.
+  when: docker_s3_volume_restore_latest_s3_key
+  set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
+  with_items: "{{ s3_list_output.results }}"
+
+- debug: msg="{{ container_s3_keys }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+- set_fact: volume_names="{{ docker_volume_s3_restores | map(attribute='volume_name') | list }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+# Remove any existing values so the S3 key can be determined automatically.
+#- set_fact: docker_volume_s3_restores_new="{{ [] }}"
+#  when: docker_s3_volume_restore_latest_s3_key
+- debug: msg="volume_names={{ volume_names }}"
+  when: docker_s3_volume_restore_latest_s3_key
+- debug: msg="container_s3_keys={{ container_s3_keys }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+- set_fact:
+    docker_volume_s3_restores_new: "{{ docker_volume_s3_restores_new | default([]) + [ {'volume_name': item.0, 's3_key': item.1} ] }}"
+  when: docker_s3_volume_restore_latest_s3_key
+  with_together:
+    - "{{ volume_names }}"
+    - "{{ container_s3_keys }}"
+
+- debug: msg="{{ docker_volume_s3_restores_new }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+- set_fact:
+    docker_volume_s3_restores_new: "{{ docker_volume_s3_restores }}"
+  when: not docker_s3_volume_restore_latest_s3_key
+
+- debug: msg="{{ docker_volume_s3_restores_new }}"
+
+- name: Create directories under /tmp for the downloaded archives.
+  file:
+    path: '/tmp/{{ item.s3_key | dirname }}'
+    state: directory
+    mode: '0755'
+  with_items: '{{ docker_volume_s3_restores_new }}'
+
 - name: Download archive from S3
   amazon.aws.aws_s3:
     bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
@@ -27,7 +84,7 @@
     mode: get
     dest: "/tmp/{{ item.s3_key }}"
   register: get_out
-  with_items: "{{ docker_volume_s3_restores }}"
+  with_items: "{{ docker_volume_s3_restores_new }}"

 - name: Remove contents of volumes
   community.docker.docker_container:
@@ -41,7 +98,7 @@
     working_dir: "/data"
     volumes:
       - "{{ item.volume_name }}:/data"
-  with_items: "{{ docker_volume_s3_restores }}"
+  with_items: "{{ docker_volume_s3_restores_new }}"

 - name: Restore contents of volumes
   community.docker.docker_container:
@@ -55,4 +112,10 @@
     volumes:
       - "{{ item.volume_name }}:/data"
       - /tmp:/tmp
-  with_items: "{{ docker_volume_s3_restores }}"
+  with_items: "{{ docker_volume_s3_restores_new }}"
+
+- name: Remove the downloaded archives from /tmp
+  file:
+    path: '/tmp/{{ item.s3_key }}'
+    state: absent
+  with_items: '{{ docker_volume_s3_restores_new }}'
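Picking item.s3_keys | last relies on aws_s3 mode=list returning keys in lexicographic order, which lines up with chronological order here because the key names embed an ISO-8601 timestamp. A standalone sketch of that selection step, using hypothetical sample keys in place of the real listing:

- hosts: localhost
  gather_facts: false
  vars:
    sample_s3_keys:                          # hypothetical stand-in for item.s3_keys
      - "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
      - "linkding_data/linkding_data-2022-10-15T04:10:02Z.tar.gz"
  tasks:
    - name: Pick the newest backup key
      set_fact:
        latest_key: "{{ sample_s3_keys | last }}"   # same filter the role uses; assumes sorted input
    - debug:
        msg: "latest_key={{ latest_key }}"          # -> the 2022-10-15 archive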
