---
# https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#ansible-collections-community-docker-docker-container-module
# https://docs.docker.com/storage/volumes/#backup-restore-or-migrate-data-volumes
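#
# Each item in docker_volume_s3_restores pairs a Docker volume with the S3
# key of its backup archive. A sketch of the expected shape (the values here
# are hypothetical):
#
#   docker_volume_s3_restores:
#     - volume_name: app-data
#       s3_key: app-data/app-data-2021-01-01.tar.gz
#
# When docker_s3_volume_restore_latest_s3_key is true, s3_key is ignored and
# the newest key under "<volume_name>/<volume_name>" is discovered instead.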
- name: Ensure Volume.
  docker_volume:
    name: "{{ item.volume_name }}"
    state: present
  register: volume_out
  with_items: "{{ docker_volume_s3_restores }}"
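# Only restore when a volume was just created (and is therefore empty), or
# when a restore is explicitly forced.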
- name: Determine if a restore is needed.
  set_fact: should_perform_restore="{{ docker_s3_volume_restore_force == true or volume_out.changed == true }}"
- name: End play as no restore is needed.
  meta: end_play
  when: not should_perform_restore
# Try to find the latest backup for each volume based on its name.
- name: Find latest s3 version.
  when: docker_s3_volume_restore_latest_s3_key
  amazon.aws.aws_s3:
    bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
    mode: list
    prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
    aws_access_key: "{{ docker_s3_volume_restore_aws_s3_aws_access_key }}"
    aws_secret_key: "{{ docker_s3_volume_restore_aws_s3_aws_secret_key }}"
    region: "{{ docker_s3_volume_restore_aws_s3_region }}"
    s3_url: "{{ docker_s3_volume_restore_aws_s3_url }}"
  register: s3_list_output
  with_items: "{{ docker_volume_s3_restores }}"
- debug: msg="{{ s3_list_output }}"
when: docker_s3_volume_restore_latest_s3_key
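# S3 list results come back in ascending lexicographic order, so taking the
# last key per volume picks the newest backup, assuming key names embed a
# sortable timestamp.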
- name: Extract s3 keys for container
  when: docker_s3_volume_restore_latest_s3_key
  set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
  with_items: "{{ s3_list_output.results }}"
- debug: msg="{{ container_s3_keys }}"
  when: docker_s3_volume_restore_latest_s3_key
- set_fact: volume_names="{{ docker_volume_s3_restores | map(attribute='volume_name') | list }}"
  when: docker_s3_volume_restore_latest_s3_key
# Remove existing values so we can determine the s3 key automatically.
- set_fact: docker_volume_s3_restores_new="{{ [] }}"
  when: docker_s3_volume_restore_latest_s3_key
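# Zip each volume name with the key discovered for it above to rebuild the
# restore list.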
- set_fact:
    docker_volume_s3_restores_new: "{{ docker_volume_s3_restores_new | default([]) + [{'volume_name': item.0, 's3_key': item.1}] }}"
  when: docker_s3_volume_restore_latest_s3_key
  with_together:
    - "{{ volume_names }}"
    - "{{ container_s3_keys }}"
- set_fact:
    docker_volume_s3_restores_new: "{{ docker_volume_s3_restores }}"
  when: not docker_s3_volume_restore_latest_s3_key
- debug: msg="{{ docker_volume_s3_restores_new }}"
- name: Create /tmp directories for the downloaded archives
  file:
    path: '/tmp/{{ item.s3_key | dirname }}'
    state: directory
    mode: '0755'
  with_items: '{{ docker_volume_s3_restores_new }}'
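# Each archive is downloaded to a path that mirrors its S3 key, so the
# restore task below can mount /tmp and reference the same path.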
- name: Download archive from S3
  amazon.aws.aws_s3:
    bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
    object: "{{ item.s3_key }}"
    aws_access_key: "{{ docker_s3_volume_restore_aws_s3_aws_access_key }}"
    aws_secret_key: "{{ docker_s3_volume_restore_aws_s3_aws_secret_key }}"
    region: "{{ docker_s3_volume_restore_aws_s3_region }}"
    s3_url: "{{ docker_s3_volume_restore_aws_s3_url }}"
    mode: get
    dest: "/tmp/{{ item.s3_key }}"
  register: get_out
  with_items: "{{ docker_volume_s3_restores_new }}"
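# Wipe each volume with a throwaway container before extracting, so files
# deleted since the backup was taken do not survive the restore.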
- name: Remove contents of volumes
  community.docker.docker_container:
    name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
    image: ubuntu
    # run through a shell so the ./* glob is actually expanded.
    command: "sh -c 'rm -rf ./*'"
    auto_remove: true
    detach: false # block until this container exits.
    state: started
    # start inside the directory we want to wipe
    working_dir: "/data"
    volumes:
      - "{{ item.volume_name }}:/data"
  with_items: "{{ docker_volume_s3_restores_new }}"
- name: Restore contents of volumes
  community.docker.docker_container:
    name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
    image: ubuntu
    # extract the tar into the volume.
    command: "tar xvf /tmp/{{ item.s3_key }} -C /data --strip-components 1"
    auto_remove: true
    detach: false # block until this container exits.
    state: started
    volumes:
      - "{{ item.volume_name }}:/data"
      - /tmp:/tmp
  with_items: "{{ docker_volume_s3_restores_new }}"
- name: Remove downloaded files from /tmp
  file:
    path: '/tmp/{{ item.s3_key }}'
    state: absent
  with_items: '{{ docker_volume_s3_restores_new }}'