specify a single volume in the role instead of a list (#15)

master v0.1.0
Cian Hatton 3 years ago committed by GitHub
parent 24d998f0b6
commit 7819ea4dd7

.gitignore
@@ -1,2 +1,3 @@
 .idea
+__pycache__
 venv

@@ -7,9 +7,12 @@ docker_backup_restore_force: false
 # backup in the s3 backup. The format which is expected has the prefix of volume_name/volume_name_*
 # this is the format the "docker_s3_backup" role uploads them with.
 docker_backup_restore_latest_s3_key: false
-docker_backup_s3_restores: []
-# docker_backup_s3_restores:
-#   - volume_name: "linkding_data"
+docker_backup_s3_volume:
+  name: ""
+  s3_key: "" # optional
+# docker_backup_s3_volume:
+#   name: linkding
 #     s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
 # dynamically find the latest linkding_data backup.
@@ -23,3 +26,7 @@ docker_backup_aws_s3_url: ""
 docker_backup_aws_s3_aws_access_key: ""
 docker_backup_aws_s3_aws_secret_key: ""
 docker_backup_aws_s3_permissions: []
+# fail on no S3 backups causes the task to fail if there are no s3 backups.
+# setting this to false will instead cause the restore tasks to be skipped if there are no backups.
+docker_backup_fail_on_no_s3_backups: true
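
For reference, the new single-volume variable can be driven in either of two ways; a minimal sketch of playbook overrides, assuming a volume named linkding_data (values adapted from the commented example above):

# Pin a specific backup by key (format per the comments above):
docker_backup_s3_volume:
  name: linkding_data
  s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"

# Or have the role look up the newest backup for the volume:
docker_backup_restore_latest_s3_key: true
docker_backup_s3_volume:
  name: linkding_data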

@@ -4,77 +4,73 @@
 - name: Ensure Volume.
   docker_volume:
-    name: "{{ item.volume_name }}"
+    name: "{{ docker_backup_s3_volume.name }}"
     state: present
   register: volume_out
-  with_items: "{{ docker_backup_s3_restores }}"

 - name: Determine if backup is needed.
-  ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force == true or volume_out.changed == true }}"
+  ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force or volume_out.changed }}"

-- name: End play as no backup is needed.
-  ansible.builtin.meta: end_play
-  when: not should_perform_backup
-
 # try and find latest volume based on the name.
 - name: Find latest s3 version.
-  when: docker_backup_restore_latest_s3_key
+  when:
+    - should_perform_backup
+    - docker_backup_restore_latest_s3_key
   amazon.aws.aws_s3:
     bucket: "{{ docker_backup_aws_s3_bucket }}"
     mode: list
-    prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
+    prefix: "{{ docker_backup_s3_volume.name }}/{{ docker_backup_s3_volume.name }}"
     aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
     aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
     region: "{{ docker_backup_aws_s3_region }}"
     s3_url: "{{ docker_backup_aws_s3_url }}"
   register: s3_list_output
-  with_items: "{{ docker_backup_s3_restores }}"

-- name: Extract S3 keys for container.
-  when: docker_backup_restore_latest_s3_key
-  ansible.builtin.set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
-  with_items: "{{ s3_list_output.results }}"
-
-- name: Extract volume names.
-  ansible.builtin.set_fact: volume_names="{{ docker_backup_s3_restores | map(attribute='volume_name') }}"
-  when: docker_backup_restore_latest_s3_key
-
-- name: Merge volume names and S3 keys.
-  ansible.builtin.set_fact:
-    docker_backup_s3_restores_new: "{{ docker_backup_s3_restores_new | default([]) + [ {'volume_name': item.0, 's3_key': item.1} ] }}"
-  when: docker_backup_restore_latest_s3_key
-  with_together:
-    - "{{ volume_names }}"
-    - "{{ container_s3_keys }}"
-
-- name: Set volumes to restore.
-  ansible.builtin.set_fact:
-    docker_backup_s3_restores_new: "{{ docker_backup_s3_restores }}"
-  when: not docker_backup_restore_latest_s3_key
+- name: Fail as no backups were found.
+  when:
+    - should_perform_backup
+    - docker_backup_fail_on_no_s3_backups
+    - s3_list_output.s3_keys | length == 0
+  ansible.builtin.fail:
+    msg: "There were no s3 backups found for {{ docker_backup_s3_volume.name }}"
+
+- name: Extract S3 keys for container.
+  when:
+    - should_perform_backup
+    - docker_backup_restore_latest_s3_key
+    - s3_list_output.s3_keys | length > 0
+  ansible.builtin.set_fact: container_s3_key="{{ s3_list_output.s3_keys | last }}"

 - name: Create directories for /tmp file.
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   ansible.builtin.file:
-    path: '/tmp/{{ item.s3_key | dirname }}'
+    path: '/tmp/{{ container_s3_key | dirname }}'
     state: directory
     mode: '0755'
-  with_items: '{{ docker_backup_s3_restores_new }}'

 - name: Download archive from S3.
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   amazon.aws.aws_s3:
     bucket: "{{ docker_backup_aws_s3_bucket }}"
-    object: "{{ item.s3_key }}"
+    object: "{{ container_s3_key }}"
     aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
     aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
     region: "{{ docker_backup_aws_s3_region }}"
     s3_url: "{{ docker_backup_aws_s3_url }}"
     mode: get
-    dest: "/tmp/{{ item.s3_key }}"
+    dest: "/tmp/{{ container_s3_key }}"
   register: get_out
-  with_items: "{{ docker_backup_s3_restores_new }}"

 - name: Remove contents of volumes.
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   community.docker.docker_container:
-    name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
+    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
     image: ubuntu
     command: "rm -rf ./*"
     auto_remove: true
@@ -83,25 +79,28 @@
     # start inside the directory we want to wipe
     working_dir: "/data"
     volumes:
-      - "{{ item.volume_name }}:/data"
-  with_items: "{{ docker_backup_s3_restores_new }}"
+      - "{{ docker_backup_s3_volume.name }}:/data"

 - name: Restore contents of volumes
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   community.docker.docker_container:
-    name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
+    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
     image: ubuntu
     # extract the tar into the volume.
-    command: "tar xvf /tmp/{{ item.s3_key }} -C /data --strip-components 1"
+    command: "tar xvf /tmp/{{ container_s3_key }} -C /data --strip-components 1"
     auto_remove: true
     detach: false # block until this container exits.
     state: started
     volumes:
-      - "{{ item.volume_name }}:/data"
+      - "{{ docker_backup_s3_volume.name }}:/data"
       - /tmp:/tmp
-  with_items: "{{ docker_backup_s3_restores_new }}"

 - name: Remove uploaded files from /tmp
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   ansible.builtin.file:
-    path: '/tmp/{{ item.s3_key }}'
+    path: '/tmp/{{ container_s3_key }}'
     state: absent
-  with_items: '{{ docker_backup_s3_restores_new }}'
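
A note on the latest-key lookup above: the "docker_s3_backup" naming scheme embeds an ISO-8601 timestamp in each key, so lexicographic order matches chronological order and `s3_list_output.s3_keys | last` selects the newest backup. A hypothetical listing (example keys, not taken from the repo):

# Hypothetical result of the "Find latest s3 version." task:
s3_list_output:
  s3_keys:
    - "portainer_data/portainer_data-2022-08-30T10:00:00Z.tar.gz"
    - "portainer_data/portainer_data-2022-09-01T21:32:54Z.tar.gz"  # picked by `| last`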

@@ -2,8 +2,9 @@
 - hosts: localhost
   connection: local
   become: true
   tasks:
+    - set_fact:
+        portainer_password: "portainer-password-{{ 10000 | random }}"
     - name: Install Docker Module for Python
       pip:
         name:
@@ -50,7 +51,7 @@
         method: POST
         body:
           Username: admin
-          Password: "adminadminadmin"
+          Password: "{{ portainer_password }}"
         status_code: 200
         body_format: json
       register: result
@@ -79,8 +80,8 @@
         name: docker_s3_volume_restore
       vars:
         docker_backup_restore_latest_s3_key: true
-        docker_backup_s3_restores:
-          - volume_name: portainer_data
+        docker_backup_s3_volume:
+          name: portainer_data

     - name: Deploy Portainer
       docker_compose:
@@ -93,7 +94,7 @@
         method: POST
         body:
           Username: admin
-          Password: "adminadminadmin"
+          Password: "{{ portainer_password }}"
         status_code: 200
         body_format: json
       register: result
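
The randomised password also turns the final login into an end-to-end check: the re-deployed Portainer only knows `portainer_password` if `portainer_data` was genuinely restored from S3. A hypothetical follow-up assertion, assuming the login task above also sets return_content: true so the JSON body (Portainer returns a JWT on success) is parsed:

    - name: Assert that the restored data accepted the original password
      ansible.builtin.assert:
        that:
          - result.json.jwt is defined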
