updated to use individual volumes rather than a list (#30)

pull/35/head
Cian Hatton 3 years ago committed by GitHub
parent a553be7d72
commit c549e9b1de
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -67,7 +67,7 @@
"name": "tests/playbook.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "a52f9f26b558f05a29a0447f10a1ea7ca96df1b7bc44a31b2727c624c267e3fb",
"chksum_sha256": "d0b385b6401db77864f63dd71011d2833ef5eb8eefe83888ac2d64c00b0b787a",
"format": 1
},
{
@@ -200,7 +200,7 @@
"name": "roles/docker_s3_volume_restore/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "912b5afae6e9183741dd29242e1810ceb32deabb3624f2a2ccc2b1ea8984a45f",
"chksum_sha256": "476842dfde43303d0a1899e9c53e9eb7e05626860531436f198baa900f11aef1",
"format": 1
},
{
@@ -228,7 +228,7 @@
"name": "roles/docker_s3_volume_restore/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b0ecdc12d3f7814ebfbded52d4db758462c2c69b34635e670ee7ee2322d6c44f",
"chksum_sha256": "936e4f593c6df8559487e4fba725bf7dd7221e771f5cc02b56b5550cb61e4e44",
"format": 1
},
{
@@ -256,7 +256,7 @@
"name": ".gitignore",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "54c27deac980434757ea7dc177f753710633b9464576e23804c2d5f5bc549288",
"chksum_sha256": "cf66f46c39b7cf8f396c3f42c7a8c59b50ccf6ae7777b31f635187d9a3e03bbe",
"format": 1
},
{

@@ -23,7 +23,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4f37593e42d3f2b5b2225b4aade83be41bee2a507b88546d3bbd79e061017173",
"chksum_sha256": "d430b04ca511676cafb9644eb216080037b0ba3d56bed40d4e48f3fe3283000f",
"format": 1
},
"format": 1

@@ -7,10 +7,13 @@ docker_backup_restore_force: false
# backup in the s3 backup. The expected format has the prefix of volume_name/volume_name_*;
# this is the format that the "docker_s3_backup" role uploads them with.
docker_backup_restore_latest_s3_key: false
docker_backup_s3_restores: []
# docker_backup_s3_restores:
# - volume_name: "linkding_data"
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
docker_backup_s3_volume:
name: ""
s3_key: "" # optional
# docker_backup_s3_volume:
# name: linkding
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
# dynamically find the latest linkding_data backup.
# docker_backup_restore_latest_s3_key: true
@@ -23,3 +26,7 @@ docker_backup_aws_s3_url: ""
docker_backup_aws_s3_aws_access_key: ""
docker_backup_aws_s3_aws_secret_key: ""
docker_backup_aws_s3_permissions: []
# fail on no S3 backups: when true, the restore fails if no S3 backups are found for the volume;
# when false, the remaining restore tasks are skipped instead.
docker_backup_fail_on_no_s3_backups: true
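For reference, a minimal sketch of how the new single-volume variables might be supplied when applying the role, mirroring the commented example above (values are illustrative; the docker_backup_aws_s3_* credential variables are omitted for brevity):

- hosts: localhost
  connection: local
  become: true
  roles:
    # pin an exact backup object for one volume
    - role: docker_s3_volume_restore
      vars:
        docker_backup_s3_volume:
          name: linkding_data
          s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
    # or let the role look up the newest backup for the volume
    - role: docker_s3_volume_restore
      vars:
        docker_backup_restore_latest_s3_key: true
        docker_backup_fail_on_no_s3_backups: false
        docker_backup_s3_volume:
          name: linkding_data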

@@ -4,77 +4,73 @@
- name: Ensure Volume.
docker_volume:
name: "{{ item.volume_name }}"
name: "{{ docker_backup_s3_volume.name }}"
state: present
register: volume_out
with_items: "{{ docker_backup_s3_restores }}"
- name: Determine if backup is needed.
ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force == true or volume_out.changed == true }}"
- name: End play as no backup is needed.
ansible.builtin.meta: end_play
when: not should_perform_backup
ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force or volume_out.changed }}"
# try to find the latest backup for the volume based on its name.
- name: Find latest s3 version.
when: docker_backup_restore_latest_s3_key
when:
- should_perform_backup
- docker_backup_restore_latest_s3_key
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
mode: list
prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
prefix: "{{ docker_backup_s3_volume.name }}/{{ docker_backup_s3_volume.name }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
register: s3_list_output
with_items: "{{ docker_backup_s3_restores }}"
- name: Extract S3 keys for container.
when: docker_backup_restore_latest_s3_key
ansible.builtin.set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
with_items: "{{ s3_list_output.results }}"
- name: Extract volume names.
ansible.builtin.set_fact: volume_names="{{ docker_backup_s3_restores | map(attribute='volume_name') }}"
when: docker_backup_restore_latest_s3_key
- name: Fail as no S3 backups were found.
when:
- should_perform_backup
- docker_backup_fail_on_no_s3_backups
- s3_list_output.s3_keys | length == 0
ansible.builtin.fail:
msg: "There were no s3 backups found for {{ docker_backup_s3_volume.name }}"
- name: Merge volume names and S3 keys.
ansible.builtin.set_fact:
docker_backup_s3_restores_new: "{{ docker_backup_s3_restores_new | default([]) + [ {'volume_name': item.0, 's3_key': item.1} ] }}"
when: docker_backup_restore_latest_s3_key
with_together:
- "{{ volume_names }}"
- "{{ container_s3_keys }}"
- name: Set volumes to restore.
ansible.builtin.set_fact:
docker_backup_s3_restores_new: "{{ docker_backup_s3_restores }}"
when: not docker_backup_restore_latest_s3_key
- name: Extract S3 key for container.
when:
- should_perform_backup
- docker_backup_restore_latest_s3_key
- s3_list_output.s3_keys | length > 0
ansible.builtin.set_fact: container_s3_key="{{ s3_list_output.s3_keys | last }}"
- name: Create /tmp directory for the downloaded archive.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
ansible.builtin.file:
path: '/tmp/{{ item.s3_key | dirname }}'
path: '/tmp/{{ container_s3_key | dirname }}'
state: directory
mode: '0755'
with_items: '{{ docker_backup_s3_restores_new }}'
- name: Download archive from S3.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
amazon.aws.aws_s3:
bucket: "{{ docker_backup_aws_s3_bucket }}"
object: "{{ item.s3_key }}"
object: "{{ container_s3_key }}"
aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
region: "{{ docker_backup_aws_s3_region }}"
s3_url: "{{ docker_backup_aws_s3_url }}"
mode: get
dest: "/tmp/{{ item.s3_key }}"
dest: "/tmp/{{ container_s3_key }}"
register: get_out
with_items: "{{ docker_backup_s3_restores_new }}"
- name: Remove contents of volume.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
community.docker.docker_container:
name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
image: ubuntu
command: "rm -rf ./*"
auto_remove: true
@@ -83,25 +79,28 @@
# start inside the directory we want to wipe
working_dir: "/data"
volumes:
- "{{ item.volume_name }}:/data"
with_items: "{{ docker_backup_s3_restores_new }}"
- "{{ docker_backup_s3_volume.name }}:/data"
- name: Restore contents of volume.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
community.docker.docker_container:
name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
image: ubuntu
# extract the tar into the volume.
command: "tar xvf /tmp/{{ item.s3_key }} -C /data --strip-components 1"
command: "tar xvf /tmp/{{ container_s3_key }} -C /data --strip-components 1"
auto_remove: true
detach: false # block until this container exits.
state: started
volumes:
- "{{ item.volume_name }}:/data"
- "{{ docker_backup_s3_volume.name }}:/data"
- /tmp:/tmp
with_items: "{{ docker_backup_s3_restores_new }}"
- name: Remove downloaded archive from /tmp.
when:
- should_perform_backup
- s3_list_output.s3_keys | length > 0
ansible.builtin.file:
path: '/tmp/{{ item.s3_key }}'
path: '/tmp/{{ container_s3_key }}'
state: absent
with_items: '{{ docker_backup_s3_restores_new }}'
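Because the role now restores one volume per invocation, several volumes are handled by looping the role from the caller, which is the same pattern this change applies to the stack-restore tasks further down. A minimal sketch (volume names are illustrative):

- name: Restore each volume from its latest S3 backup.
  ansible.builtin.include_role:
    name: docker_s3_volume_restore
  vars:
    docker_backup_restore_latest_s3_key: true
    docker_backup_fail_on_no_s3_backups: false
    docker_backup_s3_volume: "{{ volume }}"
  loop:
    - { name: portainer_data }
    - { name: linkding_data }
  loop_control:
    loop_var: volume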

@@ -2,8 +2,9 @@
- hosts: localhost
connection: local
become: true
tasks:
- set_fact:
portainer_password: "portainer-password-{{ 10000 | random }}"
- name: Install Docker Module for Python
pip:
name:
@@ -50,7 +51,7 @@
method: POST
body:
Username: admin
Password: "adminadminadmin"
Password: "{{ portainer_password }}"
status_code: 200
body_format: json
register: result
@@ -79,8 +80,8 @@
name: docker_s3_volume_restore
vars:
docker_backup_restore_latest_s3_key: true
docker_backup_s3_restores:
- volume_name: portainer_data
docker_backup_s3_volume:
name: portainer_data
- name: Deploy Portainer
docker_compose:
@@ -93,7 +94,7 @@
method: POST
body:
Username: admin
Password: "adminadminadmin"
Password: "{{ portainer_password }}"
status_code: 200
body_format: json
register: result

@@ -34,17 +34,6 @@ external_docker_networks:
ansible_pull_path: /home/{{ homelab_user }}/.local/bin/ansible-pull
dashy:
source_file: dashboards/dashy-config.yml
config_file: dashy-config.yml
config_directory: /etc/config/dashy
# olivetin related config
olivetin:
source_file: olivetin/config.yml
config_file: config.yml
config_directory: /etc/config/OliveTin
portainer_required_files:
- source_file: dashboards/dashy-config.yml
dest_file_name: dashy-config.yml

@@ -1,6 +1,5 @@
---
- name: Update packages and ensure users on all hosts
tags: [always]
hosts: all
become: true
pre_tasks:

@@ -40,7 +40,7 @@
- name: "Stack {{ portainer_stack_name }} | Build list of volumes to restore."
ansible.builtin.set_fact:
restore_volumes: "{{ restore_volumes | default([]) + [{'volume_name':item}] }}"
restore_volumes: "{{ restore_volumes | default([]) + [{'name':item}] }}"
with_items: "{{ python_output.stdout_lines | list }}"
- name: "Stack {{ portainer_stack_name }} | Restore any missing volumes from S3"
@@ -50,7 +50,11 @@
vars:
docker_backup_restore_force: false
docker_backup_restore_latest_s3_key: true
docker_backup_s3_restores: "{{ restore_volumes }}"
docker_backup_fail_on_no_s3_backups: false
docker_backup_s3_volume: "{{ volume }}"
with_items: "{{ restore_volumes }}"
loop_control:
loop_var: volume
- name: "Stack {{ portainer_stack_name }} | Update Portainer."
chatton.portainer.portainer_stack:
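For reference, if python_output.stdout_lines were e.g. ["portainer_data", "linkding_data"] (illustrative values), the set_fact above would build restore_volumes as:

restore_volumes:
  - name: portainer_data
  - name: linkding_data

which is the shape the role now expects for docker_backup_s3_volume on each loop iteration.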
