From 7819ea4dd7548a69a2def3cb6c409b4ce3ee20e2 Mon Sep 17 00:00:00 2001
From: Cian Hatton
Date: Thu, 8 Sep 2022 21:48:48 +0100
Subject: [PATCH] specify a single volume in the role instead of a list (#15)

---
 .gitignore                                    |  1 +
 .../defaults/main.yml                         | 15 +++-
 roles/docker_s3_volume_restore/tasks/main.yml | 87 +++++++++----------
 tests/playbook.yml                            | 11 +--
 4 files changed, 61 insertions(+), 53 deletions(-)

diff --git a/.gitignore b/.gitignore
index e04276f..698a214 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 .idea
+__pycache__
 venv
diff --git a/roles/docker_s3_volume_restore/defaults/main.yml b/roles/docker_s3_volume_restore/defaults/main.yml
index 5d0d1f5..9e46ae4 100644
--- a/roles/docker_s3_volume_restore/defaults/main.yml
+++ b/roles/docker_s3_volume_restore/defaults/main.yml
@@ -7,10 +7,13 @@ docker_backup_restore_force: false
 # backup in the s3 backup. The expected format has the prefix volume_name/volume_name_*
 # this is the format that the "docker_s3_backup" role uploads them with.
 docker_backup_restore_latest_s3_key: false
-docker_backup_s3_restores: []
-# docker_backup_s3_restores:
-#   - volume_name: "linkding_data"
-#     s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
+docker_backup_s3_volume:
+  name: ""
+  s3_key: "" # optional
+
+# docker_backup_s3_volume:
+#   name: linkding_data
+#   s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"

 # dynamically find the latest linkding_data backup.
 # docker_backup_restore_latest_s3_key: true
@@ -23,3 +26,7 @@ docker_backup_aws_s3_url: ""
 docker_backup_aws_s3_aws_access_key: ""
 docker_backup_aws_s3_aws_secret_key: ""
 docker_backup_aws_s3_permissions: []
+
+# when true, the restore fails if there are no S3 backups for the volume.
+# when false, the remaining restore tasks are skipped instead.
+docker_backup_fail_on_no_s3_backups: true
diff --git a/roles/docker_s3_volume_restore/tasks/main.yml b/roles/docker_s3_volume_restore/tasks/main.yml
index 46e7c9d..bc36bfd 100644
--- a/roles/docker_s3_volume_restore/tasks/main.yml
+++ b/roles/docker_s3_volume_restore/tasks/main.yml
@@ -4,77 +4,73 @@

 - name: Ensure Volume.
   docker_volume:
-    name: "{{ item.volume_name }}"
+    name: "{{ docker_backup_s3_volume.name }}"
     state: present
   register: volume_out
-  with_items: "{{ docker_backup_s3_restores }}"

 - name: Determine if backup is needed.
-  ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force == true or volume_out.changed == true }}"
-
-- name: End play as no backup is needed.
-  ansible.builtin.meta: end_play
-  when: not should_perform_backup
+  ansible.builtin.set_fact: should_perform_backup="{{ docker_backup_restore_force or volume_out.changed }}"

 # try to find the latest backup based on the volume name.
 - name: Find latest s3 version.
-  when: docker_backup_restore_latest_s3_key
+  when:
+    - should_perform_backup
+    - docker_backup_restore_latest_s3_key
   amazon.aws.aws_s3:
     bucket: "{{ docker_backup_aws_s3_bucket }}"
     mode: list
-    prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
+    prefix: "{{ docker_backup_s3_volume.name }}/{{ docker_backup_s3_volume.name }}"
     aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
     aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
     region: "{{ docker_backup_aws_s3_region }}"
     s3_url: "{{ docker_backup_aws_s3_url }}"
   register: s3_list_output
-  with_items: "{{ docker_backup_s3_restores }}"
-
-- name: Extract S3 keys for container.
-  when: docker_backup_restore_latest_s3_key
-  ansible.builtin.set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
-  with_items: "{{ s3_list_output.results }}"
-
-- name: Extract volume names.
-  ansible.builtin.set_fact: volume_names="{{ docker_backup_s3_restores | map(attribute='volume_name') }}"
-  when: docker_backup_restore_latest_s3_key
+- name: Fail as no backups were found.
+  when:
+    - should_perform_backup
+    - docker_backup_fail_on_no_s3_backups
+    - s3_list_output.s3_keys | length == 0
+  ansible.builtin.fail:
+    msg: "There were no s3 backups found for {{ docker_backup_s3_volume.name }}"

-- name: Merge volume names and S3 keys.
-  ansible.builtin.set_fact:
-    docker_backup_s3_restores_new: "{{ docker_backup_s3_restores_new | default([]) + [ {'volume_name': item.0, 's3_key': item.1} ] }}"
-  when: docker_backup_restore_latest_s3_key
-  with_together:
-    - "{{ volume_names }}"
-    - "{{ container_s3_keys }}"
-
-- name: Set volumes to restore.
-  ansible.builtin.set_fact:
-    docker_backup_s3_restores_new: "{{ docker_backup_s3_restores }}"
-  when: not docker_backup_restore_latest_s3_key
+- name: Extract S3 key for container.
+  when:
+    - should_perform_backup
+    - docker_backup_restore_latest_s3_key
+    - s3_list_output.s3_keys | length > 0
+  ansible.builtin.set_fact: container_s3_key="{{ s3_list_output.s3_keys | last }}"

 - name: Create directories for /tmp file.
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   ansible.builtin.file:
-    path: '/tmp/{{ item.s3_key | dirname }}'
+    path: '/tmp/{{ container_s3_key | dirname }}'
     state: directory
     mode: '0755'
-  with_items: '{{ docker_backup_s3_restores_new }}'

 - name: Download archive from S3.
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   amazon.aws.aws_s3:
     bucket: "{{ docker_backup_aws_s3_bucket }}"
-    object: "{{ item.s3_key }}"
+    object: "{{ container_s3_key }}"
     aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
     aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
     region: "{{ docker_backup_aws_s3_region }}"
     s3_url: "{{ docker_backup_aws_s3_url }}"
     mode: get
-    dest: "/tmp/{{ item.s3_key }}"
+    dest: "/tmp/{{ container_s3_key }}"
   register: get_out
-  with_items: "{{ docker_backup_s3_restores_new }}"

 - name: Remove contents of volumes.
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   community.docker.docker_container:
-    name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
+    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
     image: ubuntu
     command: "rm -rf ./*"
     auto_remove: true
@@ -83,25 +79,28 @@
     # start inside the directory we want to wipe
     working_dir: "/data"
     volumes:
-      - "{{ item.volume_name }}:/data"
-  with_items: "{{ docker_backup_s3_restores_new }}"
+      - "{{ docker_backup_s3_volume.name }}:/data"

 - name: Restore contents of volumes
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   community.docker.docker_container:
-    name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
+    name: "restore-container-{{ docker_backup_s3_volume.name }}-{{ 100 | random }}"
     image: ubuntu
     # extract the tar into the volume.
-    command: "tar xvf /tmp/{{ item.s3_key }} -C /data --strip-components 1"
+    command: "tar xvf /tmp/{{ container_s3_key }} -C /data --strip-components 1"
     auto_remove: true
     detach: false # block until this container exits.
     state: started
     volumes:
-      - "{{ item.volume_name }}:/data"
+      - "{{ docker_backup_s3_volume.name }}:/data"
       - /tmp:/tmp
-  with_items: "{{ docker_backup_s3_restores_new }}"

 - name: Remove uploaded files from /tmp
+  when:
+    - should_perform_backup
+    - s3_list_output.s3_keys | length > 0
   ansible.builtin.file:
-    path: '/tmp/{{ item.s3_key }}'
+    path: '/tmp/{{ container_s3_key }}'
     state: absent
-  with_items: '{{ docker_backup_s3_restores_new }}'
diff --git a/tests/playbook.yml b/tests/playbook.yml
index c91d143..eb44f4c 100644
--- a/tests/playbook.yml
+++ b/tests/playbook.yml
@@ -2,8 +2,9 @@
 - hosts: localhost
   connection: local
   become: true
-
   tasks:
+    - set_fact:
+        portainer_password: "portainer-password-{{ 10000 | random }}"
     - name: Install Docker Module for Python
       pip:
         name:
@@ -50,7 +51,7 @@
         method: POST
         body:
           Username: admin
-          Password: "adminadminadmin"
+          Password: "{{ portainer_password }}"
         status_code: 200
         body_format: json
       register: result
@@ -79,8 +80,8 @@
         name: docker_s3_volume_restore
       vars:
         docker_backup_restore_latest_s3_key: true
-        docker_backup_s3_restores:
-          - volume_name: portainer_data
+        docker_backup_s3_volume:
+          name: portainer_data

     - name: Deploy Portainer
      docker_compose:
@@ -93,7 +94,7 @@
         method: POST
         body:
           Username: admin
-          Password: "adminadminadmin"
+          Password: "{{ portainer_password }}"
         status_code: 200
         body_format: json
       register: result
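
For reference, after this change a playbook passes the role a single volume rather than a list. The following is a minimal sketch of the new interface; the bucket, region, and credential values are placeholders to substitute with your own, and it assumes the role is resolvable on the roles path as docker_s3_volume_restore:

- hosts: localhost
  become: true
  tasks:
    - name: Restore a volume from its most recent S3 backup.
      ansible.builtin.include_role:
        name: docker_s3_volume_restore
      vars:
        # dynamically pick the newest key under portainer_data/ in the bucket
        docker_backup_restore_latest_s3_key: true
        docker_backup_s3_volume:
          name: portainer_data
        # set to false to skip the restore instead of failing when no backups exist
        docker_backup_fail_on_no_s3_backups: true
        # placeholder connection details (assumptions, not taken from the patch)
        docker_backup_aws_s3_bucket: "my-backup-bucket"
        docker_backup_aws_s3_region: "eu-west-1"
        docker_backup_aws_s3_aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
        docker_backup_aws_s3_aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"

Alternatively, set docker_backup_s3_volume.s3_key to an exact key (as in the defaults/main.yml example) and leave docker_backup_restore_latest_s3_key off to restore a specific archive.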