diff --git a/roles/docker_s3_backup/tasks/main.yml b/roles/docker_s3_backup/tasks/main.yml
index 372a62e..7722179 100644
--- a/roles/docker_s3_backup/tasks/main.yml
+++ b/roles/docker_s3_backup/tasks/main.yml
@@ -42,7 +42,7 @@
     s3_url: "{{ docker_s3_backup_aws_s3_url }}"
     bucket: "{{ docker_s3_backup_aws_s3_bucket }}"
     object: "{{ item.Name }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
-    src: {{ docker_s3_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz
+    src: "{{ docker_s3_backup_host_backup_directory }}/{{ item.Name }}-{{ backup_time }}.tar.gz"
     aws_access_key: "{{ docker_s3_backup_aws_s3_aws_access_key }}"
     aws_secret_key: "{{ docker_s3_backup_aws_s3_aws_secret_key }}"
     region: "{{ docker_s3_backup_aws_s3_region }}"
diff --git a/roles/docker_s3_volume_restore/defaults/main.yml b/roles/docker_s3_volume_restore/defaults/main.yml
index 488f6a7..4700094 100644
--- a/roles/docker_s3_volume_restore/defaults/main.yml
+++ b/roles/docker_s3_volume_restore/defaults/main.yml
@@ -3,11 +3,20 @@
 # forces a revert to the volume.
 docker_s3_volume_restore_force: false
 
+# Set docker_s3_volume_restore_latest_s3_key to true to automatically determine the latest
+# backup in the S3 bucket. The expected key prefix is volume_name/volume_name-*, which is
+# the format the "docker_s3_backup" role uploads backups with.
+docker_s3_volume_restore_latest_s3_key: false
 docker_s3_volume_restores: []
-#docker_s3_volume_restores:
+#docker_volume_s3_restores:
 # - volume_name: "linkding_data"
 #   s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
+
+# Dynamically find the latest linkding_data backup:
+#docker_s3_volume_restore_latest_s3_key: true
+#docker_volume_s3_restores:
+# - volume_name: "linkding_data"
 
 docker_s3_volume_restore_aws_s3_region: "us-east-1"
 docker_s3_volume_restore_aws_s3_bucket: "backups"
 docker_s3_volume_restore_aws_s3_url: ""
diff --git a/roles/docker_s3_volume_restore/tasks/main.yml b/roles/docker_s3_volume_restore/tasks/main.yml
index 3626124..a320938 100644
--- a/roles/docker_s3_volume_restore/tasks/main.yml
+++ b/roles/docker_s3_volume_restore/tasks/main.yml
@@ -10,12 +10,69 @@
   with_items: "{{ docker_volume_s3_restores }}"
 
 - name: Determine if backup is needed.
-  set_fact: should_perform_backup="{{ docker_volume_s3_force == true or volume_out.changed == true }}"
+  set_fact: should_perform_backup="{{ docker_s3_volume_restore_force == true or volume_out.changed == true }}"
 
 - name: End play as no backup is needed.
   meta: end_play
   when: not should_perform_backup
 
+# Try to find the latest backup for each volume based on its name.
+- name: Find latest S3 version.
+  amazon.aws.aws_s3:
+    bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
+    mode: list
+    prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
+    aws_access_key: "{{ docker_s3_volume_restore_aws_s3_aws_access_key }}"
+    aws_secret_key: "{{ docker_s3_volume_restore_aws_s3_aws_secret_key }}"
+    region: "{{ docker_s3_volume_restore_aws_s3_region }}"
+    s3_url: "{{ docker_s3_volume_restore_aws_s3_url }}"
+  register: s3_list_output
+  when: docker_s3_volume_restore_latest_s3_key
+  with_items: "{{ docker_volume_s3_restores }}"
+
+- debug: msg="{{ s3_list_output }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+# Keys are returned in lexicographic order, so the last key with the
+# ISO 8601 timestamp suffix is the most recent backup.
+- name: Extract s3 keys for container.
+  set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
+  when: docker_s3_volume_restore_latest_s3_key
+  with_items: "{{ s3_list_output.results }}"
+
+- debug: msg="{{ container_s3_keys }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+- set_fact: volume_names="{{ docker_volume_s3_restores | map(attribute='volume_name') | list }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+- debug: msg="volume_names={{ volume_names }}"
+  when: docker_s3_volume_restore_latest_s3_key
+- debug: msg="container_s3_keys={{ container_s3_keys }}"
+  when: docker_s3_volume_restore_latest_s3_key
+
+# Rebuild the restore list, pairing each volume with its latest s3_key.
+- set_fact:
+    docker_volume_s3_restores_new: "{{ docker_volume_s3_restores_new | default([]) + [{'volume_name': item.0, 's3_key': item.1}] }}"
+  when: docker_s3_volume_restore_latest_s3_key
+  with_together:
+    - "{{ volume_names }}"
+    - "{{ container_s3_keys }}"
+
+# Otherwise use the restore list exactly as provided.
+- set_fact:
+    docker_volume_s3_restores_new: "{{ docker_volume_s3_restores }}"
+  when: not docker_s3_volume_restore_latest_s3_key
+
+- debug: msg="{{ docker_volume_s3_restores_new }}"
+
+- name: Create directories under /tmp for the downloaded archives.
+  file:
+    path: '/tmp/{{ item.s3_key | dirname }}'
+    state: directory
+    mode: '0755'
+  with_items: '{{ docker_volume_s3_restores_new }}'
+
 - name: Download archive from S3
   amazon.aws.aws_s3:
     bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
@@ -27,7 +84,7 @@
     mode: get
     dest: "/tmp/{{ item.s3_key }}"
   register: get_out
-  with_items: "{{ docker_volume_s3_restores }}"
+  with_items: "{{ docker_volume_s3_restores_new }}"
 
 - name: Remove contents of volumes
   community.docker.docker_container:
@@ -41,7 +98,7 @@
     working_dir: "/data"
     volumes:
       - "{{ item.volume_name }}:/data"
-  with_items: "{{ docker_volume_s3_restores }}"
+  with_items: "{{ docker_volume_s3_restores_new }}"
 
 - name: Restore contents of volumes
   community.docker.docker_container:
@@ -55,4 +112,10 @@
     volumes:
      - "{{ item.volume_name }}:/data"
      - /tmp:/tmp
-  with_items: "{{ docker_volume_s3_restores }}"
+  with_items: "{{ docker_volume_s3_restores_new }}"
+
+- name: Remove downloaded archives from /tmp
+  file:
+    path: '/tmp/{{ item.s3_key }}'
+    state: absent
+  with_items: '{{ docker_volume_s3_restores_new }}'
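Usage sketch (not part of the diff): with docker_s3_volume_restore_latest_s3_key enabled, a restore entry only needs a volume_name and the role resolves the newest volume_name/volume_name-*.tar.gz key itself. The play name and hosts pattern below are illustrative assumptions; the role and variable names come from the changes above.

- name: Restore Docker volumes from the latest S3 backups   # illustrative play
  hosts: all                                                 # assumption: adjust to your inventory
  roles:
    - role: docker_s3_volume_restore
      vars:
        docker_s3_volume_restore_latest_s3_key: true
        docker_volume_s3_restores:
          - volume_name: "linkding_data"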