diff --git a/roles/docker_s3_backup/defaults/main.yml b/roles/docker_s3_backup/defaults/main.yml
index dcc83ee..4d77d44 100644
--- a/roles/docker_s3_backup/defaults/main.yml
+++ b/roles/docker_s3_backup/defaults/main.yml
@@ -1,6 +1,9 @@
 ---
 # defaults file for chatton.docker_s3_backup
+# the number of backups of the same volume to keep.
+# any excess backups will be deleted.
+docker_backup_retain_count: 3
 
 # the backup directory where backups are stored on the host machine.
 # these will be uploaded to S3.
 docker_backup_aws_s3_region: "us-east-1"
diff --git a/roles/docker_s3_backup/files/scripts/determine-s3-keys-to-delete.py b/roles/docker_s3_backup/files/scripts/determine-s3-keys-to-delete.py
new file mode 100644
index 0000000..44435b0
--- /dev/null
+++ b/roles/docker_s3_backup/files/scripts/determine-s3-keys-to-delete.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+import ast
+import os
+
+
+def main():
+    # the Ansible task passes these values in as environment variables.
+    # ast.literal_eval parses the rendered list safely; unlike eval, it
+    # cannot execute arbitrary code.
+    s3_result = ast.literal_eval(os.getenv("S3_RESULTS"))
+    num_backups_to_keep = int(os.getenv("NUM_BACKUPS_TO_KEEP"))
+
+    items_to_delete = []
+    for res in s3_result:
+        # S3 lists keys in lexicographic order, so timestamped backup
+        # names sort oldest first.
+        s3_keys = res["s3_keys"]
+        # everything before the last num_backups_to_keep keys is surplus.
+        # these are the ones we want to delete.
+        items_to_delete.extend(s3_keys[:-num_backups_to_keep])
+
+    for item in items_to_delete:
+        print(item)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/roles/docker_s3_backup/tasks/main.yml b/roles/docker_s3_backup/tasks/main.yml
index bc5dc02..36e655e 100644
--- a/roles/docker_s3_backup/tasks/main.yml
+++ b/roles/docker_s3_backup/tasks/main.yml
@@ -55,3 +55,36 @@
     mode: put
     permission: "{{ docker_backup_aws_s3_permissions }}"
   with_items: "{{ volume_mounts }}"
+
+# list the existing backups of each volume, keyed by the volume name.
+- name: Fetch existing backups from S3.
+  amazon.aws.aws_s3:
+    bucket: "{{ docker_backup_aws_s3_bucket }}"
+    mode: list
+    prefix: "{{ item.Name }}/{{ item.Name }}"
+    aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
+    aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
+    region: "{{ docker_backup_aws_s3_region }}"
+    s3_url: "{{ docker_backup_aws_s3_url }}"
+  register: s3_list_outputs
+  with_items: "{{ volume_mounts }}"
+
+# TODO: do this in a more native way rather than with a Python script reading env vars.
+- name: Determine which backups should be deleted.
+  script: scripts/determine-s3-keys-to-delete.py
+  environment:
+    S3_RESULTS: "{{ s3_list_outputs.results }}"
+    NUM_BACKUPS_TO_KEEP: "{{ docker_backup_retain_count }}"
+  register: python_output
+  changed_when: false
+
+- name: Delete old backups.
+  amazon.aws.aws_s3:
+    bucket: "{{ docker_backup_aws_s3_bucket }}"
+    aws_access_key: "{{ docker_backup_aws_s3_aws_access_key }}"
+    aws_secret_key: "{{ docker_backup_aws_s3_aws_secret_key }}"
+    region: "{{ docker_backup_aws_s3_region }}"
+    s3_url: "{{ docker_backup_aws_s3_url }}"
+    object: "{{ item }}"
+    mode: delobj
+  with_items: "{{ python_output.stdout_lines }}"
diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml
index 85d3d04..734a26d 100755
--- a/tests/docker-compose.yml
+++ b/tests/docker-compose.yml
@@ -8,9 +8,11 @@ services:
       - 9000:9000
     volumes:
       - portainer_data:/data
+      - some_volume:/some_dir
       - /var/run/docker.sock:/var/run/docker.sock
 
 volumes:
+  some_volume:
   portainer_data:
     external: true
     name: portainer_data
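
For context, the new docker_backup_retain_count default can be overridden per play. A minimal sketch of a consuming playbook; the host group name and the value 7 are made up for illustration:

- hosts: backup_hosts
  roles:
    - role: chatton.docker_s3_backup
      vars:
        # hypothetical override: keep a week of backups instead of the default 3.
        docker_backup_retain_count: 7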
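
On the TODO in tasks/main.yml: one possible native alternative is accumulating the surplus keys with set_fact and Jinja2 list slicing instead of shelling out to the script. This is only a sketch, not a drop-in replacement; old_backup_keys is a hypothetical fact name, and the slice assumes Jinja2's Python-style list slicing:

- name: Determine which backups should be deleted.
  set_fact:
    old_backup_keys: "{{ (old_backup_keys | default([])) + item.s3_keys[:-(docker_backup_retain_count | int)] }}"
  with_items: "{{ s3_list_outputs.results }}"

The delete task could then iterate over old_backup_keys directly rather than python_output.stdout_lines.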