From 69c13e37c30a39f175ef532858ce830bbfc6c919 Mon Sep 17 00:00:00 2001
From: Cian Hatton
Date: Sat, 13 Aug 2022 22:09:56 +0100
Subject: [PATCH] Restore Volumes from S3 when they are not present (#4)

---
 .../setup_hosted_services/defaults/main.yml   | 14 ++++++-
 .../files/gitea/docker-compose.yml            | 21 ++++++++++
 .../files/scripts/find-volumes-to-restore.py  | 24 ++++++++++++
 .../setup_hosted_services/tasks/main.yml      | 41 +++++++++++++++++--
 secrets-example.yml                           |  2 +
 5 files changed, 98 insertions(+), 4 deletions(-)
 create mode 100644 ansible/roles/setup_hosted_services/files/gitea/docker-compose.yml
 create mode 100644 ansible/roles/setup_hosted_services/files/scripts/find-volumes-to-restore.py

diff --git a/ansible/roles/setup_hosted_services/defaults/main.yml b/ansible/roles/setup_hosted_services/defaults/main.yml
index 76291c0..64855c3 100644
--- a/ansible/roles/setup_hosted_services/defaults/main.yml
+++ b/ansible/roles/setup_hosted_services/defaults/main.yml
@@ -1,10 +1,22 @@
 ---
 docker_compose_directory: /etc/docker-compose
 services:
+  - name: gitea
+    volumes: ["gitea_data"]
   - name: mealie
+    volumes: ["mealie_data"]
   - name: linkding
+    volumes: ["linkding_data"]
   - name: overseerr
+    volumes: ["overseerr_config"]
   - name: nextcloud
+    volumes: ["nextcloud_data"]

 docker_networks:
-  - nextcloud_net
\ No newline at end of file
+  - nextcloud_net
+
+aws_s3:
+  s3_url: "l8x8.ie11.idrivee2-6.com"
+  aws_access_key: "nyNMQ3fRMSV0bA1xw5uV"
+  region: "us-east-1"
+  bucket: "backups"
diff --git a/ansible/roles/setup_hosted_services/files/gitea/docker-compose.yml b/ansible/roles/setup_hosted_services/files/gitea/docker-compose.yml
new file mode 100644
index 0000000..89e16c0
--- /dev/null
+++ b/ansible/roles/setup_hosted_services/files/gitea/docker-compose.yml
@@ -0,0 +1,21 @@
+version: "3"
+
+services:
+  gitea:
+    labels:
+      ie.cianhatton.backup.enabled: "true"
+    image: gitea/gitea:1.16.9
+    container_name: gitea
+    environment:
+      - USER_UID=1000
+      - USER_GID=1000
+    restart: unless-stopped
+    volumes:
+      - data:/data
+      - /etc/timezone:/etc/timezone:ro
+      - /etc/localtime:/etc/localtime:ro
+    ports:
+      - "3000:3000"
+      - "222:22"
+volumes:
+  data:
\ No newline at end of file
diff --git a/ansible/roles/setup_hosted_services/files/scripts/find-volumes-to-restore.py b/ansible/roles/setup_hosted_services/files/scripts/find-volumes-to-restore.py
new file mode 100644
index 0000000..bd7f425
--- /dev/null
+++ b/ansible/roles/setup_hosted_services/files/scripts/find-volumes-to-restore.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+import ast
+import os
+
+
+def main():
+    # Ansible passes both values in as Python-style list literals.
+    existing = ast.literal_eval(os.getenv("EXISTING_VOLUMES", "[]"))
+    services = ast.literal_eval(os.getenv("SERVICES", "[]"))
+
+    # Collect every volume a service declares that is not present on the host.
+    missing_volumes = []
+    for service in services:
+        for volume_name in service.get("volumes", []):
+            if volume_name not in existing:
+                missing_volumes.append(volume_name)
+
+    # One volume name per line, consumed by the restore task.
+    for mv in missing_volumes:
+        print(mv)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible/roles/setup_hosted_services/tasks/main.yml b/ansible/roles/setup_hosted_services/tasks/main.yml
index 9d8c36b..665be9a 100644
--- a/ansible/roles/setup_hosted_services/tasks/main.yml
+++ b/ansible/roles/setup_hosted_services/tasks/main.yml
@@ -15,9 +15,44 @@
   ansible.builtin.pip:
     name: requests

-- name: Install python dependencies (boto3)
-  ansible.builtin.pip:
-    name: boto3
+- name: Find docker volumes
+  shell: docker volume ls -f name={{item.name}} --format '{{ '{{' }} .Name {{ '}}' }}'
+  with_items: "{{services}}"
+  register: find_volumes
+  changed_when: False
+
+- debug: msg="{{find_volumes.results | map(attribute='stdout_lines') | list | flatten }}"
+
+- name: Find volumes that need to be restored
+  script: scripts/find-volumes-to-restore.py
+  environment:
+    EXISTING_VOLUMES: "{{ find_volumes.results | map(attribute='stdout_lines') | list | flatten }}"
+    SERVICES: "{{ services }}"
+  args:
+    executable: python3
+  register: python_output
+  changed_when: False
+
+- debug: msg="{{python_output.stdout_lines | list }}"
+
+- name: Restore any missing backups from S3
+  docker_container:
+    command: "restore-volume --s3 --volume {{item}}"
+    image: "ghcr.io/chatton/docker-volume-backup:v0.3.0"
+    name: "s3-restore-{{item}}"
+    cleanup: true # delete container after it's done.
+    state: started # run the container.
+    detach: no # wait for the restore to finish; fail the task if it does not succeed.
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /tmp:/tmp # temp s3 archive goes here
+    env:
+      AWS_ACCESS_KEY_ID: "{{aws_s3.aws_access_key}}"
+      AWS_SECRET_ACCESS_KEY: "{{aws_s3_secrets.aws_secret_key}}"
+      AWS_DEFAULT_REGION: "{{aws_s3.region}}"
+      AWS_BUCKET: "{{aws_s3.bucket}}"
+      AWS_ENDPOINT: "{{aws_s3.s3_url}}"
+    with_items: "{{ python_output.stdout_lines }}"

 - name: Create required docker networks
   docker_network:
diff --git a/secrets-example.yml b/secrets-example.yml
index be42b0f..ac9a1ad 100644
--- a/secrets-example.yml
+++ b/secrets-example.yml
@@ -1,2 +1,4 @@
 portainer:
   password: ""
+aws_s3_secrets:
+  aws_secret_key: ""
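
Reviewer note, not part of the patch: the contract between the "Find volumes
that need to be restored" task and the restore loop is simply "missing volume
names on stdout, one per line". The sketch below exercises that contract
locally. It assumes python3 is on PATH and the repository root is the working
directory; the volume and service values are invented for illustration, only
the EXISTING_VOLUMES/SERVICES variable names and the script path come from
the changes above.

# Local sanity check for find-volumes-to-restore.py (illustrative values only).
import os
import subprocess

env = {
    **os.environ,
    # Env var names match the role above; the contents are made-up examples.
    "EXISTING_VOLUMES": "['gitea_data', 'mealie_data']",
    "SERVICES": "[{'name': 'gitea', 'volumes': ['gitea_data']},"
                " {'name': 'linkding', 'volumes': ['linkding_data']}]",
}
result = subprocess.run(
    ["python3", "ansible/roles/setup_hosted_services/files/scripts/find-volumes-to-restore.py"],
    env=env,
    capture_output=True,
    text=True,
    check=True,
)
# Only volumes declared for a service but missing from EXISTING_VOLUMES are
# printed; each printed name becomes one item of the restore task's with_items.
print(result.stdout)  # expected: linkding_data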