Restore backups from S3 when there is no volume present

pull/4/head
Cian Hatton 3 years ago
parent 908eac4bc3
commit c0ea0fb203

@@ -1,16 +1,22 @@
 ---
 docker_compose_directory: /etc/docker-compose
 services:
   - name: gitea
     volumes: ["gitea_data"]
   - name: mealie
-# - name: linkding
-# - name: overseerr
-# - name: nextcloud
+    volumes: ["mealie_data"]
+  - name: linkding
+    volumes: ["linkding_data"]
+  - name: overseerr
+    volumes: ["overseerr_config"]
+  - name: nextcloud
+    volumes: ["nextcloud_data"]
 docker_networks:
   - nextcloud_net
 aws_s3:
-  s3_url: "https://l8x8.ie11.idrivee2-6.com"
+  s3_url: "l8x8.ie11.idrivee2-6.com"
   aws_access_key: "nyNMQ3fRMSV0bA1xw5uV"
   region: "us-east-1"
   bucket: "backups"
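
The matching secret key is not stored in this vars file; the tasks below reference aws_s3_secrets.aws_secret_key, which the playbook loads from ../secrets.yml. As a quick sanity check that the endpoint, bucket and credentials line up, the stock AWS CLI can list the bucket (a sketch, assuming the AWS CLI is installed; the secret key value comes from secrets.yml and is elided here):

    # List the backup archives in the IDrive e2 bucket.
    export AWS_ACCESS_KEY_ID="nyNMQ3fRMSV0bA1xw5uV"
    export AWS_SECRET_ACCESS_KEY="..."   # from secrets.yml, never committed
    aws s3 ls "s3://backups/" \
      --region us-east-1 \
      --endpoint-url "https://l8x8.ie11.idrivee2-6.com"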

@@ -0,0 +1,21 @@
+version: "3"
+services:
+  gitea:
+    labels:
+      ie.cianhatton.backup.enabled: "true"
+    image: gitea/gitea:1.16.9
+    container_name: gitea
+    environment:
+      - USER_UID=1000
+      - USER_GID=1000
+    restart: unless-stopped
+    volumes:
+      - data:/data
+      - /etc/timezone:/etc/timezone:ro
+      - /etc/localtime:/etc/localtime:ro
+    ports:
+      - "3000:3000"
+      - "222:22"
+volumes:
+  data:
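
The ie.cianhatton.backup.enabled label presumably marks the container for the companion backup job; the restore path below works purely from volume names. Once the stack is up, the label can be verified on the running container (a sketch, assuming the compose file above has been deployed):

    # Confirm the backup label is set on the running gitea container.
    docker inspect gitea \
      --format '{{ index .Config.Labels "ie.cianhatton.backup.enabled" }}'
    # Expected output: true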

@@ -0,0 +1,20 @@
+#!/usr/bin/python
+import ast
+import os
+
+
+def main():
+    # Ansible renders these env vars as Python literals (a list of existing
+    # volume names and a list of service dicts); parse them with
+    # ast.literal_eval rather than eval() so arbitrary code is never executed.
+    existing = ast.literal_eval(os.getenv("EXISTING_VOLUMES"))
+    services = ast.literal_eval(os.getenv("SERVICES"))
+    missing_volumes = []
+    for service in services:
+        for volume_name in service.get("volumes", []):
+            if volume_name not in existing:
+                missing_volumes.append(volume_name)
+    # One volume name per line; the Ansible task consumes stdout_lines.
+    for mv in missing_volumes:
+        print(mv)
+
+
+if __name__ == "__main__":
+    main()
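
The script is driven entirely by the two environment variables that the Ansible task below renders as Python literals, so it can be exercised by hand the same way (a sketch with made-up data; gitea_data already exists, so only mealie_data should be printed):

    EXISTING_VOLUMES="['gitea_data']" \
    SERVICES="[{'name': 'gitea', 'volumes': ['gitea_data']}, {'name': 'mealie', 'volumes': ['mealie_data']}]" \
    python3 find-volumes-to-restore.py
    # Output:
    # mealie_data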

@@ -15,34 +15,44 @@
  ansible.builtin.pip:
    name: requests
- name: Install python dependencies (boto3)
  ansible.builtin.pip:
    name: boto3
- name: Find relevant volume(s) in S3
  amazon.aws.aws_s3:
    bucket: "{{ aws_s3.bucket }}"
    mode: list
    region: "{{ aws_s3.region }}"
    s3_url: "{{ aws_s3.s3_url }}"
    prefix: "mealie"
    aws_access_key: "{{ aws_s3.aws_access_key }}"
    aws_secret_key: "{{ aws_s3_secrets.aws_secret_key }}"
  register: s3_list_output
- debug: msg="{{ s3_list_output.s3_keys }}"
- name: Download volume(s) from S3
  amazon.aws.aws_s3:
    bucket: "{{ aws_s3.bucket }}"
    object: "{{ item }}"
    dest: "/tmp/{{ item }}"
    mode: get
    region: "{{ aws_s3.region }}"
    s3_url: "{{ aws_s3.s3_url }}"
    aws_access_key: "{{ aws_s3.aws_access_key }}"
    aws_secret_key: "{{ aws_s3_secrets.aws_secret_key }}"
  with_items: "{{ s3_list_output.s3_keys }}"
- name: Find docker volumes
  shell: docker volume ls -f name={{ item.name }} --format '{{ '{{' }} .Name {{ '}}' }}'
  with_items: "{{ services }}"
  register: find_volumes
  changed_when: False
- debug: msg="{{ find_volumes.results | map(attribute='stdout_lines') | list | flatten }}"
- name: Find volumes that need to be restored
  script: scripts/find-volumes-to-restore.py
  environment:
    EXISTING_VOLUMES: "{{ find_volumes.results | map(attribute='stdout_lines') | list | flatten }}"
    SERVICES: "{{ services }}"
  args:
    executable: python3
  register: python_output
  changed_when: False
- debug: msg="{{ python_output.stdout_lines | list }}"
- name: Restore any missing backups from S3
  docker_container:
    command: "restore-volume --s3 --volume {{ item }}"
    image: "ghcr.io/chatton/docker-volume-backup:v0.3.0"
    name: "s3-restore-{{ item }}"
    cleanup: true   # delete the container once it has finished
    state: started  # the container should run the restore command
    detach: no      # wait for the container; fail the task on a non-zero exit
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp:/tmp   # the temporary S3 archives are downloaded here
    env:
      AWS_ACCESS_KEY_ID: "{{ aws_s3.aws_access_key }}"
      AWS_SECRET_ACCESS_KEY: "{{ aws_s3_secrets.aws_secret_key }}"
      AWS_DEFAULT_REGION: "{{ aws_s3.region }}"
      AWS_BUCKET: "{{ aws_s3.bucket }}"
      AWS_ENDPOINT: "{{ aws_s3.s3_url }}"
  with_items: "{{ python_output.stdout_lines }}"
- name: Create required docker networks
  docker_network:
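
The restore task is a one-shot container: cleanup, state and detach together make it run in the foreground once per missing volume and fail the play if a restore fails. The manual equivalent for a single volume looks like this (a sketch mirroring the image, command and environment wired up above; the secret key is elided):

    # Restore one volume from S3 by hand.
    docker run --rm \
      -v /var/run/docker.sock:/var/run/docker.sock \
      -v /tmp:/tmp \
      -e AWS_ACCESS_KEY_ID="nyNMQ3fRMSV0bA1xw5uV" \
      -e AWS_SECRET_ACCESS_KEY="..." \
      -e AWS_DEFAULT_REGION="us-east-1" \
      -e AWS_BUCKET="backups" \
      -e AWS_ENDPOINT="l8x8.ie11.idrivee2-6.com" \
      ghcr.io/chatton/docker-volume-backup:v0.3.0 \
      restore-volume --s3 --volume mealie_data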

@@ -4,7 +4,7 @@
   vars_files:
     - ../secrets.yml
   roles:
-    # - role: 'roles/setup_users'
-    # - role: 'roles/setup_docker'
-    # - role: 'roles/setup_portainer'
+    - role: 'roles/setup_users'
+    - role: 'roles/setup_docker'
+    - role: 'roles/setup_portainer'
     - role: 'roles/setup_hosted_services'
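
With the three roles uncommented, the playbook provisions a host end to end before the hosted services come up. A typical invocation (a sketch; the playbook and inventory file names are hypothetical):

    # secrets.yml is picked up via vars_files, relative to the playbook.
    ansible-playbook -i inventory.yml site.yml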
