Add ci for ansible lint (#6)

pull/11/head
Cian Hatton 3 years ago committed by GitHub
parent 3d93af7a5e
commit a582d1d902
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -0,0 +1,18 @@
---
# CI workflow: runs ansible-lint against the roles/ directory.
# Triggers: manual dispatch, or any push.
name: Code Health
on:
  workflow_dispatch:
  push:
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout the codebase
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          # Quoted so the version is read as a string, not a number.
          python-version: "3.8.9"
      - name: Install dependencies
        run: pip install -r requirements.txt
      - name: Test Directory
        run: ansible-lint roles

1
.gitignore vendored

@ -1 +1,2 @@
.idea
venv

@ -0,0 +1,37 @@
ansible==6.3.0
ansible-compat==2.2.0
ansible-core==2.13.3
ansible-lint==6.5.2
attrs==22.1.0
black==22.8.0
bracex==2.3.post1
cffi==1.15.1
click==8.1.3
commonmark==0.9.1
cryptography==37.0.4
enrich==1.2.7
filelock==3.8.0
importlib-resources==5.9.0
Jinja2==3.1.2
jsonschema==4.15.0
MarkupSafe==2.1.1
mypy-extensions==0.4.3
packaging==21.3
pathspec==0.10.1
pkgutil_resolve_name==1.3.10
platformdirs==2.5.2
pycparser==2.21
Pygments==2.13.0
pyparsing==3.0.9
pyrsistent==0.18.1
PyYAML==6.0
resolvelib==0.8.1
rich==12.5.1
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.6
subprocess-tee==0.3.5
tomli==2.0.1
typing_extensions==4.3.0
wcmatch==8.4
yamllint==1.27.1
zipp==3.8.1

@ -1,52 +1,11 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
author: Cian Hatton
description: Role which backs up a docker volume to s3.
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
platforms:
- name: Debian
versions:
- all
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

@ -3,7 +3,7 @@
# https://docs.docker.com/storage/volumes/#backup-restore-or-migrate-data-volumes
- name: Determine backup timestamp.
set_fact: backup_time="{{ ansible_date_time.iso8601 }}"
ansible.builtin.set_fact: backup_time="{{ ansible_date_time.iso8601 }}"
- name: Stop a container
community.docker.docker_container:
@ -16,7 +16,7 @@
register: result
- name: Extract only the volume mounts (not bind mounts)
set_fact: volume_mounts="{{ result.container.Mounts | selectattr("Type", "equalto", "volume")}}"
ansible.builtin.set_fact: volume_mounts="{{ result.container.Mounts | selectattr("Type", "equalto", "volume") }}"
- name: Create Backup of Container Volumes
community.docker.docker_container:

@ -8,13 +8,13 @@ docker_s3_volume_restore_force: false
# this is the format that the "docker_s3_backup" role uploads them with.
docker_s3_volume_restore_latest_s3_key: false
docker_s3_volume_restores: []
#docker_volume_s3_restores:
# docker_volume_s3_restores:
# - volume_name: "linkding_data"
# s3_key: "linkding_data/linkding_data-2022-09-01T21:32:54Z.tar.gz"
# dynamically find the latest linkding_data backup.
# docker_s3_volume_restore_latest_s3_key: true
#docker_volume_s3_restores:
# docker_volume_s3_restores:
# - volume_name: "linkding_data"
docker_s3_volume_restore_aws_s3_region: "us-east-1"

@ -1,52 +1,11 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
author: Cian Hatton
description: Role which restores a docker volume from s3.
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
platforms:
- name: Debian
versions:
- all
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

@ -10,10 +10,10 @@
with_items: "{{ docker_volume_s3_restores }}"
- name: Determine if backup is needed.
set_fact: should_perform_backup="{{ docker_s3_volume_restore_force == true or volume_out.changed == true }}"
ansible.builtin.set_fact: should_perform_backup="{{ docker_s3_volume_restore_force == true or volume_out.changed == true }}"
- name: End play as no backup is needed.
meta: end_play
ansible.builtin.meta: end_play
when: not should_perform_backup
# try and find latest volume based on the name.
@ -23,7 +23,6 @@
bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
mode: list
prefix: "{{ item.volume_name }}/{{ item.volume_name }}"
bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
aws_access_key: "{{ docker_s3_volume_restore_aws_s3_aws_access_key }}"
aws_secret_key: "{{ docker_s3_volume_restore_aws_s3_aws_secret_key }}"
region: "{{ docker_s3_volume_restore_aws_s3_region }}"
@ -31,49 +30,36 @@
register: s3_list_output
with_items: "{{ docker_volume_s3_restores }}"
- debug: msg="{{ s3_list_output }}"
when: docker_s3_volume_restore_latest_s3_key
- name: Extract s3 keys for container
- name: Extract S3 keys for container.
when: docker_s3_volume_restore_latest_s3_key
set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
ansible.builtin.set_fact: container_s3_keys="{{ container_s3_keys | default([]) + [item.s3_keys | last] }}"
with_items: "{{ s3_list_output.results }}"
- debug: msg="{{ container_s3_keys }}"
when: docker_s3_volume_restore_latest_s3_key
- set_fact: volume_names="{{ docker_volume_s3_restores | map(attribute='volume_name') }}"
- name: Extract volume names.
ansible.builtin.set_fact: volume_names="{{ docker_volume_s3_restores | map(attribute='volume_name') }}"
when: docker_s3_volume_restore_latest_s3_key
# remove existing values so we can determine the s3 key automatically.
#- set_fact: docker_volume_s3_restores_new="{{ [] }}"
# when: docker_s3_volume_restore_latest_s3_key
- debug: msg="volume_names={{ volume_names }}"
- debug: msg="container_s3_keys={{ container_s3_keys }}"
- set_fact:
- name: Merge volume names and S3 keys.
ansible.builtin.set_fact:
docker_volume_s3_restores_new: "{{ docker_volume_s3_restores_new | default([]) + [ {'volume_name': item.0, 's3_key': item.1} ] }}"
when: docker_s3_volume_restore_latest_s3_key
with_together:
- "{{ volume_names }}"
- "{{ container_s3_keys }}"
- debug: msg="{{ docker_volume_s3_restores_new }}"
- set_fact:
- name: Set volumes to restore.
ansible.builtin.set_fact:
docker_volume_s3_restores_new: "{{ docker_volume_s3_restores }}"
when: not docker_s3_volume_restore_latest_s3_key
- debug: msg="{{ docker_volume_s3_restores_new }}"
- name: Create directories for /tmp file
file:
- name: Create directories for /tmp file.
ansible.builtin.file:
path: '/tmp/{{ item.s3_key | dirname }}'
state: directory
mode: '0755'
with_items: '{{ docker_volume_s3_restores_new }}'
- name: Download archive from S3
- name: Download archive from S3.
amazon.aws.aws_s3:
bucket: "{{ docker_s3_volume_restore_aws_s3_bucket }}"
object: "{{ item.s3_key }}"
@ -86,7 +72,7 @@
register: get_out
with_items: "{{ docker_volume_s3_restores_new }}"
- name: Remove contents of volumes
- name: Remove contents of volumes.
community.docker.docker_container:
name: "restore-container-{{ item.volume_name }}-{{ 10 | random }}"
image: ubuntu
@ -115,7 +101,7 @@
with_items: "{{ docker_volume_s3_restores_new }}"
- name: Remove uploaded files from /tmp
file:
ansible.builtin.file:
path: '/tmp/{{ item.s3_key }}'
state: absent
with_items: '{{ docker_volume_s3_restores_new }}'

Loading…
Cancel
Save