add move roles

This commit is contained in:
Alexander Buntakov 2020-03-07 14:54:40 +03:00
parent 596f285ecc
commit 1bf68612f7
51 changed files with 1318 additions and 0 deletions

View File

@ -0,0 +1,29 @@
---
# docker.deployment.prepare — derive per-deployment paths and create the
# deployment and volume directories. Callers must supply `deployment_name`
# (and may supply deployment_user / volume_user) via role vars.
- name: DOCKER.DEPLOYMENT.PREPARE | Set facts
  set_fact:
    docker_deployment__deploy_path: "{{ system__deploy_user_home }}/{{ deployment_name }}"
    docker_deployment__volume_path: "{{ system__volume_directory }}/{{ deployment_name }}"
    docker_deployment__deploy_user_name: "{{ system__deploy_user_name }}"
  tags:
    - prepare

- name: DOCKER.DEPLOYMENT.PREPARE | Create deployment directory
  file:
    path: "{{ docker_deployment__deploy_path }}"
    state: directory
    # Quoted so YAML cannot re-type the octal literal.
    mode: "0755"
    owner: "{{ deployment_user | default(system__deploy_user_name) }}"
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - prepare

- name: DOCKER.DEPLOYMENT.PREPARE | Create volume directory
  file:
    path: "{{ docker_deployment__volume_path }}"
    state: directory
    mode: "0755"
    owner: "{{ volume_user | default(docker_deployment__deploy_user_name) }}"
  # No become_user here: the volume directory may live outside the deploy
  # user's home, so this runs as root.
  become: true
  tags:
    - prepare

View File

@ -0,0 +1,6 @@
---
# Traefik v1 reverse-proxy image settings.
traefik__image_name: traefik
traefik__image_tag: v1.7.20
# Fully-qualified image reference assembled from the two values above.
traefik__image: "{{ traefik__image_name }}:{{ traefik__image_tag }}"
# Contact address registered with Let's Encrypt for ACME certificate issuance.
traefik__letsencrypt_email: hi@touchin.ru

View File

@ -0,0 +1,52 @@
---
# Deploy the Traefik v1 edge router: render its config, pre-create the ACME
# cert store, create the shared Docker network and start the container.
- name: DOCKER.TRAEFIK | Prepare deployment
  include_role:
    name: docker.deployment.prepare
    # Expose the prepare role's set_fact results (deploy path) to later tasks.
    public: true
  vars:
    deployment_name: traefik
  tags:
    - traefik

- name: DOCKER.TRAEFIK | Copy traefik config
  template:
    src: traefik.toml.j2
    dest: "{{ docker_deployment__deploy_path }}/traefik.toml"
    mode: "0644"
  # BUG FIX: this task carried no tags, so `--tags traefik` runs executed
  # every task in the role EXCEPT rendering the config itself.
  tags:
    - traefik
    - files

- name: DOCKER.TRAEFIK | Ensure ACME exists
  # Pre-create an empty cert store so the bind mount below does not
  # materialise acme.json as a directory; force=false preserves any
  # certificates already issued.
  copy:
    content: ""
    dest: "{{ docker_deployment__deploy_path }}/acme.json"
    force: false
    owner: root
    mode: "0600"
  tags:
    - traefik
    - files

- name: DOCKER.TRAEFIK | Create Traefik network
  docker_network:
    name: "{{ docker__traefik_network }}"
  tags:
    - traefik
    - network

- name: DOCKER.TRAEFIK | Run deployment
  docker_container:
    name: traefik
    image: "{{ traefik__image }}"
    command: "--api --docker"
    restart_policy: unless-stopped
    networks:
      - name: "{{ docker__traefik_network }}"
    # Quoted: an unquoted `80:80` is a YAML 1.1 sexagesimal integer (4880),
    # not the string "80:80".
    ports:
      - "80:80"
      - "443:443"
      - "127.0.0.1:5000:8080"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "{{ docker_deployment__deploy_path }}/traefik.toml:/traefik.toml"
      - "{{ docker_deployment__deploy_path }}/acme.json:/acme.json"
  tags:
    - traefik

View File

@ -0,0 +1,167 @@
################################################################
# Global configuration
################################################################
# Enable debug mode
#
# Optional
# Default: false
#
# debug = true
# Log level
#
# Optional
# Default: "ERROR"
#
logLevel = "INFO"
# Entrypoints to be used by frontends that do not specify any entrypoint.
# Each frontend can specify its own entrypoints.
#
# Optional
# Default: ["http"]
#
# NOTE(review): an "https" entrypoint with TLS and ACME are configured below,
# yet frontends default to "http" only — confirm whether
# defaultEntryPoints = ["http", "https"] was intended.
defaultEntryPoints = ["http"]
################################################################
# Entrypoints configuration
################################################################
# Entrypoints definition
#
# Optional
# Default:
[entryPoints]
[entryPoints.http]
address = ":80"
[entryPoints.https]
address = ":443"
[entryPoints.https.tls]
[retry]
################################################################
# Traefik logs configuration
################################################################
# Traefik logs
# Enabled by default and log to stdout
#
# Optional
#
# [traefikLog]
# Sets the filepath for the traefik log. If not specified, stdout will be used.
# Intermediate directories are created if necessary.
#
# Optional
# Default: os.Stdout
#
# filePath = "log/traefik.log"
# Format is either "json" or "common".
#
# Optional
# Default: "common"
#
# format = "common"
################################################################
# Access logs configuration
################################################################
# Enable access logs
# By default it will write to stdout and produce logs in the textual
# Common Log Format (CLF), extended with additional fields.
#
# Optional
#
# [accessLog]
# Sets the file path for the access log. If not specified, stdout will be used.
# Intermediate directories are created if necessary.
#
# Optional
# Default: os.Stdout
#
# filePath = "/path/to/log/log.txt"
# Format is either "json" or "common".
#
# Optional
# Default: "common"
#
# format = "common"
################################################################
# API and dashboard configuration
################################################################
# Enable API and dashboard
[api]
# Name of the related entry point
#
# Optional
# Default: "traefik"
#
# entryPoint = "traefik"
# Enabled Dashboard
#
# Optional
# Default: true
#
# dashboard = false
################################################################
# Ping configuration
################################################################
# Enable ping
[ping]
# Name of the related entry point
#
# Optional
# Default: "traefik"
#
# entryPoint = "traefik"
################################################################
# Docker configuration backend
################################################################
# Enable Docker configuration backend
[docker]
# Docker server endpoint. Can be a tcp or a unix socket endpoint.
#
# Required
# Default: "unix:///var/run/docker.sock"
#
# endpoint = "tcp://10.10.10.10:2375"
# Default domain used.
# Can be overridden by setting the "traefik.domain" label on a container.
#
# Optional
# Default: ""
#
watch = true
# Expose containers by default in traefik
#
# Optional
# Default: true
#
exposedByDefault = false
# Automatic Let's Encrypt certificates (Traefik v1): a certificate is issued
# for every routed host (OnHostRule) via the HTTP-01 challenge on the "http"
# entrypoint, and stored in acme.json (bind-mounted from the deploy dir).
# NOTE(review): the v1 documentation spells this option `onHostRule`;
# Traefik's config loader appears case-insensitive in practice, but matching
# the documented casing would be safer — confirm against the deployed version.
[acme]
email = "{{ traefik__letsencrypt_email }}"
storage = "acme.json"
entryPoint = "https"
OnHostRule = true
[acme.httpChallenge]
entryPoint = "http"

View File

@ -0,0 +1,3 @@
alertmanager__image_name: "prom/alertmanager"
alertmanager__image_tag: "v0.20.0"
alertmanager__image: "{{ alertmanager__image_name }}:{{ alertmanager__image_tag }}"

View File

@ -0,0 +1,32 @@
---
# Deploy Alertmanager: prepare the deployment directory, render the compose
# file and pre-create the config file the container bind-mounts.
- name: MONITORING.ALERTMANAGER | Prepare deployment
  import_role:
    name: docker.deployment.prepare
  vars:
    deployment_name: alertmanager
  tags:
    - alertmanager

- name: MONITORING.ALERTMANAGER | Copy templates
  template:
    src: "templates/{{ item }}.j2"
    dest: "{{ docker_deployment__deploy_path }}/{{ item }}"
  with_items:
    - docker-compose.yml
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - alertmanager

- name: MONITORING.ALERTMANAGER | Create mount placeholders
  # Pre-create alertmanager.yml so the compose bind mount does not
  # materialise it as a directory on first start.
  file:
    path: "{{ docker_deployment__deploy_path }}/{{ item }}"
    state: touch
    # Quoted octal; NOTE(review): 0644 would suffice for a config file — confirm.
    mode: "0755"
    owner: "{{ deployment_user | default(system__deploy_user_name) }}"
  loop:
    - "alertmanager.yml"
  tags:
    - files
    - alertmanager

# NOTE(review): unlike the sibling monitoring roles (blackbox, filebeat,
# grafana) there is no "Run deployment" docker_service task here — confirm
# the container is started elsewhere.

View File

@ -0,0 +1,24 @@
version: "3.7"
services:
alertmanager:
container_name: alertmanager
image: "{{ alertmanager__image }}"
user: root
volumes:
- ./alertmanager.yml:/etc/alertmanager.yml:ro
restart: unless-stopped
networks:
- "{{ docker__prometheus_network }}"
ports:
- "{{ alertmanager__port }}:9093"
logging:
driver: json-file
options:
max-file: "1"
max-size: "50m"
command: --config.file=/etc/alertmanager.yml --log.level=debug
networks:
{{ docker__prometheus_network }}:
external: true

View File

@ -0,0 +1,3 @@
blackbox__image_name: "prom/blackbox-exporter"
blackbox__image_tag: "v0.16.0"
blackbox__image: "{{ blackbox__image_name }}:{{ blackbox__image_tag }}"

View File

@ -0,0 +1,31 @@
---
# Deploy the Prometheus blackbox exporter: render its probe config and the
# compose file, then (re)start the container stack as the deploy user.
- name: MONITORING.BLACKBOX | Prepare deployment
  import_role:
    name: docker.deployment.prepare
  vars:
    deployment_name: blackbox
  tags:
    - blackbox-exporter

- name: MONITORING.BLACKBOX | Copy templates
  template:
    src: "templates/{{ item }}.j2"
    dest: "{{ docker_deployment__deploy_path }}/{{ item }}"
  with_items:
    - blackbox.yml
    - docker-compose.yml
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - blackbox-exporter

- name: MONITORING.BLACKBOX | Run deployment
  docker_service:
    # Proper boolean instead of the string "yes".
    restarted: true
    project_src: "{{ docker_deployment__deploy_path }}"
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - run
    - blackbox-exporter

View File

@ -0,0 +1,8 @@
modules:
icmp:
prober: icmp
timeout: 5s
http:
prober: http
timeout: 5s
http: {}

View File

@ -0,0 +1,18 @@
version: "3.7"
services:
blackbox-exporter:
container_name: blackbox-exporter
image: "{{ blackbox__image }}"
user: root
volumes:
- ./blackbox.yml:/etc/blackbox.yml:ro
restart: unless-stopped
ports:
- "{{ blackbox_exporter__port }}:9115"
logging:
driver: json-file
options:
max-file: "1"
max-size: "50m"
command: --config.file=/etc/blackbox.yml

View File

@ -0,0 +1,5 @@
filebeat__image_name: "docker.elastic.co/beats/filebeat"
filebeat__image_tag: "7.5.2"
filebeat__image: "{{ filebeat__image_name }}:{{ filebeat__image_tag }}"
filebeat__config_user: 0

View File

@ -0,0 +1,39 @@
---
# Deploy Filebeat: render config + compose file, hand the config to root
# (Filebeat rejects configs not owned by the uid it runs as), then start.
- name: MONITORING.FILEBEAT | Prepare deployment
  import_role:
    name: docker.deployment.prepare
  vars:
    deployment_name: filebeat
  tags:
    - filebeat

- name: MONITORING.FILEBEAT | Copy templates
  template:
    src: "templates/{{ item }}.j2"
    dest: "{{ docker_deployment__deploy_path }}/{{ item }}"
  with_items:
    - filebeat.yml
    - docker-compose.yml
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - filebeat

- name: MONITORING.FILEBEAT | Set config ownership
  file:
    path: "{{ docker_deployment__deploy_path }}/filebeat.yml"
    owner: "{{ filebeat__config_user }}"
  # BUG FIX: chown to uid 0 requires root. This task had no privilege
  # escalation and therefore failed for any non-root connection user.
  become: true
  tags:
    - files
    - filebeat

- name: MONITORING.FILEBEAT | Run deployment
  docker_service:
    restarted: true
    project_src: "{{ docker_deployment__deploy_path }}"
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - run
    - filebeat

View File

@ -0,0 +1,17 @@
version: "3.7"
services:
filebeat:
container_name: filebeat
image: "{{ filebeat__image }}"
user: root
volumes:
- /var/lib/docker:/var/lib/docker:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
restart: unless-stopped
logging:
driver: json-file
options:
max-file: "1"
max-size: "50m"

View File

@ -0,0 +1,23 @@
monitoring.enabled: false
output.file.enabled: false
filebeat.inputs:
- type: container
paths:
- "/var/lib/docker/containers/*/*.log"
processors:
- add_docker_metadata:
host: "unix:///var/run/docker.sock"
labels.dedot: true
- drop_event:
when:
equals:
container.name: "filebeat"
- drop_event:
when:
not:
equals:
container.labels.log_consumed-by: "filebeat"
output.logstash:
hosts: {{ filebeat.logstash.hosts | to_json }}

View File

@ -0,0 +1,16 @@
# grafana__server_root: "http://10.0.8.1:7001"
# UID the Grafana container runs as; the volume directory is chowned to it.
grafana__user_id: 104
grafana__image_name: "grafana/grafana"
grafana__image_tag: "6.6.0-ubuntu"
# Full image reference assembled from the two values above.
grafana__image: "{{ grafana__image_name }}:{{ grafana__image_tag }}"
# Deliberately a string, not a boolean: this value is forwarded verbatim to
# the GF_AUTH_LDAP_ENABLED environment variable, which must be text.
grafana__enable_ldap_auth: "true"
# Traefik v1 routing labels applied to the container; the compose template
# merges these with host-specific overrides (grafana.docker.labels).
grafana__default_labels:
  "traefik.enable": "true"
  "traefik.port": "3000"
  "traefik.backend": "grafana"
  "traefik.docker.network": "{{ docker__traefik_network }}"
  "traefik.frontend.rule": "Host:{{ grafana.domain }}"
  "traefik.frontend.entryPoints": "http"

View File

@ -0,0 +1,33 @@
---
# Deploy Grafana: prepare directories (volume owned by the Grafana uid),
# render LDAP + compose templates, then (re)start the stack.
- name: MONITORING.GRAFANA | Prepare deployment
  include_role:
    name: docker.deployment.prepare
    # Expose the prepare role's facts (deploy/volume paths) to later tasks.
    public: true
  vars:
    deployment_name: grafana
    volume_user: "{{ grafana__user_id }}"
  tags:
    - grafana

- name: MONITORING.GRAFANA | Copy templates
  template:
    src: "templates/{{ item }}.j2"
    dest: "{{ docker_deployment__deploy_path }}/{{ item }}"
  with_items:
    - ldap.toml
    - docker-compose.yml
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - grafana

- name: MONITORING.GRAFANA | Run deployment
  docker_service:
    # Proper boolean instead of the string "yes".
    restarted: true
    project_src: "{{ docker_deployment__deploy_path }}"
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - run
    - grafana

View File

@ -0,0 +1,30 @@
version: "3.7"
services:
grafana:
container_name: grafana
image: "{{ grafana__image }}"
user: "{{ grafana__user_id }}"
environment:
GF_SERVER_DOMAIN: "{{ grafana.domain }}"
GF_SERVER_ROOT_URL: "http://{{ grafana.domain }}"
GF_AUTH_LDAP_ENABLED: "{{ grafana__enable_ldap_auth }}"
volumes:
- "{{ docker_deployment__volume_path }}:/var/lib/grafana"
- "./ldap.toml:/etc/grafana/ldap.toml"
labels: {{ grafana__default_labels | combine(grafana.docker.labels | default({})) | to_json }}
networks: {{ grafana.docker.networks | default([]) | to_json }}
restart: unless-stopped
logging:
driver: json-file
options:
max-file: "1"
max-size: "50m"
{% if grafana.docker.networks is defined %}
networks:
{% for network in grafana.docker.networks %}
{{ network | indent(width=2) }}:
external: true
{% endfor %}
{% endif %}

View File

@ -0,0 +1,28 @@
[[servers]]
host = "{{ ldap__host }}"
port = {{ ldap__port }}
bind_dn = "{{ ldap__binddn }}"
bind_password = "{{ ldap__bindpw }}"
search_filter = "(&(uid=%s)(memberOf={{ ldap__groups.services }}))"
search_base_dns = ["{{ ldap__users_dn }}"]
[[servers.group_mappings]]
group_dn="{{ grafana.access_groups.admin }}"
org_role = "Admin"
[[servers.group_mappings]]
group_dn="{{ grafana.access_groups.editor }}"
org_role = "Editor"
[[servers.group_mappings]]
group_dn="{{ grafana.access_groups.viewer }}"
org_role = "Viewer"
[servers.attributes]
name = "givenName"
surname = "sn"
username = "uid"
member_of = "memberOf"
email = "mail"

View File

@ -0,0 +1,3 @@
node_exporter__version: 0.18.1
node_exporter__dir: "{{ system__vendor_deploy_path }}/node-exporter"
node_exporter__url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter__version }}/node_exporter-{{ node_exporter__version }}.linux-amd64.tar.gz"

View File

@ -0,0 +1,11 @@
[Unit]
Description=Node Exporter
# Wants= added: After= only orders this unit against network-online.target,
# it does not pull the target in, so the exporter could previously start
# before the network was up (see systemd.unit(5)).
Wants=network-online.target
After=network-online.target

[Service]
User=node-exporter
EnvironmentFile=/etc/sysconfig/node_exporter
ExecStart=/usr/sbin/node_exporter $OPTIONS

[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,74 @@
---
# Install Prometheus node_exporter natively: dedicated user, download +
# unpack release tarball, install binary, systemd unit + env file, start.
# NOTE(review): these tasks write to /usr/sbin and /etc but carry no
# `become` — confirm the play escalates privileges at play level.
- name: MONITORING.NODE-EXPORTER | Create user
  user:
    name: "node-exporter"
    shell: /sbin/nologin
  tags:
    - system
    - node-exporter

- name: MONITORING.NODE-EXPORTER | Ensure node-exporter folder exists
  file:
    path: "{{ node_exporter__dir }}"
    state: directory
  tags:
    - prepare
    - node-exporter

- name: MONITORING.NODE-EXPORTER | Get Node Exporter
  unarchive:
    src: "{{ node_exporter__url }}"
    dest: "{{ node_exporter__dir }}"
    remote_src: true
    # Strip the versioned top-level directory from the release tarball.
    extra_opts: ['--strip-components=1', '--show-stored-names']
  tags:
    - files
    - node-exporter

- name: MONITORING.NODE-EXPORTER | Install Node Exporter
  copy:
    src: "{{ node_exporter__dir }}/node_exporter"
    dest: "/usr/sbin"
    remote_src: true
    # BUG FIX: unquoted `mode: 755` is DECIMAL 755 (= octal 1363), which sets
    # nonsensical permission bits. The quoted octal string is what was meant.
    mode: "0755"
  tags:
    - files
    - node-exporter

- name: MONITORING.NODE-EXPORTER | Ensure sysconfig folder exists
  file:
    path: "/etc/sysconfig"
    state: directory
  tags:
    - prepare
    - node-exporter

- name: MONITORING.NODE-EXPORTER | Copy systemctl service
  copy:
    src: "files/node_exporter.service"
    dest: "/etc/systemd/system/node_exporter.service"
    # BUG FIX: was decimal 755; unit files need no execute bit — 0644.
    mode: "0644"
  tags:
    - files
    - service
    - node-exporter

- name: MONITORING.NODE-EXPORTER | Copy systemctl config
  template:
    src: "templates/node_exporter"
    dest: "/etc/sysconfig/node_exporter"
    # BUG FIX: was decimal 755; an environment file needs no execute bit.
    mode: "0644"
  tags:
    - files
    - service
    - node-exporter

- name: MONITORING.NODE-EXPORTER | Enable and Run Node Exporter Service
  systemd:
    name: node_exporter
    daemon_reload: true
    enabled: true
    state: restarted
  tags:
    - service
    - node-exporter

View File

@ -0,0 +1 @@
OPTIONS="--web.listen-address={{ system__internal_listen_address }}:9100"

View File

@ -0,0 +1,3 @@
prometheus__image_name: "prom/prometheus"
prometheus__image_tag: "v2.15.2"
prometheus__image: "{{ prometheus__image_name }}:{{ prometheus__image_tag }}"

View File

@ -0,0 +1,57 @@
---
# Deploy Prometheus: prepare directories, pre-create bind-mounted files,
# render the compose file and create the shared monitoring network.
- name: MONITORING.PROMETHEUS | Prepare deployment
  include_role:
    name: docker.deployment.prepare
    # Expose the prepare role's facts (deploy path) to later tasks.
    public: true
  vars:
    deployment_name: prometheus
  tags:
    - prometheus

- name: MONITORING.PROMETHEUS | Create mount directories
  file:
    path: "{{ docker_deployment__deploy_path }}/{{ item }}"
    state: directory
    mode: "0755"
    owner: "{{ deployment_user | default(system__deploy_user_name) }}"
  loop:
    - "alert-rules"
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - prometheus

- name: MONITORING.PROMETHEUS | Create mount placeholders
  # Pre-create prometheus.yml so the compose bind mount does not
  # materialise it as a directory on first start.
  file:
    path: "{{ docker_deployment__deploy_path }}/{{ item }}"
    state: touch
    # Quoted octal; NOTE(review): 0644 would suffice for a config file — confirm.
    mode: "0755"
    owner: "{{ deployment_user | default(system__deploy_user_name) }}"
  loop:
    - "prometheus.yml"
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - prometheus

- name: MONITORING.PROMETHEUS | Copy templates
  template:
    src: "templates/{{ item }}.j2"
    dest: "{{ docker_deployment__deploy_path }}/{{ item }}"
  with_items:
    - docker-compose.yml
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - prometheus

- name: MONITORING.PROMETHEUS | Create Prometheus Docker network
  docker_network:
    name: "{{ docker__prometheus_network }}"
  tags:
    - docker
    - network
    - prometheus

View File

@ -0,0 +1,25 @@
version: "3.7"
services:
prometheus:
container_name: prometheus
image: "{{ prometheus__image }}"
networks:
- "{{ docker__prometheus_network }}"
volumes:
- "{{ docker_deployment__deploy_path }}/alert-rules:/etc/prometheus/alert-rules:ro"
- "{{ docker_deployment__deploy_path }}/prometheus.yml:/etc/prometheus/prometheus.yml:ro"
restart: unless-stopped
ports:
- "{{ prometheus__port }}:9090"
logging:
driver: "json-file"
options:
max-file: "1"
max-size: "50m"
command:
- --config.file=/etc/prometheus/prometheus.yml
networks:
{{ docker__prometheus_network }}:
external: true

View File

@ -0,0 +1,9 @@
nexus__image_name: "sonatype/nexus3"
nexus__image_tag: "3.21.1"
nexus__image: "{{ nexus__image_name }}:{{ nexus__image_tag }}"
nexus__container_name: "nexus"
nexus__ui_port: "8081"
nexus__registry_port: "8082"
nexus__volume_user: "200"

View File

@ -0,0 +1,32 @@
---
# Deploy Sonatype Nexus: prepare directories (volume owned by the Nexus
# uid), render the compose file, then (re)start the stack.
- name: DOCKER.NEXUS | Prepare deployment
  include_role:
    name: docker.deployment.prepare
    # Expose the prepare role's facts (deploy/volume paths) to later tasks.
    public: true
  vars:
    deployment_name: nexus
    volume_user: "{{ nexus__volume_user }}"
  tags:
    - nexus

- name: DOCKER.NEXUS | Copy templates
  template:
    src: "templates/{{ item }}.j2"
    dest: "{{ docker_deployment__deploy_path }}/{{ item }}"
  with_items:
    - docker-compose.yml
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - files
    - nexus

- name: DOCKER.NEXUS | Run deployment
  docker_service:
    # Proper boolean instead of the string "yes".
    restarted: true
    project_src: "{{ docker_deployment__deploy_path }}"
  become: true
  become_user: "{{ docker_deployment__deploy_user_name }}"
  tags:
    - run
    - nexus

View File

@ -0,0 +1,29 @@
version: "3"
services:
nexus3:
container_name: "{{ nexus__container_name }}"
image: "{{ nexus__image }}"
volumes:
- "{{ docker_deployment__volume_path }}:/nexus-data"
restart: unless-stopped
networks:
- "{{ docker__traefik_network }}"
labels:
"traefik.enable": "true"
"traefik.docker.network": "{{ docker__traefik_network }}"
# ui labels
"traefik.ui.backend": "{{ nexus__container_name }}-ui"
"traefik.ui.port": "{{ nexus__ui_port }}"
"traefik.ui.frontend.rule": "Host:{{ domains.nexus.ui }}"
"traefik.ui.frontend.entryPoints": "http"
# registry labels
"traefik.registry.backend": "{{ nexus__container_name }}-registry"
"traefik.registry.port": "{{ nexus__registry_port }}"
"traefik.registry.frontend.rule": "Host:{{ domains.nexus.registry }}"
"traefik.registry.frontend.entryPoints": "http,https"
"traefik.registry.frontend.redirect.permanent": "true"
"traefik.registry.frontend.headers.SSLRedirect": "true"
networks:
{{ docker__traefik_network }}:
external: true

View File

@ -0,0 +1,46 @@
---
# Install baseline packages plus optional Java / MySQL / Docker tooling,
# gated by host-level feature flags (java, mysql, docker).
- name: SYSTEM.DEPENDENCIES | Install common dependencies
  apt:
    name:
      - python3-pip
      - nmap
      - htop
      - curl
      - vim
      - expect
      - unzip
      - bash-completion
  become: true
  tags:
    - dependencies

- name: SYSTEM.DEPENDENCIES | Install Java dependencies
  apt:
    name:
      - openjdk-8-jdk
  # BUG FIX: apt requires root. Every sibling task escalates, but this one
  # did not, so it failed whenever `java` was enabled for a non-root
  # connection user.
  become: true
  when: java is defined and java | bool
  tags:
    - java
    - dependencies

- name: SYSTEM.DEPENDENCIES | Install MySQL dependencies
  apt:
    name:
      - mysql-client
      - python3-mysqldb
  become: true
  when: mysql is defined and mysql | bool
  tags:
    - mysql
    - dependencies

- name: SYSTEM.DEPENDENCIES | Install Ansible Docker dependencies
  pip:
    name:
      - docker
      - docker-compose
    executable: pip3
  # NOTE(review): system-wide pip installs also need root; escalation added
  # for consistency with the apt tasks — confirm a user-level install was
  # not intended.
  become: true
  when: docker is defined and docker | bool
  tags:
    - docker
    - dependencies

View File

@ -0,0 +1,30 @@
---
# Create the deploy user and install its Docker registry credentials.
- name: SYSTEM.DEPLOY-USER | Create user
  user:
    name: "{{ system__deploy_user_name }}"
    shell: /bin/bash
    home: "{{ system__deploy_user_home }}"
    createhome: true
  tags:
    - system
    - deploy-user

- name: SYSTEM.DEPLOY-USER | Ensure Docker dir exists
  file:
    path: "{{ system__deploy_user_home }}/.docker"
    state: directory
    owner: "{{ system__deploy_user_name }}"
    mode: "0755"
  when: docker is defined
  tags:
    - docker
    - prepare

- name: SYSTEM.DEPLOY-USER | Copy Docker credentials
  template:
    src: "templates/docker-config.json.j2"
    dest: "{{ system__deploy_user_home }}/.docker/config.json"
    # SECURITY FIX: the rendered file embeds a registry auth token; without
    # an explicit mode it was created with the default umask (typically
    # world-readable). Restrict it to the deploy user.
    owner: "{{ system__deploy_user_name }}"
    mode: "0600"
  when: docker is defined
  tags:
    - files
    - docker

View File

@ -0,0 +1,10 @@
{
"auths": {
"dhub.touchin.ru": {
"auth": "{{ docker__touchin_registry_auth_token }}"
}
},
"HttpHeaders": {
"User-Agent": "Docker-Client/18.05.0-ce (linux)"
}
}

3
system.docker/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
*.retry
*/__pycache__
*.pyc

20
system.docker/LICENSE Normal file
View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

89
system.docker/README.md Normal file
View File

@ -0,0 +1,89 @@
# Ansible Role: Docker
[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-docker.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-docker)
An Ansible Role that installs [Docker](https://www.docker.com) on Linux.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
# Edition can be one of: 'ce' (Community Edition) or 'ee' (Enterprise Edition).
docker_edition: 'ce'
docker_package: "docker-{{ docker_edition }}"
docker_package_state: present
The `docker_edition` should be either `ce` (Community Edition) or `ee` (Enterprise Edition). You can also specify a specific version of Docker to install using the distribution-specific format: Red Hat/CentOS: `docker-{{ docker_edition }}-<VERSION>`; Debian/Ubuntu: `docker-{{ docker_edition }}=<VERSION>`.
You can control whether the package is installed, uninstalled, or at the latest version by setting `docker_package_state` to `present`, `absent`, or `latest`, respectively. Note that the Docker daemon will be automatically restarted if the Docker package is updated. This is a side effect of flushing all handlers (running any of the handlers that have been notified by this and any other role up to this point in the play).
docker_service_state: started
docker_service_enabled: true
docker_restart_handler_state: restarted
Variables to control the state of the `docker` service, and whether it should start on boot. If you're installing Docker inside a Docker container without systemd or sysvinit, you should set these to `stopped` and set the enabled variable to `no`.
docker_install_compose: true
docker_compose_version: "1.22.0"
docker_compose_path: /usr/local/bin/docker-compose
Docker Compose installation options.
docker_apt_release_channel: stable
docker_apt_arch: amd64
docker_apt_repository: "deb [arch={{ docker_apt_arch }}] https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
docker_apt_ignore_key_error: True
(Used only for Debian/Ubuntu.) You can switch the channel to `edge` if you want to use the Edge release.
docker_yum_repo_url: https://download.docker.com/linux/centos/docker-{{ docker_edition }}.repo
docker_yum_repo_enable_edge: 0
docker_yum_repo_enable_test: 0
(Used only for RedHat/CentOS.) You can enable the Edge or Test repo by setting the respective vars to `1`.
docker_users:
- user1
- user2
A list of system users to be added to the `docker` group (so they can use Docker on the server).
## Use with Ansible (and `docker` Python library)
Many users of this role wish to also use Ansible to then _build_ Docker images and manage Docker containers on the server where Docker is installed. In this case, you can easily add in the `docker` Python library using the `geerlingguy.pip` role:
```yaml
- hosts: all
vars:
pip_install_packages:
- name: docker
roles:
- geerlingguy.pip
- geerlingguy.docker
```
## Dependencies
None.
## Example Playbook
```yaml
- hosts: all
roles:
- geerlingguy.docker
```
## License
MIT / BSD
## Author Information
This role was created in 2017 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).

View File

@ -0,0 +1,29 @@
---
# Edition can be one of: 'ce' (Community Edition) or 'ee' (Enterprise Edition).
docker_edition: 'ce'
docker_package: "docker-{{ docker_edition }}"
docker_package_state: present
# Service options.
docker_service_state: started
docker_service_enabled: true
docker_restart_handler_state: restarted
# Docker Compose options.
docker_install_compose: true
docker_compose_version: "1.22.0"
docker_compose_path: /usr/local/bin/docker-compose
# Used only for Debian/Ubuntu. Switch 'stable' to 'edge' if needed.
docker_apt_release_channel: stable
docker_apt_arch: amd64
docker_apt_repository: "deb [arch={{ docker_apt_arch }}] https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
docker_apt_ignore_key_error: true
# Used only for RedHat/CentOS/Fedora.
docker_yum_repo_url: https://download.docker.com/linux/{{ (ansible_distribution == "Fedora") | ternary("fedora","centos") }}/docker-{{ docker_edition }}.repo
docker_yum_repo_enable_edge: 0
docker_yum_repo_enable_test: 0
# A list of users who will be added to the docker group.
docker_users: []

View File

@ -0,0 +1,3 @@
---
- name: restart docker
service: "name=docker state={{ docker_restart_handler_state }}"

View File

@ -0,0 +1,16 @@
---
- name: Ensure containerd service dir exists.
file:
path: /etc/systemd/system/containerd.service.d
state: directory
- name: Add shim to ensure Docker can start in all environments.
template:
src: override.conf.j2
dest: /etc/systemd/system/containerd.service.d/override.conf
register: override_template
- name: Reload systemd daemon if template is changed.
systemd:
daemon_reload: true
when: override_template is changed

View File

@ -0,0 +1,20 @@
---
- name: Check current docker-compose version.
command: docker-compose --version
register: docker_compose_current_version
changed_when: false
failed_when: false
- name: Delete existing docker-compose version if it's different.
file:
path: "{{ docker_compose_path }}"
state: absent
when: >
docker_compose_current_version.stdout is defined
and docker_compose_version not in docker_compose_current_version.stdout
- name: Install Docker Compose (if configured).
get_url:
url: https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-Linux-x86_64
dest: "{{ docker_compose_path }}"
mode: 0755

View File

@ -0,0 +1,7 @@
---
- name: Ensure docker users are added to the docker group.
user:
name: "{{ item }}"
groups: docker
append: true
with_items: "{{ docker_users }}"

View File

@ -0,0 +1,31 @@
---
- include_tasks: setup-RedHat.yml
when: ansible_os_family == 'RedHat'
- include_tasks: setup-Debian.yml
when: ansible_os_family == 'Debian'
- name: Install Docker.
package:
name: "{{ docker_package }}"
state: "{{ docker_package_state }}"
notify: restart docker
# TODO: Remove this shim once 18.09.1 or later is released.
- import_tasks: docker-1809-shim.yml
when: ansible_service_mgr == 'systemd'
- name: Ensure Docker is started and enabled at boot.
service:
name: docker
state: "{{ docker_service_state }}"
enabled: "{{ docker_service_enabled }}"
- name: Ensure handlers are notified now to avoid firewall conflicts.
meta: flush_handlers
- include_tasks: docker-compose.yml
when: docker_install_compose
- include_tasks: docker-users.yml
when: docker_users

View File

@ -0,0 +1,38 @@
---
- name: Ensure old versions of Docker are not installed.
package:
name:
- docker
- docker-engine
state: absent
- name: Ensure dependencies are installed.
apt:
name:
- apt-transport-https
- ca-certificates
state: present
- name: Add Docker apt key.
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
state: present
register: add_repository_key
ignore_errors: "{{ docker_apt_ignore_key_error }}"
- name: Ensure curl is present (on older systems without SNI).
package: name=curl state=present
when: add_repository_key is failed
- name: Add Docker apt key (alternative for older systems without SNI).
shell: "curl -sSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -"
args:
warn: false
when: add_repository_key is failed
- name: Add Docker repository.
apt_repository:
repo: "{{ docker_apt_repository }}"
state: present
update_cache: true

View File

@ -0,0 +1,35 @@
---
- name: Ensure old versions of Docker are not installed.
package:
name:
- docker
- docker-common
- docker-engine
state: absent
- name: Add Docker GPG key.
rpm_key:
key: https://download.docker.com/linux/centos/gpg
state: present
- name: Add Docker repository.
get_url:
url: "{{ docker_yum_repo_url }}"
dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
owner: root
group: root
mode: 0644
- name: Configure Docker Edge repo.
ini_file:
dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
section: 'docker-{{ docker_edition }}-edge'
option: enabled
value: '{{ docker_yum_repo_enable_edge }}'
- name: Configure Docker Test repo.
ini_file:
dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
section: 'docker-{{ docker_edition }}-test'
option: enabled
value: '{{ docker_yum_repo_enable_test }}'

View File

@ -0,0 +1,3 @@
# {{ ansible_managed }}
[Service]
ExecStartPre=

View File

View File

@ -0,0 +1,79 @@
---
- name: SYSTEM.VPN-CLIENT | Install dependencies
apt:
name:
- xl2tpd
- strongswan
- strongswan-starter
- strongswan-charon
tags:
- dependencies
- sys
- vpn
- name: SYSTEM.VPN-CLIENT | Install xl2tpd configuration
template:
src: "{{ item }}.j2"
dest: "{{ vpn__xl2tpd_config_dir }}/{{ item }}"
with_items:
- xl2tpd.conf
tags:
- files
- vpn
- name: SYSTEM.VPN-CLIENT | Install pppd configuration
template:
src: "{{ item }}.j2"
dest: "{{ vpn__pppd_config_dir }}/{{ item }}"
with_items:
- options.l2tpd.client
tags:
- files
- vpn
- name: SYSTEM.VPN-CLIENT | Install pppd up script(s)
template:
src: "{{ item }}.j2"
dest: "{{ vpn__pppd_up_script_dir }}/{{ item }}"
mode: 0751
with_items:
- route10
tags:
- files
- vpn
- name: SYSTEM.VPN-CLIENT | Install strongswan configuration
template:
src: "{{ item }}.j2"
dest: "{{ vpn__strongswan_config_dir }}/{{ item }}"
owner: root
group: root
mode: 0600
with_items:
- ipsec.conf
- ipsec.secrets
tags:
- files
- vpn
- name: SYSTEM.VPN-CLIENT | Enable and restart strongswan unit(s)
systemd:
unit: "{{ item }}"
enabled: true
state: restarted
with_items:
- strongswan.service
tags:
- vpn
- service
- name: SYSTEM.VPN-CLIENT | Enable and restart common systemd unit(s)
systemd:
unit: "{{ item }}"
enabled: true
state: restarted
with_items:
- xl2tpd.service
tags:
- vpn
- service

View File

@ -0,0 +1,32 @@
# ipsec.conf - strongSwan IPsec configuration file
# basic configuration
config setup
# strictcrlpolicy=yes
# uniqueids = no
# Add connections here.
# Sample VPN connections
conn %default
ikelifetime=60m
keylife=20m
rekeymargin=3m
keyingtries=1
keyexchange=ikev1
authby=secret
ike=aes256-sha1-modp1024
esp=aes256-sha1
conn {{ vpn__connection_id }}
keyexchange=ikev1
left=%defaultroute
auto=route
authby=secret
type=transport
leftprotoport=17/1701
rightprotoport=17/1701
right={{ vpn__public_host }}
keyingtries=%forever

View File

@ -0,0 +1 @@
: PSK "{{ vpn__psk }}"

View File

@ -0,0 +1,5 @@
debug
noauth
usepeerdns
name {{ vpn__username }}
password {{ vpn__password }}

View File

@ -0,0 +1,3 @@
#!/usr/bin/env sh
ip route add {{ vpn__subnet }} via {{ vpn__ppp_ip }}

View File

@ -0,0 +1,10 @@
[lac {{ vpn__connection_id }}]
lns = {{ vpn__public_host }}
ppp debug = yes
require chap = yes
pppoptfile = {{ vpn__pppd_config_dir }}/options.l2tpd.client
length bit = yes
redial=yes
redial timeout=2
max redials=100000000
autodial=yes