Move from SVN to this Git repo

Jens Heinitz 2021-06-10 14:41:21 +02:00
parent 3232ce9c1c
commit 365ba0d343
45 changed files with 2180 additions and 0 deletions

7
hello.yml Normal file

@ -0,0 +1,7 @@
- name: Moin Hello World
hosts: all
tasks:
- name: Moin Hello Message
debug:
msg: "Moin! Hello World!"


@ -0,0 +1,9 @@
---
all:
hosts:
raspberrypi1.seboto.net:
raspberrypi2.seboto.net:
raspberrypi3.seboto.net:
raspberrypi4.seboto.net:
raspberrypi5.seboto.net:
...


@ -0,0 +1,9 @@
---
all:
hosts:
raspberrypi1.seboto.net:
raspberrypi2.seboto.net:
raspberrypi3.seboto.net:
raspberrypi4.seboto.net:
raspberrypi5.seboto.net:
...


@ -0,0 +1,31 @@
---
all:
hosts:
server1.seboto.net:
ansible_user: jens
children:
seboto:
hosts:
srv1.seboto.de:
srv2.seboto.de:
srv3.seboto.de:
pi:
hosts:
raspberrypi1.seboto.net:
ansible_user: pi
raspberrypi2.seboto.net:
ansible_user: pi
raspberrypi3.seboto.net:
ansible_user: pi
raspberrypi4.seboto.net:
ansible_user: pi
raspberrypi5.seboto.net:
ansible_user: pi
vip:
hosts:
srv2.seboto.de:
raspberrypi3.seboto.net:
ansible_user: pi
vars:
ansible_python_interpreter: /usr/bin/python3
...


@ -0,0 +1,7 @@
---
all:
hosts:
srv1.seboto.de:
srv2.seboto.de:
srv3.seboto.de:
...

50
pkg_upgrade.yml Normal file

@ -0,0 +1,50 @@
---
- name: Linux Package Upgrade
hosts: all
remote_user: root
tasks:
- name: "[Debian] Refresh Package Cache if not older that 1h"
apt:
update_cache: yes
cache_valid_time: 3600
become: true
when: ansible_os_family == "Debian"
- name: "[AlmaLinux] Refresh Package Cache if not older that 1h"
dnf:
update_cache: yes
become: true
when: ansible_os_family == "AlmaLinux"
- name: "[Debian] Remove Packages that can no longer be downloaded"
apt:
autoclean: yes
become: true
when: ansible_os_family == "Debian"
- name: "[Debian] Remove packages that are no longer required"
apt:
autoremove: yes
become: true
when: ansible_os_family == "Debian"
- name: "[AlmaLinux] Remove packages that are no longer required"
dnf:
autoremove: yes
become: true
when: ansible_os_family == "AlmaLinux"
- name: "[Debian] Upgrade all packages that need an update"
apt:
upgrade: full
become: true
when: ansible_os_family == "Debian"
- name: "[AlmaLinux] Upgrade all packages that need an update"
dnf:
name: "*"
state: latest
become: true
when: ansible_os_family == "AlmaLinux"
...
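The play above branches on ansible_os_family, so it can be pointed at any of the groups defined in the inventories in this commit. A minimal invocation sketch, assuming a dry run against the Raspberry Pi group first; the inventory path is a placeholder:

# example only: check what would be upgraded on the pi group before touching the servers
ansible-playbook -i <inventory> pkg_upgrade.yml --limit pi --check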

7
playbooks/hello.yml Normal file

@ -0,0 +1,7 @@
- name: Moin Hello World
hosts: all
tasks:
- name: Moin Hello Message
debug:
msg: "Moin! Hello World!"

27
playbooks/pkg_upgrade.yml Normal file

@ -0,0 +1,27 @@
---
- name: Linux Package Upgrade
hosts: all
remote_user: root
tasks:
- name: Refresh Package Cache if not older than 1h
apt:
update_cache: yes
cache_valid_time: 3600
become: true
- name: Remove Packages that can no longer be downloaded
apt:
autoclean: yes
become: true
- name: Remove packages that are no longer required
apt:
autoremove: yes
become: true
- name: Upgrade all packages that need an update
apt:
upgrade: full
become: true
...

27
playbooks/reboot.yml Normal file

@ -0,0 +1,27 @@
---
- name: Reboot host
hosts: all
remote_user: root
tasks:
- name: reboot host
shell: 'sleep 5 && shutdown -r now "Reboot by Ansible."'
register: reboot
async: 1
poll: 0
ignore_errors: true
- name: wait for server to come down
local_action: wait_for host={{ inventory_hostname }} port=22 state=stopped
- name: wait for server to come up
local_action: wait_for host={{ inventory_hostname }} port=22 state=started
- name: check uptime
shell: 'uptime'
register: uptime
- name: show uptime
debug:
msg: "Uptime: {{ uptime.stdout }}"
...


@ -0,0 +1,39 @@
---
- name: Reboot host
hosts: all
remote_user: root
tasks:
- name: check if reboot is required
stat:
path: /lib/modules/{{ ansible_kernel }}
register: modules
- name: reboot host
shell: 'sleep 5 && shutdown -r now "Reboot by Ansible."'
register: reboot
async: 1
poll: 0
ignore_errors: true
when: modules.stat.isdir is not defined
- name: wait for server to come down
local_action: wait_for host={{ inventory_hostname }} port=22 state=stopped
when:
- reboot.stdout is defined
- "'Reboot by Ansible.' in reboot.stdout"
- name: wait for server to come up
local_action: wait_for host={{ inventory_hostname }} port=22 state=started
when:
- reboot.stdout is defined
- "'Reboot by Ansible.' in reboot.stdout"
- name: check uptime
shell: 'uptime'
register: uptime
- name: show uptime
debug:
msg: "Uptime: {{ uptime.stdout }}"
...

1
playbooks/roles Symbolic link

@ -0,0 +1 @@
../roles

7
playbooks/setup_awx.yml Normal file

@ -0,0 +1,7 @@
---
- name: setup AWX
hosts: server1.seboto.net
remote_user: root
roles:
- awx
...

1
playbooks/test.txt Normal file

@ -0,0 +1 @@
This is a test file.

15
playbooks/test.yml Normal file

@ -0,0 +1,15 @@
---
- name: Moin Hello World
hosts: all
tasks:
- name: Moin Hello Message
debug:
msg: "Moin! Hello World!"
- name: Copy File
copy:
src: test.txt
dest: /tmp/test_{{ ansible_hostname }}.txt
mode: "0644"
owner: jens
group: users
...

27
reboot.yml Normal file

@ -0,0 +1,27 @@
---
- name: Reboot host
hosts: all
remote_user: root
tasks:
- name: reboot host
shell: 'sleep 5 && shutdown -r now "Reboot by Ansible."'
register: reboot
async: 1
poll: 0
ignore_errors: true
- name: wait for server to come down
local_action: wait_for host={{ inventory_hostname }} port=22 state=stopped
- name: wait for server to come up
local_action: wait_for host={{ inventory_hostname }} port=22 state=started
- name: check uptime
shell: 'uptime'
register: uptime
- name: show uptime
debug:
msg: "Uptime: {{ uptime.stdout }}"
...

39
reboot_required.yml Normal file

@ -0,0 +1,39 @@
---
- name: Reboot host
hosts: all
remote_user: root
tasks:
- name: check if reboot is required
stat:
path: /lib/modules/{{ ansible_kernel }}
register: modules
- name: reboot host
shell: 'sleep 5 && shutdown -r now "Reboot by Ansible."'
register: reboot
async: 1
poll: 0
ignore_errors: true
when: modules.stat.isdir is not defined
- name: wait for server to come down
local_action: wait_for host={{ inventory_hostname }} port=22 state=stopped
when:
- reboot.stdout is defined
- "'Reboot by Ansible.' in reboot.stdout"
- name: wait for server to come up
local_action: wait_for host={{ inventory_hostname }} port=22 state=started
when:
- reboot.stdout is defined
- "'Reboot by Ansible.' in reboot.stdout"
- name: check uptime
shell: 'uptime'
register: uptime
- name: show uptime
debug:
msg: "Uptime: {{ uptime.stdout }}"
...


@ -0,0 +1,74 @@
---
#
# AWX version and base directory where the Git repo gets checked out
#
awx_version: 19.2.0
awx_basedir: /opt/awx
awx_github_repo: https://github.com/ansible/awx.git
#
# directory and names of files for the docker-compose project
#
awx_composedir: /opt/awxcompose{{ awx_version }}
awx_compose_name: 'docker-compose.yml'
awx_compose_override_name: 'docker-compose.override.yml'
awx_prefix: "awx{{ awx_version.split('.') | join }}"
#
# Properties of the UNIX user awx
#
awx_user: awx
awx_group: docker
awx_password: '%a1W2x3$'
#
# Flag to force a deployment if already deployed
#
awx_force_deployment: False
#
# Flag to keep local data when awx_force_deployment is set to True
#
awx_keep_existing: False
#
# Flag to disable active Schedules in AWX database
#
disable_schedule: True
#
# Build User Interface
# Remark: Must be run at least once
#
build_ui: True
#
# Build and Push Docker Image
# Remark: Build of the Docker image is required before running a new AWX version
#
build_docker_image: True
push_docker_image: True
#
# Name and version of the Docker image
#
awx_image: "{{ docker_registry }}/awx/awx_devel"
awx_image_tag: "{{ awx_version }}"
#
# PostgreSQL settings
#
pg_port: 5432
pg_username: 'awxuser'
pg_database: 'awx'
pg_password: 'awxpassw0rd'
#
# Number of AWX containers. If set to 1, no haproxy is deployed
#
cluster_node_count: 1
traefik_http_port: 8013
http_port: 8013
https_port: 8043
sdb_port_base: 15899
#database_dump_file: pg_dumpall_awx_17.1.0_2021-05-30_23.00.01.sql.gz
database_dump_file: pg_dump_awx_2021-06-09_11.23.45.sql.gz
docker_registry: pi4.seboto.net:50000
#
# AWX Host for Traefik
#
awx_traefik_host: "awx{{ awx_version.split('.') | join }}.seboto.ma-wan.de"
awx_traefik_sans:
- "ansible{{ awx_version.split('.') | join }}.seboto.ma-wan.de"
...
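These defaults are meant to be overridden per deployment, e.g. when rolling out a new AWX release or scaling the number of containers (a haproxy is only added when cluster_node_count is greater than 1, as noted above). A hedged sketch of such an override; the inventory path and the chosen values are placeholders:

# example only: deploy a given AWX version with three containers behind haproxy
ansible-playbook -i <inventory> playbooks/setup_awx.yml \
  -e awx_version=19.2.0 -e cluster_node_count=3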

74
roles/awx/files/HouseKeeping.sh Executable file

@ -0,0 +1,74 @@
#!/bin/sh
#
# This script removes old files from a directory.
#
# Options:
# -d <directory>
# -t <time in days to keep files>
# -p <pattern to search>
# -v ==> VERBOSE
#
usage()
{
echo "Usage: `basename $0`: -d <dir> -t <time> -p <pattern> -v"
echo "Options:"
echo "-d <directory to search>"
echo "-t <files older than x days>"
echo "-p <search pattern>"
echo "-v ==> Verbose flag"
echo ""
echo "A script to cleanup a directory"
exit 0
}
WORKFILE=/tmp/`basename $0`.$$
VERBOSE=0
#
while getopts d:t:p:v OPT
do
case ${OPT} in
d)
DIR=${OPTARG}
;;
p)
PATTERN=${OPTARG}
;;
t)
TIME_KEEP=${OPTARG}
;;
v)
VERBOSE=1
;;
\?)
usage
;;
esac
done
if [ -z "${DIR}" ] ; then
echo "Must specify a directory, aborting..."
usage
fi
if [ ! -d "${DIR}" ] ; then
echo "Directory ${DIR} does not exist, aborting ..."
exit 1
fi
if [ -z "${PATTERN}" ] ; then
echo "Must specify a pattern to search, aborting..."
usage
fi
if [ ! -z "${TIME_KEEP}" ] ; then
MTIME="-mtime +${TIME_KEEP}"
fi
if [ ${VERBOSE} -eq 0 ] ; then
PRINT=""
else
PRINT="-print"
fi
find ${DIR} -name "${PATTERN}" -type f ${MTIME} ${PRINT} -exec rm -f {} \;
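For reference, a usage sketch matching the options documented in the header; the directory mirrors the default backup location used by pg_dump.sh later in this commit, and the values are examples only:

# example only: purge backups older than 3 days, verbosely
./HouseKeeping.sh -d /var/lib/pgdocker/backup_19.2.0 -t 3 -p '*' -v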


@ -0,0 +1,51 @@
# Copyright (c) 2015 Ansible, Inc. (formerly AnsibleWorks, Inc.)
# All Rights Reserved.
# Local Django settings for AWX project. Rename to "local_settings.py" and
# edit as needed for your development environment.
# All variables defined in awx/settings/development.py will already be loaded
# into the global namespace before this file is loaded, to allow for reading
# and updating the default settings as needed.
###############################################################################
# MISC PROJECT SETTINGS
###############################################################################
# Enable the following lines and install the browser extension to use Django debug toolbar
# if your deployment method is not VMware or Docker-for-Mac you may
# need a different IP address from request.META['REMOTE_ADDR']
# INTERNAL_IPS = ('172.19.0.1', '172.18.0.1', '192.168.100.1')
# ALLOWED_HOSTS = ['*']
# Location for cross-development of inventory plugins
AWX_ANSIBLE_COLLECTIONS_PATHS = '/var/lib/awx/vendor/awx_ansible_collections'
# The UUID of the system, for HA.
SYSTEM_UUID = '00000000-0000-0000-0000-000000000000'
# If set, use -vvv for project updates instead of -v for more output.
# PROJECT_UPDATE_VVV=True
###############################################################################
# LOGGING SETTINGS
###############################################################################
# Enable logging to syslog. Setting level to ERROR captures 500 errors,
# WARNING also logs 4xx responses.
# Enable the following lines to turn on lots of permissions-related logging.
#LOGGING['loggers']['awx.main.access']['level'] = 'DEBUG'
#LOGGING['loggers']['awx.main.signals']['level'] = 'DEBUG'
#LOGGING['loggers']['awx.main.permissions']['level'] = 'DEBUG'
# Enable the following line to turn on database settings logging.
#LOGGING['loggers']['awx.conf']['level'] = 'DEBUG'
# Enable the following lines to turn on LDAP auth logging.
#LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console']
#LOGGING['loggers']['django_auth_ldap']['level'] = 'DEBUG'
BROADCAST_WEBSOCKET_PORT = 8013
BROADCAST_WEBSOCKET_VERIFY_CERT = False
BROADCAST_WEBSOCKET_PROTOCOL = 'http'


@ -0,0 +1,31 @@
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_tokens off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
sendfile on;
#tcp_nopush on;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}


@ -0,0 +1,122 @@
upstream uwsgi {
server localhost:8050;
}
upstream daphne {
server localhost:8051;
}
# server {
# listen 8013 default_server;
# listen [::]:8013 default_server;
# server_name _;
# return 301 https://$host:8043$request_uri;
# }
server {
listen 8013 default_server;
# If you have a domain name, this is where to add it
server_name _;
keepalive_timeout 65;
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
add_header Strict-Transport-Security max-age=15768000;
location /static/ {
root /awx_devel;
try_files /awx/ui/$uri /awx/$uri /awx/public/$uri =404;
access_log off;
sendfile off;
}
location ~ ^/websocket {
# Pass request to the upstream alias
proxy_pass http://daphne;
# Require http version 1.1 to allow for upgrade requests
proxy_http_version 1.1;
# We want proxy_buffering off for proxying to websockets.
proxy_buffering off;
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# enable this if you use HTTPS:
proxy_set_header X-Forwarded-Proto https;
# pass the Host: header from the client for the sake of redirects
proxy_set_header Host $http_host;
# We've set the Host header, so we don't need Nginx to muddle
# about with redirects
proxy_redirect off;
# Depending on the request value, set the Upgrade and
# connection headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
location / {
# Add trailing / if missing
rewrite ^(.*[^/])$ $1/ permanent;
uwsgi_read_timeout 120s;
uwsgi_pass uwsgi;
include /etc/nginx/uwsgi_params;
}
}
server {
listen 8043 default_server ssl;
# If you have a domain name, this is where to add it
server_name _;
keepalive_timeout 65;
ssl_certificate /etc/nginx/nginx.crt;
ssl_certificate_key /etc/nginx/nginx.key;
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
# intermediate configuration. tweak to your needs.
ssl_protocols TLSv1.2;
ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256';
ssl_prefer_server_ciphers on;
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
add_header Strict-Transport-Security max-age=15768000;
location /static/ {
root /awx_devel;
try_files /awx/ui/$uri /awx/$uri /awx/public/$uri =404;
access_log off;
sendfile off;
}
location ~ ^/websocket {
# Pass request to the upstream alias
proxy_pass http://daphne;
# Require http version 1.1 to allow for upgrade requests
proxy_http_version 1.1;
# We want proxy_buffering off for proxying to websockets.
proxy_buffering off;
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# enable this if you use HTTPS:
proxy_set_header X-Forwarded-Proto https;
# pass the Host: header from the client for the sake of redirects
proxy_set_header Host $http_host;
# We've set the Host header, so we don't need Nginx to muddle
# about with redirects
proxy_redirect off;
# Depending on the request value, set the Upgrade and
# connection headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
location / {
# Add trailing / if missing
rewrite ^(.*[^/])$ $1/ permanent;
uwsgi_read_timeout 120s;
uwsgi_pass uwsgi;
include /etc/nginx/uwsgi_params;
}
}

Binary file not shown.


@ -0,0 +1,28 @@
---
- log-level: info
- control-service:
service: control
filename: /var/run/receptor/receptor.sock
- local-only:
- work-command:
worktype: local
command: ansible-runner
params: worker
allowruntimeparams: true
- work-kubernetes:
worktype: kubernetes-runtime-auth
authmethod: runtime
allowruntimeauth: true
allowruntimepod: true
allowruntimeparams: true
- work-kubernetes:
worktype: kubernetes-incluster-auth
authmethod: incluster
allowruntimeauth: true
allowruntimepod: true
allowruntimeparams: true


@ -0,0 +1,10 @@
unixsocket /var/run/redis/redis.sock
unixsocketperm 770
port 0
# Do not actually listen to any tcp port
# but include the bind directive because without it redis will
# listen on the public interface. Port 0 causes it to NOT listen on
# the public interface. Adding the below line is an extra precaution.
# If a developer comes by later and wants to listen on a tcp port and changes
# the above port, it will ONLY listen on the local interface.
bind 127.0.0.1


@ -0,0 +1,115 @@
[supervisord]
umask = 022
minfds = 4096
nodaemon=true
[program:awx-dispatcher]
command = make dispatcher
autostart = true
autorestart = true
stopwaitsecs = 1
stopsignal=KILL
stopasgroup=true
killasgroup=true
redirect_stderr=true
stdout_events_enabled = true
stderr_events_enabled = true
[program:awx-receiver]
command = make receiver
autostart = true
autorestart = true
stopwaitsecs = 1
stopsignal=KILL
stopasgroup=true
killasgroup=true
redirect_stderr=true
stdout_events_enabled = true
stderr_events_enabled = true
[program:awx-wsbroadcast]
command = make wsbroadcast
autostart = true
autorestart = true
stopwaitsecs = 1
stopsignal=KILL
stopasgroup=true
killasgroup=true
redirect_stderr=true
stdout_events_enabled = true
stderr_events_enabled = true
[program:awx-uwsgi]
command = make uwsgi
autostart = true
autorestart = true
redirect_stderr=true
stopwaitsecs = 1
stopsignal=KILL
stopasgroup=true
killasgroup=true
stdout_events_enabled = true
stderr_events_enabled = true
[program:awx-daphne]
command = make daphne
autostart = true
autorestart = true
redirect_stderr=true
stopwaitsecs = 1
stopsignal=KILL
stopasgroup=true
killasgroup=true
stdout_events_enabled = true
stderr_events_enabled = true
[program:awx-nginx]
command = make nginx
autostart = true
autorestart = true
redirect_stderr=true
stdout_events_enabled = true
stderr_events_enabled = true
[program:awx-rsyslogd]
command = rsyslogd -n -i /var/run/awx-rsyslog/rsyslog.pid -f /var/lib/awx/rsyslog/rsyslog.conf
autostart = true
autorestart = true
stopwaitsecs = 5
stopsignal=TERM
stopasgroup=true
killasgroup=true
redirect_stderr=true
stdout_events_enabled = true
stderr_events_enabled = true
[program:awx-receptor]
command = receptor --config /etc/receptor/receptor.conf
autostart = true
autorestart = true
stopsignal = KILL
stopasgroup = true
killasgroup = true
redirect_stderr=true
stdout_events_enabled = true
stderr_events_enabled = true
[group:tower-processes]
programs=awx-dispatcher,awx-receiver,awx-uwsgi,awx-daphne,awx-nginx,awx-wsbroadcast,awx-rsyslogd
priority=5
[unix_http_server]
file=/var/run/supervisor/supervisor.sock
[supervisorctl]
serverurl=unix:///var/run/supervisor/supervisor.sock ; use a unix:// URL for a unix socket
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[eventlistener:stdout]
command = supervisor_stdout
buffer_size = 100
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler


@ -0,0 +1,681 @@
---
#
# make sure that group <awx_group> is defined
#
- name: create group {{ awx_group|default('awx') }} if not already present
group:
name: "{{ awx_group|default('awx') }}"
state: present
#
# make sure that user <awx_user> is defined
# (user will be locked if <awx_password> is not defined)
#
- name: create user {{ awx_user|default('awx') }} if not already present
user:
name: "{{ awx_user }}"
shell: /bin/bash
comment: "AWX User"
group: "{{ awx_group|default('awx') }}"
groups: docker
home: "/home/{{ awx_user }}"
createhome: yes
password: "{{ awx_password|default('') | password_hash('sha512') }}"
password_lock: "{{ awx_password is not defined }}"
#
# Create the base directory for checking out the AWX repo from GitHub
#
- name: make sure that directory {{ awx_basedir }} exists
file:
path: "{{ awx_basedir }}"
state: directory
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0755'
#
# Check out the Git repo from GitHub using the given AWX version
# This is needed to build the image locally.
#
- name: checkout awx repo version {{ awx_version }} to {{ awx_basedir }}
git:
clone: yes
dest: "{{ awx_basedir | default('/opt/awx') }}/{{ awx_version }}"
repo: "{{ awx_github_repo }}"
version: "{{ awx_version }}"
force: yes
become: yes
become_user: "{{ awx_user }}"
#
# Add our proxy for http and https to the Makefile that we have just checked
# out from GitHub. Otherwise "make docker-compose-build", which builds the
# local Docker image, will fail.
#
- name: add build-args to Makefile in {{ awx_basedir }}/{{ awx_version }}
lineinfile:
path: "{{ awx_basedir }}/{{ awx_version }}/Makefile"
line: ' --build-arg http_proxy=${http_proxy} --build-arg https_proxy=${https_proxy} \'
firstmatch: yes
insertafter: ".*--build-arg BUILDKIT_INLINE_CACHE.*"
state: present
when:
- build_docker_image is defined
- build_docker_image|bool == True
#
# run "make docker-compose-build" in the checked out Git Repo context to
# build the docker image.
#
- name: run make docker-compose-build in {{ awx_basedir }}/{{ awx_version }}
make:
chdir: "{{ awx_basedir }}/{{ awx_version }}"
target: docker-compose-build
params:
COMPOSE_TAG: "{{ awx_image_tag }}"
DEVEL_IMAGE_NAME: "{{ awx_image }}:{{ awx_image_tag }}"
become: yes
become_user: "{{ awx_user }}"
when:
- build_docker_image is defined
- build_docker_image|bool == True
#
# Push the Docker image to our local registry
#
- name: push image to docker registry
include_tasks:
push.yml
when:
- push_docker_image is defined
- push_docker_image|bool == True
#
# create the directory where we will deploy our docker-compose
# configuration files.
#
- name: make sure that directory {{ awx_composedir }} exists
file:
path: "{{ awx_composedir }}"
state: directory
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0750'
#
# create a sub-directory initdb.d to hold the postgres database dump
# in case we want to deploy AWX using an existing database dump.
#
- name: make sure that directory {{ awx_composedir }}/initdb.d exists
file:
path: "{{ awx_composedir }}/initdb.d"
state: directory
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0755'
when:
- database_dump_file is defined
#
# copy the postgres database dump file to the initdb.d directory.
# During initialization of the postgres container, the file will
# be loaded and the database will be populated.
#
- name: copy database dump file to initialize postgres db
copy:
src: "{{ database_dump_file }}"
dest: "{{ awx_composedir }}/initdb.d/{{ database_dump_file }}"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0644'
when: database_dump_file is defined
#
# make sure that there is only one single postgres dump file in
# the initdb.d directory. Otherwise the initialization will fail
# because the postgres container will be unable to load two files
# with a similar database definition.
#
# Check if there are other files in the initdb.d directory and register
# list of files in variable obsolete_dump_files
#
- name: check if other files exist in initdb.d directory
find:
path: "{{ awx_composedir }}/initdb.d"
excludes: "{{ database_dump_file|default() }}"
register: obsolete_dump_files
#
# delete all files from initdb.d directory registered in variable
# named obsolete_dump_files.
#
- name: delete obsolete files from initdb.d directory
file:
path: "{{ item.path }}"
state: absent
with_items:
- "{{ obsolete_dump_files.files }}"
when:
- obsolete_dump_files.files is defined
#
# create a redis sub-directory under the compose directory
#
- name: Create redis directory
file:
path: "{{ awx_composedir }}/redis"
state: 'directory'
mode: '0755'
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
#
# create a sub-directory to store our secrets
#
- name: Create secrets directory
file:
path: "{{ awx_composedir }}/secrets"
state: 'directory'
mode: '0750'
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
#
# check if secrets are already available in secret sub-directory
# under the docker-compose directory
#
- name: Detect secrets
stat:
path: "{{ awx_composedir }}/secrets/{{ item }}.yml"
register: secrets
when: not lookup('vars', item, default='')
loop:
- pg_password
- secret_key
- broadcast_websocket_secret
#
# set variables based on the secrets
#
- name: set variables from secrets if needed
set_fact:
'{{ item }}': "{{ lookup('vars', item, default='') or lookup('password', '/dev/null chars=ascii_letters') }}"
when: not lookup('vars', item, default='')
loop:
- pg_password
- secret_key
- broadcast_websocket_secret
#
# write variables to secret files in secret sub-directory
# under the docker-compose directory
#
- name: Generate secrets if needed
template:
src: 'secrets.yml.j2'
dest: '{{ awx_composedir }}/secrets/{{ item.item }}.yml'
mode: '0640'
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
when: not lookup('vars', item.item, default='') and not item.stat.exists
loop: "{{ secrets.results }}"
#
# include vars from secret files if they are not explicitly defined
#
- name: Include generated secrets unless they are explicitly passed in
include_vars: "secrets/{{ item.item }}.yml"
#no_log: true
when: not lookup('vars', item.item, default='')
loop: "{{ secrets.results }}"
#
# write the SECRET_KEY file in the docker-compose directory
#
- name: Write out SECRET_KEY
copy:
content: "{{ secret_key }}"
dest: "{{ awx_composedir }}/SECRET_KEY"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0640'
no_log: true
#
# deploy configuration files to the docker-compose directory
#
- name: deploy configuration files
template:
src: "{{ item }}.j2"
dest: "{{ awx_composedir }}/{{ item }}"
mode: '0640'
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
register: deploy_awx_cfg
with_items:
- "database.py"
- "websocket_secret.py"
#
# deploy haproxy.cfg to the docker-compose directory
# if the cluster_node_count is greater than 1
# (default=1, haproxy is only deployed if cluster_node_count > 1)
#
- name: deploy configuration files
template:
src: "haproxy.cfg.j2"
dest: "{{ awx_composedir }}/haproxy.cfg"
mode: '0640'
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
register: deploy_ha_cfg
when: cluster_node_count|default(1)|int > 1
#
# remove haproxy.cfg from docker-compose directory if
# there is any leftover from previous deployments. File
# will be removed if cluster_node_count <= 1 (default=1).
#
- name: remove haproxy.cfg if no haproxy is deployed
file:
path: "{{ awx_composedir }}/haproxy.cfg"
state: absent
when: cluster_node_count|default(1)|int <= 1
#
# copy file local_settings.py to docker-compose directory
#
- name: Copy local_settings.py
copy:
src: "local_settings.py"
dest: "{{ awx_composedir }}/local_settings.py"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0640'
#
# get some information about the OS from the docker system
#
- name: Get OS info for sdb
shell: |
docker info | grep 'Operating System'
register: os_info
changed_when: false
#
# Get the UID of the user which will run the AWX container.
# The AWX container will not run as root. The user is defined
# using the variable awx_user.
#
- name: Get user UID
shell: "id -u {{ awx_user }}"
register: awx_user_id
changed_when: false
#
# set a variable user_id based on the output from the previous command
# this will be used in the docker-compose.yml template
#
- name: Set fact with user UID
set_fact:
user_id: "{{ awx_user_id.stdout|int }}"
#
# set "awx_image_tag" variable from the VERSION file in the Git Repo if
# no "awx_image_tag" is defined.
#
- name: Set global version if not provided
set_fact:
awx_image_tag: "{{ lookup('file', awx_basedir + '/' + awx_version + '/VERSION') }}"
when: awx_image_tag is not defined
#
# copy the supervisor.conf to the docker-compose directory
#
- name: deploy supervisor.conf
copy:
src: supervisor.conf
dest: "{{ awx_composedir }}/supervisor.conf"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
#
# copy the receptor.conf to the docker-compose directory
#
- name: deploy receptor.conf
copy:
src: receptor.conf
dest: "{{ awx_composedir }}/receptor.conf"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
#
# copy the redis.conf to the redis sub-directory in
# the docker-compose directory
#
- name: deploy redis.conf
copy:
src: redis.conf
dest: "{{ awx_composedir }}/redis/redis.conf"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0640'
#
# create docker-compose.yml in the docker-compose directory
# from a template
#
- name: deploy docker-compose.yml
template:
src: docker-compose.yml.j2
dest: "{{ awx_composedir }}/{{ awx_compose_name }}"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0644'
#
# create docker-compose.override.yml in the docker-compose directory
# from a template
#
- name: deploy docker-compose.override.yml
template:
src: docker-compose.override.yml.j2
dest: "{{ awx_composedir }}/{{ awx_compose_override_name }}"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0644'
#
# Create directory to store the database dumps
#
- name: create directory {{ pg_database_dumpdir | default('/var/lib/pgdocker/backup_'+awx_version) }}
file:
# path: "{{ pg_database_dumpdir }}"
path: "{{ pg_database_dumpdir | default('/var/lib/pgdocker/backup_'+awx_version) }}"
state: directory
owner: "{{ awx_user }}"
group: "{{awx_group }}"
mode: '0750'
#
# deploy pg_dump.sh to dump the postgres database to a gzipped sql
# file.
#
- name: deploy backup script pg_dump.sh
template:
src: pg_dump.sh.j2
dest: "{{ awx_composedir }}/pg_dump.sh"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0750'
#
# deploy HouseKeeping.sh to remove files older than x days
#
- name: deploy maintenance script HouseKeeping.sh
copy:
src: HouseKeeping.sh
dest: "{{ awx_composedir }}/HouseKeeping.sh"
owner: "{{ awx_user }}"
group: "{{ awx_group }}"
mode: '0750'
#
# install cron entries to run the postgres backup daily at 23:00 and 05:00
#
- name: create a cron file under /etc/cron.d for postgres backup
cron:
name: "postgres backup for awx ({{ item.tag }})"
weekday: "*"
minute: "{{ item.mm }}"
hour: "{{ item.hh }}"
user: "{{ awx_user }}"
job: "{{ awx_composedir }}/pg_dump.sh"
#cron_file: ansible_awx-postgres-backup
with_items:
- { tag: "version {{ awx_version }} #1", hh: 23, mm: 00 }
- { tag: "version {{ awx_version }} #2", hh: 05, mm: 00 }
#
# start the postgres container from the newly created AWX deployment.
# This will either create a fresh awx database if no database_dump_file is
# present in the initdb.d sub-directory, or it will process the dump file and
# load the data it contains.
#
- name: bring up awx docker-compose project (postgres only)
docker_compose:
project_src: "{{ awx_composedir }}"
services: postgres
state: present
files: [ 'docker-compose.yml', 'docker-compose.override.yml' ]
register: compose_output
#
# check logs from postgres container to see if there is a message indicating that
# a previous database already exists. This can be the case if the awx_db volume
# exists before deploying this instance of AWX.
#
- name: check if postgres already has an existing db and skips init process
shell:
cmd: "docker logs {{ awx_prefix }}_postgres_1"
register: skip_db_init
#
# run "docker logs <postgres container>" every 5sec until the message
# "PostgreSQL init process complete" is found. This indicates that the
# loading of the dump file is complete and postgres is ready for client
# connections. This is only performed if the postgres container has
# not skipped the initialization sequence.
#
- name: wait for postgres to finish loading the dump
shell:
cmd: "docker logs {{ awx_prefix }}_postgres_1"
register: wait_for_initdb
until: wait_for_initdb.stdout is search('PostgreSQL init process complete')
retries: 30
delay: 5
when: not skip_db_init.stdout is search('Skipping initialization')
#
# Wait another 10s to ensure that postgres is available because it will restart
# at the end of the initialization sequence.
#
- name: wait another 10sec ...
pause:
seconds: 10
when: not skip_db_init.stdout is search('Skipping initialization')
#
# set docker_restart to true if either "deploy_awx_cfg.changed" is true
# or "deploy_ha_cfg.changed" is true
#
- name: set variable docker_restart depending on config file changes
set_fact:
docker_restart: "{{ deploy_awx_cfg.changed|default(false) or deploy_ha_cfg.changed|default(false) }}"
#
# bring up the remaining containers for the AWX deployment. These
# are redis and awx. The number depends on the variable cluster_node_count.
# If cluster_node_count is greater than 1, a haproxy container is also launched (default=1).
# Containers are restarted if one of the config files has changed.
#
- name: bring up awx docker-compose project
docker_compose:
project_src: "{{ awx_composedir }}"
state: present
restarted: "{{ docker_restart }}"
remove_orphans: true
files: [ 'docker-compose.yml', 'docker-compose.override.yml' ]
register: compose_output
#
# The initialization of the awx container may take a while. If a database
# dump is loaded from an older version of AWX, the database migration is
# performed automatically. We will wait for the message "supervisord started"
# in the log of the awx_1 container as an indication that the migration
# and initialization are complete.
#
- name: wait for awx initialization to complete
shell:
cmd: "docker logs {{ awx_prefix }}_awx_1"
register: wait_for_awx
until: wait_for_awx.stdout is search('supervisord started')
retries: 40
delay: 5
#
# Log in to the first awx container and run "make clean-ui ui-devel" to compile the
# AWX User Interface.
#
- name: run make clean-ui ui-devel to build AWX UI in docker container - This will take 5-10min...
shell:
cmd: "docker exec {{ awx_prefix }}_awx_1 make clean-ui ui-devel"
register: build_ui_output
when:
- build_ui is defined
- build_ui|bool == True
#
# show the output from the UI build process
#
- name: show output from build UI
debug:
var: build_ui_output
when:
- build_ui is defined
- build_ui|bool == True
#
# When a database dump is loaded, it may contain active schedules. These
# schedules can be disabled by using direct SQL updates in the awx database.
# To disable the schedules, the flag "disable_schedule" must be set to true.
#
- name: disable all awx schedules in database
shell:
cmd: |
docker exec -i {{ awx_prefix }}_postgres_1 psql --user {{ pg_username }} --dbname {{ pg_database }} -t -A <<-EOT
UPDATE main_schedule SET enabled='f', next_run=NULL
WHERE enabled='t';
EOT
register: disable_schedule_output
when:
- disable_schedule|default(false)
- database_dump_file is defined
#
# show the output from previous update on the awx schedules
#
- name: show output from disable awx schedules
debug:
var: disable_schedule_output.stdout
when:
- disable_schedule|default(false)
- database_dump_file is defined
#
# if a database dump is loaded, there might be old instances
# in a table called "main_instance" in the database. We will
# remove these "stale" instances from the table based on the
# field "version", which must differ from the awx_version
# that we are currently deploying.
#
# Even after the container seems to have completed the initialization,
# the database table main_instance still contains rows where the version
# field is empty.
#
# check until all rows have a valid version in table main_instance
#
- name: check if all rows have a valid version in main_instance
shell:
cmd: |
docker exec -i {{ awx_prefix }}_postgres_1 psql --user {{ pg_username }} --dbname {{ pg_database }} -t -A <<-EOT
SELECT hostname FROM main_instance
WHERE version = '';
EOT
register: no_version
until: no_version.stdout_lines|length == 0
retries: 12
delay: 5
when: database_dump_file is defined
#
# get list of hostnames from instances with an older awx version
#
- name: check if old instance exists in awx db
shell:
cmd: |
docker exec -i {{ awx_prefix }}_postgres_1 psql --user {{ pg_username }} --dbname {{ pg_database }} -t -A <<-EOT
SELECT hostname FROM main_instance
WHERE version != '{{ awx_version }}';
EOT
register: db_output
when: database_dump_file is defined
#
# show result from SQL command above containing the hostnames to delete
#
- name: show output from DB
debug:
var: db_output.stdout_lines
when:
- database_dump_file is defined
- db_output.stdout
#
# Obsolete instances will be removed from the awx deployment using the
# command "awx-manage deprovision_instance --hostname <host>" for
# all hosts found by the SQL command above.
#
- name: remove old instance from awx if present
shell:
cmd: "docker exec -i {{ awx_prefix }}_awx_1 awx-manage deprovision_instance --hostname {{ item }}"
register: remove_instance_output
with_items:
- "{{ db_output.stdout_lines }}"
when:
- database_dump_file is defined
- db_output.stdout_lines is defined
#
# show result from awx-manage command above
#
- name: show output from awx-manage
debug:
var: remove_instance_output
when: remove_instance_output.stdout is defined
#
# When we create a fresh awx deployment, an admin user is
# created automatically. The Admin password is written to
# the log of the awx_1 container. We will monitor the log
# of the container for the message "Admin password".
#
- name: wait for Admin User to be created (when no dump is loaded)
shell:
cmd: "docker logs {{ awx_prefix }}_awx_1"
register: wait_for_admin
until: wait_for_admin.stdout is search('Admin password')
retries: 10
delay: 5
when: not database_dump_file is defined
#
# set variable admin_password from the output of the awx_1
# container.
#
- name: retrieve admin password (only for new install)
set_fact:
admin_password: "{{ wait_for_admin.stdout_lines|default() |select('match', 'Admin password.+') | list }}"
when: not database_dump_file is defined
#
# Display some useful information about the just deployed awx
#
- name: display the fresh created password for the admin user (only for new install)
debug:
msg: "{{ item }} "
with_items:
- "Admin User: admin"
- "{{ admin_password|default('Admin password: ** not changed - loaded from dump **') }}"
- "URL: https://{{ ansible_hostname }}.{{ ansible_domain }}:{{ https_port }}/#/login"
...
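As described in the comments above, a dump placed in {{ awx_composedir }}/initdb.d seeds the postgres container on first start, and the copy task resolves database_dump_file via the role's files/ directory. A hedged sketch of staging a dump produced by pg_dump.sh for the next deployment; the paths and the inventory are placeholders:

# example only: stage an existing dump and redeploy from it
cp /var/lib/pgdocker/backup_19.2.0/pg_dump_awx_2021-06-09_11.23.45.sql.gz roles/awx/files/
ansible-playbook -i <inventory> playbooks/setup_awx.yml \
  -e database_dump_file=pg_dump_awx_2021-06-09_11.23.45.sql.gz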

100
roles/awx/tasks/main.yml Normal file

@ -0,0 +1,100 @@
---
#
# Check if there is already a container from the AWX deployment
#
- name: check if containers with prefix {{ awx_prefix }} already exist
docker_container_info:
name: "{{ awx_prefix }}_postgres_1"
register: postgres_container
#
# Check if there is already a volume containing the database from the AWX deployment
#
- name: check if volume with prefix {{ awx_prefix }} already exists
docker_volume_info:
name: "{{ awx_prefix }}_awx_db"
register: postgres_volume
#
# Destroy all related containers from a previous deployment
# if postgres container exists and the awx_force_deployment
# is set to true
#
- name: bring down awx docker-compose project
docker_compose:
project_src: "{{ awx_composedir }}"
services: postgres
remove_orphans: true
state: absent
files: [ 'docker-compose.yml', 'docker-compose.override.yml' ]
register: compose_output
when:
- postgres_container.exists == True
- awx_force_deployment|default(False)|bool == True
- awx_keep_existing|default(True)|bool == False
#
# Destroy the awx_db volume containing the AWX database if
# the awx_force_deployment is set to true
#
- name: cleanup awx_db volume using docker volume rm
docker_volume:
name: "{{ awx_prefix }}_awx_db"
state: absent
when:
- awx_force_deployment|default(False)|bool == True
- awx_keep_existing|default(True)|bool == False
#
# Destroy all redis_socket volumes. The number is determined
# by the variable 'cluster_node_count' (default=1). Volumes are only destroyed
# if the awx_force_deployment is set to true
#
- name: cleanup redis_socket volumes using docker volume rm
docker_volume:
name: "{{ awx_prefix }}_redis_socket_{{ item }}"
state: absent
with_sequence: start=1 end={{ cluster_node_count|default(1) }} stride=1
when:
- awx_force_deployment|default(False)|bool == True
- awx_keep_existing|default(True)|bool == False
#
# Destroy all receptor volumes. The number is determined
# by the variable 'cluster_node_count' (default=1). Volumes are only destroyed
# if the awx_force_deployment is set to true
#
- name: cleanup receptor volumes using docker volume rm
docker_volume:
name: "{{ awx_prefix }}_receptor_{{ item }}"
state: absent
with_sequence: start=1 end={{ cluster_node_count|default(1) }} stride=1
when:
- awx_force_deployment|default(False)|bool == True
- awx_keep_existing|default(True)|bool == False
#
# run the steps to deploy AWX from a dedicated
# YAML file. This is only performed if no postgres
# container or volume exists. This can be overridden
# by setting awx_force_deployment to true
#
- name: deploy awx using awx2_deploy.yml
include_tasks:
awx2_deploy.yml
when:
- postgres_container.exists == False or
(awx_force_deployment|default(False)|bool == True)
- postgres_volume.exists == False or
(awx_force_deployment|default(False)|bool == True)
#
# write a message if deployment is skipped
#
- name: information
debug:
msg: "AWX Deployment was skipped because either the awx_db volume or a postgres_container exists. You may force deployment by setting awx_force_deployment to true."
when:
- (postgres_container.exists == True and awx_force_deployment|default(False)|bool == False) or
(postgres_volume.exists == True and awx_force_deployment|default(False)|bool == False)
...
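As the message above says, an existing awx_db volume or postgres container is left untouched by default. A minimal sketch of forcing a clean redeployment, which also removes the awx_db, redis_socket and receptor volumes handled by the tasks above; the inventory path is a placeholder:

# example only: force a fresh deployment and discard the existing data volumes
ansible-playbook -i <inventory> playbooks/setup_awx.yml \
  -e awx_force_deployment=true -e awx_keep_existing=false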

48
roles/awx/tasks/push.yml Normal file

@ -0,0 +1,48 @@
---
#
# Check if the awx image exists locally. Otherwise we cannot
# push the image to our docker registry
#
- name: check if image {{ awx_image }}:{{ awx_image_tag }} already exists
docker_image_info:
name: "{{ awx_image }}:{{ awx_image_tag }}"
register: awx_image_info
- name: set var awx_image_exists based on awx_image_info
set_fact:
awx_image_exists: "{{ awx_image_info.images | length == 1 }}"
- name: debug
debug:
var: awx_image_exists
- name: tag latest
docker_image:
name: "{{ awx_image }}:{{awx_image_tag }}"
repository: "{{ awx_image }}:latest"
force_tag: yes
source: local
when: awx_image_exists|bool == True
- name: "push image with docker_image module to artifactory"
docker_image:
#repository: "{{ awx_image }}:{{ item }}"
name: "{{ awx_image }}"
tag: "{{ item }}"
push: yes
source: local
force_tag: yes
state: present
when: awx_image_exists|bool == True
with_items:
- "{{ awx_image_tag }}"
- "latest"
- debug:
msg: "docker image {{ docker_registry }}/{{ awx_image }}:{{ awx_image_tag | default('latest') }} pushed"
when: awx_image_exists|bool == True
- debug:
msg: "docker image {{ awx_image }}:{{ awx_image_tag | default('latest') }} not found locally, set build_docker_image=True to build the image first."
when: awx_image_exists|bool == False
...
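A one-line sketch of the hint in the last message, i.e. building the image before pushing it; the inventory path is a placeholder and both flags come from the role defaults earlier in this commit:

# example only: build the image locally and push it in the same run
ansible-playbook -i <inventory> playbooks/setup_awx.yml -e build_docker_image=true -e push_docker_image=true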


@ -0,0 +1,10 @@
---
- debug:
msg: "Vault location: {{ playbook_dir }}/../vaults/{{ jh_inventory_name }}"
- name: include vault
include_vars:
dir: "{{ playbook_dir }}/../vaults/{{ jh_inventory_name }}"
files_matching: vault.yml
no_log: true
...
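The include above resolves a per-inventory vault directory from jh_inventory_name, and the vault files later in this commit carry vault-id labels (vault_home, vault_jens, vault_seboto). A hedged sketch of a matching run, assuming this tasks file is pulled in by one of the playbooks in this commit and that jh_inventory_name is passed in explicitly; the inventory path is a placeholder:

# example only: load vaults/home/vault.yml for this run
ansible-playbook -i <inventory> playbooks/setup_awx.yml \
  -e jh_inventory_name=home --vault-id vault_home@prompt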

61
roles/awx/templates/d.o Normal file

@ -0,0 +1,61 @@
---
version: '2.1'
services:
# Primary AWX Development Container
awx_1:
hostname: awx1920_1
networks:
ansible-net:
aliases:
- awx1920_1
traefik-net:
aliases:
- awx1920_1
restart: unless-stopped
environment:
http_proxy:
https_proxy:
no_proxy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.awx1920_1.service=awx1920_1"
- "traefik.http.routers.awx1920_1.entrypoints=http"
- "traefik.http.routers.awx1920_1.rule=Host(`awx1920.seboto.my-wan.de`) || Host(`awx19.seboto.my-wan.de`)"
- "traefik.http.middlewares.awx1920_1-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.awx1920_1.middlewares=awx1920_1-https-redirect"
- "traefik.http.routers.awx1920_1-secure.entrypoints=https"
- "traefik.http.routers.awx1920_1-secure.rule=Host(`awx1920.seboto.my-wan.de`) || Host(`awx19.seboto.my-wan.de`)"
- "traefik.http.routers.awx1920_1-secure.tls=true"
- "traefik.http.routers.awx1920_1-secure.tls.domains[0].main=awx1920.seboto.my-wan.de"
- "traefik.http.routers.awx1920_1-secure.tls.domains[0].sans=awx19.seboto.my-wan.de"
- "traefik.http.routers.awx1920_1-secure.tls.certresolver=http"
- "traefik.http.routers.awx1920_1-secure.service=awx1920_1"
- "traefik.http.services.awx1920_1.loadbalancer.server.port=8013"
- "traefik.docker.network=traefik_proxy"
# volumes:
# - "/home/awx/certs/ldap.conf:/etc/openldap/ldap.conf:ro"
# - "/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro"
# - "/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro"
# - "/etc/pki/tls/private/ansible01.warburg.com.pem:/etc/nginx/nginx.key:ro"
# - "/etc/pki/tls/certs/ansible01.warburg.com.crt:/etc/nginx/nginx.crt:ro"
redis_1:
networks:
ansible-net:
aliases:
- redis_1
restart: unless-stopped
postgres:
networks:
ansible-net:
aliases:
- postgres
restart: unless-stopped
volumes:
- "/opt/awxcompose19.2.0/initdb.d:/docker-entrypoint-initdb.d:ro"
networks:
ansible-net:
traefik-net:
external: true
name: traefik_proxy
...


@ -0,0 +1,11 @@
DATABASES = {
'default': {
'ATOMIC_REQUESTS': True,
'ENGINE': 'awx.main.db.profiled_pg',
'NAME': "{{ pg_database }}",
'USER': "{{ pg_username }}",
'PASSWORD': "{{ pg_password }}",
'HOST': "{{ pg_hostname | default('postgres') }}",
'PORT': "{{ pg_port }}",
}
}


@ -0,0 +1,96 @@
---
version: '2.1'
services:
{% for i in range(cluster_node_count|default(1)|int) %}
{% set container_postfix = loop.index %}
{% set awx_sdb_port_start = 7899 + (loop.index0*1000) | int %}
{% set awx_sdb_port_end = 7999 + (loop.index0*1000) | int %}
# Primary AWX Development Container
awx_{{ container_postfix }}:
hostname: {{ awx_prefix }}_{{ container_postfix }}
networks:
ansible-net:
aliases:
- {{ awx_prefix }}_{{ container_postfix }}
traefik-net:
aliases:
- {{ awx_prefix }}_{{ container_postfix }}
restart: unless-stopped
environment:
http_proxy:
https_proxy:
no_proxy:
{% if cluster_node_count|default(1)|int == 1 %}
labels:
- "traefik.enable=true"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}.service={{ awx_prefix }}_{{ container_postfix }}"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}.entrypoints=http"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}.rule=Host(`{{ awx_prefix }}.seboto.my-wan.de`) || Host(`ansible-{{ awx_prefix }}.seboto.my-wan.de`)"
- "traefik.http.middlewares.{{ awx_prefix }}_{{ container_postfix }}-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}.middlewares={{ awx_prefix }}_{{ container_postfix }}-https-redirect"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}-secure.entrypoints=https"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}-secure.rule=Host(`{{ awx_prefix }}.seboto.my-wan.de`) || Host(`ansible-{{ awx_prefix }}.seboto.my-wan.de`)"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}-secure.tls=true"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}-secure.tls.domains[0].main={{ awx_prefix }}.seboto.my-wan.de"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}-secure.tls.domains[0].sans=ansible-{{ awx_prefix }}.seboto.my-wan.de"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}-secure.tls.certresolver=http"
- "traefik.http.routers.{{ awx_prefix }}_{{ container_postfix }}-secure.service={{ awx_prefix }}_{{ container_postfix }}"
- "traefik.http.services.{{ awx_prefix }}_{{ container_postfix }}.loadbalancer.server.port={{ traefik_http_port }}"
- "traefik.docker.network=traefik_proxy"
{% endif %}
# volumes:
# - "/home/awx/certs/ldap.conf:/etc/openldap/ldap.conf:ro"
# - "/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro"
# - "/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro"
# - "/etc/pki/tls/private/ansible01.warburg.com.pem:/etc/nginx/nginx.key:ro"
# - "/etc/pki/tls/certs/ansible01.warburg.com.crt:/etc/nginx/nginx.crt:ro"
redis_{{ container_postfix }}:
networks:
ansible-net:
aliases:
- redis_{{ container_postfix }}
restart: unless-stopped
{% endfor %}
{% if cluster_node_count|default(1)|int > 1 %}
haproxy:
hostname: haproxy
networks:
ansible-net:
aliases:
- haproxy
traefik-net:
aliases:
- haproxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.haproxy.service=haproxy"
- "traefik.http.routers.haproxy.entrypoints=http"
- "traefik.http.routers.haproxy.rule=Host(`{{ awx_prefix }}.seboto.my-wan.de`) || Host(`ansible-{{ awx_prefix }}.seboto.my-wan.de`)"
- "traefik.http.middlewares.haproxy-https-redirect.redirectscheme.scheme=https"
- "traefik.http.routers.haproxy.middlewares=haproxy-https-redirect"
- "traefik.http.routers.haproxy-secure.entrypoints=https"
- "traefik.http.routers.haproxy-secure.rule=Host(`{{ awx_prefix }}.seboto.my-wan.de`) || Host(`ansible-{{ awx_prefix }}.seboto.my-wan.de`)"
- "traefik.http.routers.haproxy-secure.tls=true"
- "traefik.http.routers.haproxy-secure.tls.domains[0].main={{ awx_prefix }}.seboto.my-wan.de"
- "traefik.http.routers.haproxy-secure.tls.domains[0].sans=ansible-{{ awx_prefix }}.seboto.my-wan.de"
- "traefik.http.routers.haproxy-secure.tls.certresolver=http"
- "traefik.http.routers.haproxy-secure.service=haproxy"
- "traefik.http.services.haproxy.loadbalancer.server.port={{ traefik_http_port }}"
- "traefik.docker.network=traefik_proxy"
{% endif %}
postgres:
networks:
ansible-net:
aliases:
- postgres
restart: unless-stopped
volumes:
- "{{ awx_composedir }}/initdb.d:/docker-entrypoint-initdb.d:ro"
networks:
ansible-net:
traefik-net:
external: true
name: traefik_proxy
...


@ -0,0 +1,102 @@
---
version: '2.1'
services:
{% for i in range(cluster_node_count|default(1)|int) %}
{% set container_postfix = loop.index %}
{% set awx_sdb_port_start = sdb_port_base|int + (loop.index0*1000) | int %}
{% set awx_sdb_port_end = sdb_port_base|int +100 + (loop.index0*1000) | int %}
# Primary AWX Development Container
awx_{{ container_postfix }}:
user: "{{ user_id }}"
image: "{{ awx_image }}:{{ awx_image_tag }}"
container_name: {{ awx_prefix }}_awx_{{ container_postfix }}
hostname: awx_{{ container_postfix }}
command: launch_awx.sh
environment:
OS: "{{ os_info.stdout }}"
SDB_HOST: 0.0.0.0
SDB_PORT: {{ awx_sdb_port_start }}
AWX_GROUP_QUEUES: tower
RECEPTORCTL_SOCKET: /var/run/receptor/receptor.sock
{% if loop.index == 1 %}
RUN_MIGRATIONS: 1
{% endif %}
links:
- postgres
- redis_{{ container_postfix }}
working_dir: "/awx_devel"
volumes:
- "{{ awx_basedir }}/{{ awx_version }}:/awx_devel"
- "{{ awx_composedir }}/supervisor.conf:/etc/supervisord.conf"
- "{{ awx_composedir }}//database.py:/etc/tower/conf.d/database.py"
- "{{ awx_composedir }}//websocket_secret.py:/etc/tower/conf.d/websocket_secret.py"
- "{{ awx_composedir }}//local_settings.py:/etc/tower/conf.d/local_settings.py"
- "{{ awx_composedir }}//SECRET_KEY:/etc/tower/SECRET_KEY"
- "{{ awx_composedir }}/receptor.conf:/etc/receptor/receptor.conf"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "~/.kube/config:/var/lib/awx/.kube/config"
- "redis_socket_{{ container_postfix }}:/var/run/redis/:rw"
- "receptor_{{ container_postfix }}:/var/run/receptor/"
privileged: true
tty: true
ports:
- "{{ awx_sdb_port_start }}-{{ awx_sdb_port_end }}:{{ awx_sdb_port_start }}-{{ awx_sdb_port_end }}" # sdb-listen
{% if cluster_node_count|default(1)|int == 1 %}
#- "6899:6899"
#- "8080:8080" # unused but mapped for debugging
#- "8888:8888" # jupyter notebook
- "{{ http_port }}:8013" # http
- "{{ https_port }}:8043" # https
{% endif %}
redis_{{ container_postfix }}:
image: redis:latest
container_name: {{ awx_prefix }}_redis_{{ container_postfix }}
volumes:
- "{{ awx_composedir }}/redis/redis.conf:/usr/local/etc/redis/redis.conf"
- "redis_socket_{{ container_postfix }}:/var/run/redis/:rw"
entrypoint: ["redis-server"]
command: ["/usr/local/etc/redis/redis.conf"]
{% endfor %}
{% if cluster_node_count|default(1)|int > 1 %}
haproxy:
user: "{{ user_id }}"
image: haproxy
container_name: {{ awx_prefix }}_haproxy_1
volumes:
- "{{ awx_composedir }}/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg"
ports:
- "{{ http_port }}:8013"
- "{{ https_port }}:8043"
- "1936:1936"
depends_on:
{% for i in range(cluster_node_count|default(1)|int) -%}
{% set container_postfix = loop.index %}
- "awx_{{ container_postfix }}"
{% endfor %}
{% endif %}
# A useful container that simply passes through log messages to the console
# helpful for testing awx/tower logging
# logstash:
# build:
# context: ./docker-compose
# dockerfile: Dockerfile-logstash
postgres:
image: postgres:12
container_name: {{ awx_prefix }}_postgres_1
environment:
POSTGRES_HOST_AUTH_METHOD: trust
POSTGRES_USER: {{ pg_username }}
POSTGRES_DB: {{ pg_database }}
POSTGRES_PASSWORD: {{ pg_password }}
volumes:
- "awx_db:/var/lib/postgresql/data"
volumes:
awx_db:
name: {{ awx_prefix }}_awx_db
{% for i in range(cluster_node_count|default(1)|int) -%}
{% set container_postfix = loop.index %}
receptor_{{ container_postfix }}:
name: {{ awx_prefix }}_receptor_{{ container_postfix }}
redis_socket_{{ container_postfix }}:
name: {{ awx_prefix }}_redis_socket_{{ container_postfix }}
{% endfor -%}


@ -0,0 +1,10 @@
DATABASE_USER={{ pg_username|quote }}
DATABASE_NAME={{ pg_database|quote }}
DATABASE_HOST={{ pg_hostname|default('postgres')|quote }}
DATABASE_PORT={{ pg_port|default('5432')|quote }}
DATABASE_PASSWORD={{ pg_password|default('awxpass')|quote }}
{% if pg_admin_password is defined %}
DATABASE_ADMIN_PASSWORD={{ pg_admin_password|quote }}
{% endif %}
AWX_ADMIN_USER={{ admin_user|quote }}
AWX_ADMIN_PASSWORD={{ admin_password|quote }}


@ -0,0 +1,49 @@
global
stats socket /tmp/admin.sock
stats timeout 30s
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
frontend localnodes
bind *:8013
mode http
default_backend nodes
frontend localnodes_ssl
bind *:8043
mode tcp
default_backend nodes_ssl
backend nodes
mode http
balance roundrobin
option forwardfor
option http-pretend-keepalive
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk HEAD / HTTP/1.1\r\nHost:localhost
{% for i in range(cluster_node_count|default(1)|int) %}
{% set container_postfix = loop.index %}
server {{ awx_prefix }}_awx_{{ container_postfix }} {{ awx_prefix }}_awx_{{ container_postfix }}:8013 check
{% endfor %}
backend nodes_ssl
mode tcp
balance roundrobin
{% for i in range(cluster_node_count|default(1)|int) %}
{% set container_postfix = loop.index %}
server {{ awx_prefix }}_awx_{{ container_postfix }} {{ awx_prefix }}_awx_{{ container_postfix }}:8043 check
{% endfor %}
listen stats
bind *:1936
stats enable
stats uri /


@ -0,0 +1,122 @@
#user awx;
worker_processes 1;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_tokens off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
sendfile on;
#tcp_nopush on;
#gzip on;
upstream uwsgi {
server 127.0.0.1:8050;
}
upstream daphne {
server 127.0.0.1:8051;
}
{% if ssl_certificate is defined %}
server {
listen 8052 default_server;
server_name _;
# Redirect all HTTP links to the matching HTTPS page
return 301 https://$host$request_uri;
}
{% endif %}
server {
{% if (ssl_certificate is defined) and (ssl_certificate_key is defined) %}
listen 8053 ssl;
ssl_certificate /etc/nginx/awxweb.pem;
ssl_certificate_key /etc/nginx/awxweb_key.pem;
{% elif (ssl_certificate is defined) and (ssl_certificate_key is not defined) %}
listen 8053 ssl;
ssl_certificate /etc/nginx/awxweb.pem;
ssl_certificate_key /etc/nginx/awxweb.pem;
{% else %}
listen 8052 default_server;
{% endif %}
# If you have a domain name, this is where to add it
server_name _;
keepalive_timeout 65;
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
add_header Strict-Transport-Security max-age=15768000;
# Protect against click-jacking https://www.owasp.org/index.php/Testing_for_Clickjacking_(OTG-CLIENT-009)
add_header X-Frame-Options "DENY";
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
location /static/ {
alias /var/lib/awx/public/static/;
}
location /favicon.ico { alias /var/lib/awx/public/static/favicon.ico; }
location /websocket {
# Pass request to the upstream alias
proxy_pass http://daphne;
# Require http version 1.1 to allow for upgrade requests
proxy_http_version 1.1;
# We want proxy_buffering off for proxying to websockets.
proxy_buffering off;
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# enable this if you use HTTPS:
proxy_set_header X-Forwarded-Proto https;
# pass the Host: header from the client for the sake of redirects
proxy_set_header Host $http_host;
# We've set the Host header, so we don't need Nginx to muddle
# about with redirects
proxy_redirect off;
# Depending on the request value, set the Upgrade and
# connection headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
location / {
# Add trailing / if missing
rewrite ^(.*)$http_host(.*[^/])$ $1$http_host$2/ permanent;
uwsgi_read_timeout 120s;
uwsgi_pass uwsgi;
include /etc/nginx/uwsgi_params;
{%- if extra_nginx_include is defined %}
include {{ extra_nginx_include }};
{%- endif %}
proxy_set_header X-Forwarded-Port 443;
uwsgi_param HTTP_X_FORWARDED_PORT 443;
}
}
}


@ -0,0 +1,41 @@
#!/bin/bash
#
PG_SERVICE=postgres
PG_USER={{ pg_username }}
PG_DATABASE={{ pg_database }}
PG_DUMPDIR={{ pg_database_dumpdir | default('/var/lib/pgdocker/backup_'+awx_version) }}
PG_TIMESTAMP=`date '+%Y-%m-%d_%H.%M.%S'`
PG_DUMPFILE=${PG_DUMPDIR}/pg_dump_${PG_DATABASE}_${PG_TIMESTAMP}.sql.gz
PG_NB_DAYS=3
#
# add /usr/local/bin to the search path
#
PATH=/usr/local/bin:${PATH}
export PATH
PROG_DIR=`dirname $0`
COMPOSE_DIR={{ awx_composedir }}
COMPOSE_FILES="${COMPOSE_DIR}/{{ awx_compose_name }} ${COMPOSE_DIR}/{{ awx_compose_override_name }}"
#
# Delete dump files older than 3 days
#
echo "Lösche alle Dumps, die älter sind als drei Tage ..."
${PROG_DIR}/HouseKeeping.sh -d ${PG_DUMPDIR} -t ${PG_NB_DAYS} -p '*' -v
[ -d ${PG_DUMPDIR} ] || mkdir ${PG_DUMPDIR}
echo "Stelle sicher, dass der PostgreSQL Container gestartet ist ..."
COMPOSE_OPTIONS="--project-directory ${COMPOSE_DIR} "
for COMPOSE_FILE in ${COMPOSE_FILES}
do
COMPOSE_OPTIONS="${COMPOSE_OPTIONS} -f ${COMPOSE_FILE}"
done
docker-compose ${COMPOSE_OPTIONS} up -d ${PG_SERVICE}
echo "Erstelle Dump der Datenbanken ..."
docker-compose ${COMPOSE_OPTIONS} exec -T ${PG_SERVICE} pg_dump -U ${PG_USER} ${PG_DATABASE} | gzip >${PG_DUMPFILE}
echo "Größe des Datenbank Dumps: `du -sh ${PG_DUMPFILE}`"


@ -0,0 +1 @@
{{ item.item }}: '{{ lookup('vars', item.item, default='') or lookup('password', '/dev/null chars=ascii_letters') }}'


@ -0,0 +1 @@
BROADCAST_WEBSOCKET_SECRET = "{{ broadcast_websocket_secret | b64encode }}"

5
roles/awx/vars/main.yml Normal file

@ -0,0 +1,5 @@
---
pg_password: awxpassw0rd
secret_key: 'awxsecret0'
broadcast_websocket_secret: 'hallo123'
...

1
test.txt Normal file

@ -0,0 +1 @@
This is a test file.

15
test.yml Normal file

@ -0,0 +1,15 @@
---
- name: Moin Hello World
hosts: all
tasks:
- name: Moin Hello Message
debug:
msg: "Moin! Hello World!"
- name: Copy File
copy:
src: test.txt
dest: /tmp/test_{{ ansible_hostname }}.txt
mode: "0644"
owner: jens
group: users
...

6
vaults/home/vault.yml Normal file

@ -0,0 +1,6 @@
$ANSIBLE_VAULT;1.2;AES256;vault_home
64623131373336656532363536366235303364663862396432336331643530323061373133343038
3239616562356535663264633735303234363361343964300a333566613536396236356637373063
61663166343639356562376161383239393932396630323866343763643962373362333035366133
3262626431313130350a373431356464616630643539383965616639323438353066316430373735
33353030313463393734303333323462306330326563373537356562616264383762

6
vaults/jens/vault.yml Normal file

@ -0,0 +1,6 @@
$ANSIBLE_VAULT;1.2;AES256;vault_jens
39393333323265613531366631613939313334646163323361616135353165636236626664666637
6462656438303931393938373762363033633166613963630a326534366366306235323031353165
36363833346537363132376230346165353831633333303539316332393233633765373531333037
3737353364303837370a383364396334393634323134313333356437626136363566623138623637
38396261623730613131393165336564343034356634376534386533353136666162

6
vaults/seboto/vault.yml Normal file

@ -0,0 +1,6 @@
$ANSIBLE_VAULT;1.2;AES256;vault_seboto
33313738323230383631316138333163653162303032616636623161346330666433396637353531
3736316265333362636535333130626565363661396435350a666433393562386538633937383332
62666138353934653865326464366639643038353934316164323335646164326433363837623130
3865666338616161620a626364656432313461353264653561643138633133313435303837643465
66356334393033343038643533663638356538666537373534656539666432313137