Ansible-based WordPress installation not working - MySQL

Hope y'all are enjoying the holidays. I am attempting an automated installation of WordPress on my Linux VM using Ansible. To that end, I have written an Ansible playbook that tries to mimic the official Ubuntu guide.
Here is the code:
- name: "Installing wordpress dependencies"
hosts: all
become: True
gather_facts: True
vars:
get_installer: 'curl -sS https://getcomposer.org/installer -o /tmp/composer-setup.php || /bin/true'
get_signature: 'curl -sS https://composer.github.io/installer.sig'
tasks:
- name: "Update repository"
apt:
update_cache: "yes"
- name: "Installing requirements"
apt:
name:
- "curl"
- "php"
- "php-cli"
- "gnupg"
- "unzip"
- "mysql-server"
- "php-fpm"
- "php-mysql"
- "apache2"
- "ghostscript"
- "libapache2-mod-php"
- "php-bcmath"
- "php-curl"
- "php-imagick"
- "php-intl"
- "php-json"
- "php-mbstring"
- "php-xml"
- "php-zip"
state: present
- name: Populate service facts
ansible.builtin.service_facts:
- name: Print service facts
ansible.builtin.debug:
var: ansible_facts.services
- name: "stopping nginx if running"
service:
name: nginx
state: stopped
when: "'nginx' in ansible_facts.services"
- name: "remove nginx if installed"
apt:
name:
- "nginx"
state: absent
- name: stop Mysql
service:
name: mysql
state: stopped
when: "'mysql' in ansible_facts.services"
- name: stop apache2
service:
name: apache2
state: stopped
when: "'apache2' in ansible_facts.services"
- name: Installing wordpress through source
  hosts: all
  become: True
  gather_facts: False
  vars:
    wprootdir: "/srv/www/wordpress"
  tasks:
    - name: checking if wp src dir exists
      stat:
        path: "{{ wprootdir }}"
      register: dir_details
    - name: delete existing wordpress source files
      become_user: www-data
      no_log: True
      file:
        #path: "{{ item.path }}"
        #recurse: True
        path: "{{ wprootdir }}"
        state: absent
        #with_items: "{{ path_list.files }}"
    - name: creating /var/www for wordpress source
      file:
        #path: "'{{ wp-root-dir }}' + 'wordpress'"
        path: "/srv/www/wordpress"
        recurse: yes
        state: directory
        owner: www-data
        mode: '0755'
    - name: downloading and extracting wordpress source
      shell:
        cmd: "curl https://wordpress.org/latest.tar.gz | sudo -u www-data tar zx -C /srv/www"
      register: status
    - fail:
        msg: "Unable to download or extract wordpress source"
      when: (status.rc != 0)
- name: Configuring apache for wordpress
  hosts: all
  become: True
  gather_facts: False
  vars:
    wprootdir: "/srv/www/wordpress"
    wpconffile: "/etc/apache2/sites-available/wordpress.conf"
  tasks:
    - name: deleting the file if it exists
      file:
        path: "{{ wpconffile }}"
        state: absent
    - name: creating wordpress conf file
      file:
        path: "{{ wpconffile }}"
        state: touch
        owner: www-data
    - name: populating wordpress conf file
      template:
        src: apache2.j2
        dest: "{{ wpconffile }}"
    - name: enabling the site
      shell:
        cmd: "a2ensite wordpress"
    - name: enable URL rewriting
      shell:
        cmd: "a2enmod rewrite"
    - name: disable default "it works" site
      shell:
        cmd: "a2dissite 000-default"
    - name: restart apache2
      service:
        name: apache2
        state: reloaded
- name: Configuring database
  hosts: all
  become: True
  gather_facts: True
  #gather_facts: yes
  vars:
    mysql_port: 3306
    mysql_socket: /var/run/mysqld/mysqld.sock
    mysql_superuser: root
    mysql_superuser_home: "{% if mysql_superuser == 'root' %}/root{% else %}/home/{{ mysql_superuser }}{% endif %}"
    mysql_superuser_password: SuperUserPwd
    mysql_wordpress_password: WordpressPwd
    http_port: 80
  tasks:
    - name: Installing PyMySql through pip
      pip:
        name: PyMySql
        state: present
    - name: ensure mysql is running and starts on boot
      service:
        name: mysql
        state: started
        enabled: True
    - name: Removes anonymous user account for localhost
      community.mysql.mysql_user:
        name: ''
        state: absent
        login_user: root
        login_password: ""
        login_unix_socket: "{{ mysql_socket }}"
      when: ansible_local.mysqlinfo is undefined
    - name: adding a password for root user
      mysql_user:
        # Update the superuser to have all grants and a password
        name: "{{ mysql_superuser }}"
        host: localhost
        password: "{{ mysql_superuser_password }}"
        priv: "*.*:ALL,GRANT"
        # Login *as root* to perform this change, even though you might
        # be altering the root user itself
        login_user: root
        login_password: ""
        login_port: "{{ mysql_port }}"
        login_host: localhost
        login_unix_socket: "{{ mysql_socket }}"
        # As a good measure, have ansible check whether an implicit login
        # is possible first
        check_implicit_admin: yes
      when: ansible_local.mysqlinfo is undefined
    - name: "Create custom fact directory"
      file:
        path: "/etc/ansible/facts.d"
        state: "directory"
        recurse: yes
      when: ansible_local.mysqlinfo is undefined
    - name: "record mysql info in custom fact"
      template:
        src: mysqlinfo.j2
        dest: /etc/ansible/facts.d/mysqlinfo.fact
        mode: 0644
      when: ansible_local.mysqlinfo is undefined
    - name: "re-run setup to use custom facts"
      setup:
        filter: ansible_local
      when: ansible_local.mysqlinfo is undefined
    - debug:
        msg:
          - "mysqlinfo is {{ ansible_local.mysqlinfo }}"
      when: ansible_local.mysqlinfo is defined
    #- name: Create system-wide mysql configuration file
    #  template:
    #    src: mysql_sys.cnf.j2
    #    dest: /etc/my.cnf
    #- name: Create mysql configuration file for `{{ mysql_superuser }}`
    #  template:
    #    src: mysql_superuser.cnf.j2
    #    dest: "{{ mysql_superuser_home }}/.my.cnf"
    - name: create database wordpress
      mysql_db:
        db: wordpress
        state: present
        login_user: "{{ ansible_local.mysqlinfo.mysql_superuser }}"
        login_password: "{{ ansible_local.mysqlinfo.mysql_superuser_password }}"
        login_unix_socket: "{{ mysql_socket }}"
      when: ansible_local.mysqlinfo is defined
    - name: Create database user 'wordpress' with all database privileges
      community.mysql.mysql_user:
        name: wordpress
        password: "{{ mysql_wordpress_password }}"
        login_user: "{{ ansible_local.mysqlinfo.mysql_superuser }}"
        login_password: "{{ ansible_local.mysqlinfo.mysql_superuser_password }}"
        priv: '*.*:ALL'
        state: present
      when: ansible_local.mysqlinfo is defined
    - name: Flush privileges
      mysql_query:
        login_db: wordpress
        login_user: "{{ ansible_local.mysqlinfo.mysql_superuser }}"
        login_password: "{{ ansible_local.mysqlinfo.mysql_superuser_password }}"
        login_unix_socket: "{{ mysql_socket }}"
        query: FLUSH PRIVILEGES
    # UFW Configuration
    - name: "UFW - Allow HTTP on port {{ http_port }}"
      ufw:
        rule: allow
        port: "{{ http_port }}"
        proto: tcp
      notify:
        - Restart Mysql
      tags: [ system ]
  handlers:
    - name: Restart Mysql
      service:
        name: mysql
        state: restarted
    - name: Restart Apache2
      service:
        name: apache2
        state: restarted
- name: Configuring wordpress to connect to the database
  hosts: all
  gather_facts: False
  become: true
  vars:
    wpconfigfile: "/srv/www/wordpress/wp-config.php"
  tasks:
    - name: copy sample config to wp-config.php
      #become_user: www-data
      copy:
        remote_src: yes
        src: /srv/www/wordpress/wp-config-sample.php
        dest: "{{ wpconfigfile }}"
        owner: www-data
    - name: "re-run setup to use custom facts"
      setup:
        filter: ansible_local
    - name: set database credentials in the config file
      become: false
      #become_user: www-data
      #become_method: "su"
      # multiple commands are run like this, whereas with a
      # single command one can use a cmd parameter
      # since this is technically *not* a list passed to /bin/sh
      # we do not need a list here. Instead it is a series of
      # commands being passed to /bin/sh
      #shell: |
      # apparently, passing this list directly doesn't seem to work
      # what works is this loop
      command: "{{ item }}"
      with_items:
        - "sudo -u www-data sed -i s/database_name_here/wordpress/ {{ wpconfigfile }}"
        - "sudo -u www-data sed -i s/username_here/wordpress/ {{ wpconfigfile }}"
        - "sudo -u www-data sed -i s/password_here/{{ ansible_local.mysqlinfo.mysql_wordpress_password }}/ {{ wpconfigfile }}"
    - name: get random secret keys
      uri:
        url: https://api.wordpress.org/secret-key/1.1/salt/
        return_content: yes
        body_format: json
      register: wordpress_keys
    - debug:
        var: wordpress_keys.content
    - name: delete existing bak file
      file:
        path: "{{ wpconfigfile }}.bak"
        state: absent
    - name: run script to remove key placeholders
      become_user: www-data
      script:
        chdir: /srv/www/wordpress/
        cmd: replacelines.py
        executable: /usr/bin/python3
      environment: /srv/www/wordpress/
    - name: update config file
      become_user: www-data
      copy:
        remote_src: yes
        src: "{{ wpconfigfile }}.bak"
        dest: "{{ wpconfigfile }}"
    - blockinfile:
        path: "{{ wpconfigfile }}"
        marker: // {mark} ANSIBLE MANAGED BLOCK
        # having this separator here was giving me issues
        #block: |
        block:
          "{{ wordpress_keys.content }}"
  handlers:
    - name: Restart Mysql
      service:
        name: mysql
        state: restarted
    - name: Restart Apache2
      service:
        name: apache2
        state: restarted
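As an aside, the download/extract step above could presumably also be done with Ansible's own get_url and unarchive modules instead of shelling out to curl and tar; a rough, untested sketch using the same URL and paths:

- name: downloading wordpress source (alternative sketch)
  get_url:
    url: https://wordpress.org/latest.tar.gz
    dest: /tmp/wordpress.tar.gz
- name: extracting wordpress source into /srv/www
  unarchive:
    remote_src: yes
    src: /tmp/wordpress.tar.gz
    dest: /srv/www
    owner: www-data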
Associated Jinja2 template files are below.
Apache2 template (apache2.j2):
<VirtualHost *:80>
    ServerName {{ ansible_hostname }}
    DocumentRoot "{{ wprootdir }}"
    <Directory "{{ wprootdir }}">
        Options FollowSymLinks
        AllowOverride Limit Options FileInfo
        DirectoryIndex index.php
        Require all granted
    </Directory>
    <Directory "{{ wprootdir }}/wp-content">
        Options FollowSymLinks
        Require all granted
    </Directory>
    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
mysqlinfo template (mysqlinfo.j2):
{
    "mysql_port": "{{ mysql_port }}",
    "mysql_socket": "{{ mysql_socket }}",
    "mysql_superuser": "{{ mysql_superuser }}",
    "mysql_superuser_password": "{{ mysql_superuser_password }}",
    "mysql_wordpress_password": "{{ mysql_wordpress_password }}"
}
replacelines.py script:
import re

with open("wp-config.php", "r") as wpconfig, open("wp-config.php.bak", "w") as wpconfigbak:
    for line in wpconfig:
        # copy every line except the placeholder key/salt definitions
        found = re.search(r'AUTH_KEY|SECURE_AUTH_KEY|LOGGED_IN_KEY|NONCE_KEY|AUTH_SALT|SECURE_AUTH_SALT|LOGGED_IN_SALT|NONCE_SALT', line.strip())
        if not found:
            wpconfigbak.write(line)
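(For reference, the same placeholder cleanup could presumably be done without a helper script, e.g. with the replace module; a rough, untested sketch against the same wp-config.php:)

- name: blank out the placeholder key/salt lines (alternative to replacelines.py)
  replace:
    path: /srv/www/wordpress/wp-config.php
    regexp: "^define\\(\\s*'(AUTH_KEY|SECURE_AUTH_KEY|LOGGED_IN_KEY|NONCE_KEY|AUTH_SALT|SECURE_AUTH_SALT|LOGGED_IN_SALT|NONCE_SALT)'.*$"
    replace: ''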
inventory file:
[local]
localhost ansible_connection=local
With this playbook I am able to see the WordPress landing page when I open 'localhost:80/' on my Linux machine. However, I am unable to get to the WordPress dashboard. I run the playbook like so: ansible-playbook -i inventory SetupWordpress.yaml
To save time, you may use my GitHub repo:
git clone -b WIP git@github.com:redbilledpanda/DevOpsScripts.git
cd DevOpsScripts && ansible-playbook -i inventory SetupWordpress.yaml
After the playbook completes, I go to http://localhost:80 and I am presented with the installer. I fill in the details and, apparently, it succeeds. When I try logging in, however, I don't see the dashboard. Instead, I never get past the login screen (it doesn't say incorrect credentials or anything, though). I am at a loss as to what I am doing wrong. Keen to hear from you folks.
UPDATE1: If I skip the part where I generate the WordPress salts/keys, it works: I can see the dashboard, etc. With these salts, however, it just won't get to the WordPress admin dashboard.
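One way to narrow this down might be to assert that the final wp-config.php ends up with exactly eight key/salt definitions (i.e. the placeholders were removed and the managed block was written exactly once); a rough sketch of such a check, appended to the last play:

- name: sanity-check the generated config (expect exactly eight key/salt defines)
  shell: grep -cE "'(AUTH_KEY|SECURE_AUTH_KEY|LOGGED_IN_KEY|NONCE_KEY|AUTH_SALT|SECURE_AUTH_SALT|LOGGED_IN_SALT|NONCE_SALT)'" /srv/www/wordpress/wp-config.php
  register: key_count
  changed_when: false
  failed_when: key_count.stdout | int != 8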

Using a minimal sample config file wpconfig.file
<?php
/**
 * The base configuration for WordPress
 * ...
 * Authentication unique keys and salts.
 *
 * Change these to different unique phrases! You can generate these using
 * the {@link https://api.wordpress.org/secret-key/1.1/salt/ WordPress.org secret-key service}.
 *
 * You can change these at any point in time to invalidate all existing cookies.
 * This will force all users to have to log in again.
 * ...
 */
and a minimal example playbook
---
- hosts: localhost
  become: false
  gather_facts: false
  tasks:
    - name: Get random secret keys
      uri:
        url: https://api.wordpress.org/secret-key/1.1/salt/
        return_content: yes
        body_format: json
      register: wordpress_keys
    - name: Show keys
      debug:
        var: wordpress_keys.content
    - name: Write keys to config
      blockinfile:
        path: wpconfig.file
        marker: // {mark} ANSIBLE MANAGED BLOCK
        block:
          "{{ wordpress_keys.content }}"
it results in the expected and probably correct output:
TASK [Show keys] ************************************************************************************************
ok: [localhost] =>
  wordpress_keys.content: |-
    define('AUTH_KEY', '...');
    define('SECURE_AUTH_KEY', '...');
    define('LOGGED_IN_KEY', '...');
    define('NONCE_KEY', '...');
    define('AUTH_SALT', '...');
    define('SECURE_AUTH_SALT', '...');
    define('LOGGED_IN_SALT', '...');
    define('NONCE_SALT', '...');
and the resulting wpconfig.file:
<?php
/**
 * The base configuration for WordPress
 * ...
 * Authentication unique keys and salts.
 *
 * Change these to different unique phrases! You can generate these using
 * the {@link https://api.wordpress.org/secret-key/1.1/salt/ WordPress.org secret-key service}.
 *
 * You can change these at any point in time to invalidate all existing cookies.
 * This will force all users to have to log in again.
 * ...
 */
// BEGIN ANSIBLE MANAGED BLOCK
define('AUTH_KEY', '...');
define('SECURE_AUTH_KEY', '...');
define('LOGGED_IN_KEY', '...');
define('NONCE_KEY', '...');
define('AUTH_SALT', '...');
define('SECURE_AUTH_SALT', '...');
define('LOGGED_IN_SALT', '...');
define('NONCE_SALT', '...');
// END ANSIBLE MANAGED BLOCK
Summary
Your current question and description seem to be focused not on the essential part but on everything loosely related around it:
- On the Ansible tasks I am not able to (re-)produce an issue.
- The part in question deals with configuration of a 3rd-party web service or PHP only.
- According to this, it seems not to be related to Ansible at all.
- The problem domain seems to be WordPress and PHP setup and configuration only, namely the config file.
- For further troubleshooting you may try the template module – Template a file out to a target host – for the config file, with keys generated like define('AUTH_KEY', '{{ lookup('password', '/dev/null chars=ascii_letters length=64') }}'); (see the sketch after this list).
- Check with the browser in incognito mode, because of invalidated cookies.
- Therefore it is also not about programming at all.
- Another Stack Exchange site such as serverfault.com, superuser.com, devops.stackexchange.com or wordpress.stackexchange.com might be a better fit for your question.
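For example, a minimal sketch of that template-based approach, assuming a hypothetical wp-config.php.j2 rendered by the template module (note that the password lookup against /dev/null generates fresh values on every run, which invalidates existing cookies):

{# wp-config.php.j2 – fragment generating all eight keys/salts locally #}
{% for key in ['AUTH_KEY', 'SECURE_AUTH_KEY', 'LOGGED_IN_KEY', 'NONCE_KEY',
               'AUTH_SALT', 'SECURE_AUTH_SALT', 'LOGGED_IN_SALT', 'NONCE_SALT'] %}
define('{{ key }}', '{{ lookup('password', '/dev/null chars=ascii_letters length=64') }}');
{% endfor %}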

- Regenerate the security keys.
- Make sure the keys are entered correctly in the wp-config.php file of your WordPress installation.

Related

Checking the key value in a JSON file

I'm having trouble verifying a value in a JSON file on a remote server. I have to write the file to the remote machine once from a template (j2). After that, I start a service that writes additional values to this file.
But when ansible-playbook is re-run, this file is overwritten because it differs from the template. Before the task that writes the file from the template, I therefore want to check the file for unique values.
For testing on a local machine, I do this and everything works:
- name: Check file
  hosts: localhost
  vars:
    config: "{{ lookup('file','config.json') | from_json }}"
  tasks:
    - name: Check info
      set_fact:
        info: "{{ config.Settings.TimeStartUP }}"
    - name: Print info
      debug:
        var: info
    - name: Create directory
      when: interfaces | length != 0
      ansible.builtin.file:
        ...
But when I try to do the same in a task on a remote machine, Ansible for some reason looks for the file on the local machine.
all.yml
---
config_file: "{{ lookup('file','/opt/my_project/config.json') | from_json }}"
site.yml
---
- name: Install My_project
  hosts: server
  tasks:
    - name: Checking if a value exists
      set_fact:
        info: "{{ config_file.Settings.TimeStartUP }}"
    - name: Print info
      debug:
        var: info
Error:
fatal: [server]: FAILED! => {"msg": "An unhandled exception occurred while templating '{{ lookup('file','/opt/my_project/config.json') | from_json }}'. Error was a <class 'ansible.errors.AnsibleError'>, original message: An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: /opt/my_project/config.json. could not locate file in lookup: /opt/my_project/config.json"}
Please tell me how to correctly check the key value in a JSON file on a remote server?
Fetch the files from the remote hosts first. For example, given the files below for testing:
shell> ssh admin@test_11 cat /tmp/config.json
{"Settings": {"TimeStartUP": "today"}}
shell> ssh admin@test_12 cat /tmp/config.json
{"Settings": {"TimeStartUP": "yesterday"}}
The playbook below
- hosts: test_11,test_12
  gather_facts: false
  tasks:
    - file:
        state: directory
        path: "{{ playbook_dir }}/configs"
      delegate_to: localhost
      run_once: true
    - fetch:
        src: /tmp/config.json
        dest: "{{ playbook_dir }}/configs"
    - include_vars:
        file: "{{ config_path }}"
        name: config
      vars:
        config_path: "{{ playbook_dir }}/configs/{{ inventory_hostname }}/tmp/config.json"
    - debug:
        var: config.Settings.TimeStartUP
will create the directory configs in playbook_dir on the controller and fetch the files from the remote hosts into this directory. See the parameter dest for how the path is built:
shell> cat configs/test_11/tmp/config.json
{"Settings": {"TimeStartUP": "today"}}
shell> cat configs/test_12/tmp/config.json
{"Settings": {"TimeStartUP": "yesterday"}}
Then include_vars reads each fetched file and stores the dictionary in the variable config:
ok: [test_11] =>
config.Settings.TimeStartUP: today
ok: [test_12] =>
config.Settings.TimeStartUP: yesterday
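An alternative sketch that avoids fetching files to the controller is to read the remote file with the slurp module and parse it in place (same path and key as above):

- hosts: server
  gather_facts: false
  tasks:
    - name: Read config.json from the remote host
      ansible.builtin.slurp:
        src: /opt/my_project/config.json
      register: config_raw
    - name: Decode and parse it
      ansible.builtin.set_fact:
        config: "{{ config_raw.content | b64decode | from_json }}"
    - ansible.builtin.debug:
        var: config.Settings.TimeStartUP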

Airflow 1.10.0 via Ansible

Below is my Ansible code which is trying to install Airflow 1.10.0.
sudo journalctl -u airflow-webserver -e output is
Dec 31 12:13:48 ip-10-136-94-232.eu-central-1.compute.internal airflow[22224]: ProgrammingError: (_mysql_exceptions.ProgrammingError) (1146, "Table 'airflow.log' doesn't exist") [SQL: u'INSERT INTO log (dttm, dag_id,
sudo journalctl -u airflow-scheduler -e output is
Dec 31 12:14:19 ip-10-136-94-232.eu-central-1.compute.internal airflow[22307]: ProgrammingError: (_mysql_exceptions.ProgrammingError) (1146, "Table 'airflow.log' doesn't exist") [SQL: u'INSERT INTO log (dttm, dag_id,
install.yml
---
- name: Airflow | Install | Basic Packages
  yum:
    name: "{{ packages }}"
  vars:
    packages:
      - gcc
      - gcc-c++
      - zlib-devel
      - bzip2-devel
      - openssl-devel
      - ncurses-devel
      - sqlite-devel
      - cyrus-sasl-devel
      - postgresql
      - postgresql-server
      - mariadb-server
      - mariadb
      - python2-pip
      - python2-devel
      - mysql-devel
      - python-setuptools
      - java-1.8.0-openjdk.x86_64
      - MySQL-python
      - mysql-connector-python
  register: airflow_dbsetup
  notify:
    - restart postgresql
    - restart rabbitmq-server
    - restart mariadb
- name: Airflow | Install | Upgrade pip
  shell: "pip install --upgrade pip"
- name: Airflow | Install | Upgrade setuptools
  shell: "pip install --upgrade setuptools"
- name: Airflow | Install | Start mariadb
  systemd: state=started name=mariadb daemon_reload=yes
  sudo: yes
- name: Airflow | Install | Group dev
  yum:
    name: "#Development"
    state: latest
- name: Airflow | Install | Numpy
  pip:
    name: numpy
    version: latest
  sudo: yes
- name: Airflow | Install | cython
  pip:
    name: cython
    version: latest
  sudo: yes
- name: Airflow | Install | With pip
  pip:
    name: apache-airflow
    version: 1.10.0
- name: Airflow | Install | crypto
  pip:
    name: apache-airflow[crypto]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | hive
  pip:
    name: apache-airflow[hive]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | MySQL
  pip:
    name: apache-airflow[mysql]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | jdbc
  pip:
    name: apache-airflow[jdbc]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | password
  pip:
    name: apache-airflow[password]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | s3
  pip:
    name: apache-airflow[s3]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | slack
  pip:
    name: apache-airflow[slack]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | ssh
  pip:
    name: apache-airflow[ssh]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | Reinstall pip
  shell: "pip install --upgrade --force-reinstall pip==9.0.0"
- name: Airflow | Install | devel
  pip:
    name: apache-airflow[devel]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | MSSql
  pip:
    name: apache-airflow[mssql]
    version: 1.10.0
  register: airflow_install
- name: Airflow | Install | Celery
  pip:
    name: celery
- name: Airflow | Install | psycopg2
  pip:
    name: psycopg2
- name: Airflow | Install | psycopg2-binary
  pip:
    name: psycopg2-binary
- name: Airflow | Install | erlang
  yum:
    name: https://github.com/rabbitmq/erlang-rpm/releases/download/v20.1.7/erlang-20.1.7-1.el6.x86_64.rpm
    state: present
- name: Airflow | Install | socat
  yum:
    name: socat
    state: present
- name: Airflow | Install | Rabbitmq
  yum:
    name: https://dl.bintray.com/rabbitmq/all/rabbitmq-server/3.7.8/rabbitmq-server-3.7.8-1.el7.noarch.rpm
    state: present
database.yml
---
- name: Airflow | DB | Uninstall markupsafe
  pip:
    name: markupsafe
    state: absent
- name: Airflow | DB | Install markupsafe
  pip:
    name: markupsafe
    version: latest
- name: Airflow | DB | Set PostgreSQL environment variables
  template:
    src: postgres.sh.j2
    dest: /etc/profile.d/postgres.sh
    mode: 0644
  notify: restart postgresql
- name: Airflow | DB | Ensure PostgreSQL data directory exists
  file:
    path: "{{ postgresql_data_dir }}"
    owner: "{{ postgresql_user }}"
    group: "{{ postgresql_group }}"
    state: directory
    mode: 0700
  become: yes
  become_method: sudo
  become_user: root
  register: airflow_dbsetup
  notify:
    - restart postgresql
- name: Airflow | DB | Check if PostgreSQL database is initialized
  stat:
    path: "{{ postgresql_data_dir }}/PG_VERSION"
  register: file_exists
- name: Airflow | DB | Initialize PostgreSQL Database
  command: "{{ airflow_executable_pgsql }} initdb"
  when: not file_exists.stat.exists
  become: yes
  become_method: sudo
  become_user: root
  register: airflow_dbsetup
  notify:
    - restart postgresql
- name: Airflow | DB | Copy Postgresql hba file
  copy:
    src: ../templates/pg_hba.conf.j2
    dest: "{{ postgresql_data_dir }}/pg_hba.conf"
    owner: "{{ postgresql_user }}"
    group: "{{ postgresql_group }}"
    mode: 0600
  become: yes
  become_method: sudo
  become_user: root
  register: airflow_dbsetup
  notify:
    - restart postgresql
- name: Airflow | DB | Copy Postgresql config file
  copy:
    src: ../templates/postgresql.conf.j2
    dest: "{{ postgresql_data_dir }}/postgresql.conf.j2"
    owner: "{{ postgresql_user }}"
    group: "{{ postgresql_group }}"
    mode: 0600
  become: yes
  become_method: sudo
  become_user: root
  register: airflow_dbsetup
  notify:
    - restart postgresql
- name: Airflow | DB | Restart PostgreSQL
  shell: "systemctl restart postgresql"
  become: yes
  become_method: sudo
  become_user: root
- name: Airflow | DB | Postgresql Create DB
  postgresql_db:
    name: airflow
- name: Airflow | DB | Postgresql User
  postgresql_user:
    db: airflow
    name: airflow
    password: airflow
    priv: "ALL"
    expires: infinity
  become: yes
  become_method: sudo
  become_user: root
  register: airflow_dbsetup
  notify:
    - restart postgresql
- name: Airflow | DB | Postgresql Privileges
  postgresql_privs:
    db: airflow
    objs: ALL_DEFAULT
    privs: ALL
    type: default_privs
    role: airflow
    grant_option: yes
- name: Airflow | DB | Restart RabbitMQ-Server
  shell: "systemctl restart rabbitmq-server"
  become: yes
  become_method: sudo
  become_user: root
- name: Airflow | DB | RabbitMQ Add v_host
  rabbitmq_vhost:
    name: af-host
    state: present
- name: Airflow | DB | RabbitMQ User
  rabbitmq_user:
    user: airflow
    password: airflow
    tags: airflow-user
    vhost: af-host
    configure_priv: .*
    read_priv: .*
    write_priv: .*
    state: present
    force: yes
  become: yes
  become_method: sudo
  become_user: root
  register: airflow_dbsetup
  notify:
    - restart rabbitmq-server
- name: Airflow | DB | Create MySQL DB
  mysql_db:
    name: airflow
    state: present
- name: Airflow | DB | MySQL user
  mysql_user:
    name: airflow
    password: airflow
    priv: '*.*:ALL,GRANT'
    state: present
#- name: CREATE USER
#  shell: "sudo -i -u postgres psql -c "CREATE USER airflow WITH PASSWORD 'airflow';""
#- name: CREATE DATABASE
#  shell: "sudo -i -u postgres psql -c "CREATE DATABASE airflow;""
#- name: GRANT PRIVILEGES ON DATABASE
#  shell: "sudo -i -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE airflow TO airflow;""
#- name: GRANT PRIVILEGES ON TABLES
#  shell: "sudo -i -u postgres psql -c "GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO airflow;""
config.yml
- name: Airflow | Config | Ensure airflow directories structure
  file:
    path: "{{ item }}"
    state: directory
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
  with_items:
    - "{{ airflow_logs_folder }}"
    - "{{ airflow_child_process_log_folder }}"
    - "{{ airflow_dags_folder }}"
    - "{{ airflow_plugins_folder }}"
- name: Airflow | Config | Copy gunicorn logrotate config
  template:
    src: gunicorn-logrotate.j2
    dest: /etc/logrotate.d/airflow
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0644
  become: yes
  become_method: sudo
  become_user: root
- name: Airflow | Config | Copy sample dag hello_world
  copy:
    src: "{{ airflow_home }}/cng-ansible/roles/airflow/files/cng-hello_world.py"
    dest: "{{ airflow_dags_folder }}/cng-hello_world.py"
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0644
    remote_src: True
- name: Airflow | Config | Synchronization of DAGs
  synchronize:
    src: "{{ airflow_home }}/cng-ansible/roles/airflow/files/"
    dest: "{{ airflow_dags_folder }}"
- name: Airflow | Config | Install airflow environment file
  template:
    src: airflow-environment-file.j2
    dest: "{{ airflow_environment_file_folder }}/airflow"
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
- name: Airflow | Config | Copy basic airflow config file
  template:
    src: airflow.cfg.j2
    dest: "{{ airflow_home }}/airflow/airflow.cfg"
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
  register: airflow_config
  notify:
    - restart airflow-webserver
    - restart airflow-scheduler
    - restart airflow-worker
- name: Airflow | Config | Initialize Airflow Database
  shell: "{{ airflow_executable }} initdb"
  args:
    chdir: "{{ airflow_home }}"
    executable: /bin/bash
  become: yes
  become_method: sudo
  become_user: "{{ airflow_user }}"
- name: Airflow | Config | Install webserver systemd unit file
  template:
    src: airflow-webserver.service.j2
    dest: /usr/lib/systemd/system/airflow-webserver.service
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
  register: airflow_config
  notify:
    - restart airflow-webserver
    - restart airflow-scheduler
    - restart airflow-worker
- name: Airflow | Config | Install scheduler systemd unit file
  template:
    src: airflow-scheduler.service.j2
    dest: /usr/lib/systemd/system/airflow-scheduler.service
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
  register: airflow_config
  notify:
    - restart airflow-webserver
    - restart airflow-scheduler
    - restart airflow-worker
- name: Airflow | Config | Install worker systemd unit file
  template:
    src: airflow-worker.service.j2
    dest: /usr/lib/systemd/system/airflow-worker.service
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
  register: airflow_config
  notify:
    - restart airflow-webserver
    - restart airflow-scheduler
    - restart airflow-worker
- name: Airflow | Config | Copy extra airflow config files (provided by playbooks)
  copy:
    src: "{{ item }}"
    dest: "{{ airflow_home }}/{{ item | basename }}"
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
  with_fileglob:
    - "{{ airflow_extra_conf_path }}/*"
  notify:
    - restart airflow-webserver
    - restart airflow-scheduler
    - restart airflow-worker
- name: Airflow | Config | Copy extra airflow config templates (provided by playbooks)
  template:
    src: "{{ item }}"
    dest: "{{ airflow_home }}/{{ item | basename }}"
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
  with_fileglob:
    - "{{ airflow_extra_conf_template_path }}/*"
  notify:
    - restart airflow-webserver
    - restart airflow-scheduler
    - restart airflow-worker
- name: Airflow | Config | Add variables from configuration file
  command: "{{ airflow_executable }} variables -s {{ item.key }} {{ item.value }}"
  environment:
    AIRFLOW_HOME: "{{ airflow_home }}"
  become: true
  become_user: "{{ airflow_user }}"
  with_items: "{{ airflow_admin_variables }}"
  tags:
    skip_ansible_lint
- name: Airflow | Config | Add connections from configuration file
  command: "{{ airflow_executable }} connections -a {% for key, value in item.iteritems() %}--{{ key }} '{{ value }}' {% endfor %}"
  environment:
    AIRFLOW_HOME: "{{ airflow_home }}"
  become: true
  become_user: "{{ airflow_user }}"
  with_items: "{{ airflow_admin_connections }}"
  tags:
    skip_ansible_lint
service.yml
---
- name: Airflow | Services | Configuring service
  systemd:
    name: "{{ item.key }}"
    state: "{{ item.value.state }}"
    enabled: "{{ item.value.enabled }}"
    daemon_reload: yes
  become: yes
  become_method: sudo
  become_user: root
  with_dict: "{{ airflow_services }}"
  when: "{{ item.value.enabled }}"
  changed_when: false
health.yml
---
- name: Airflow | Health | DB Bug fix
  shell: "mysql -u root -e 'alter table airflow.task_instance add column executor_config varchar(15) after task_id;'"
- name: Airflow | Health | Status
  wait_for:
    host: localhost
    port: "{{ item }}"
    state: started # Port should be open
    delay: 15 # No wait before first check (sec)
    timeout: 3 # Stop checking after timeout (sec)
  ignore_errors: yes
  with_items:
    - 8080
Error log while installing this on an AWS RHEL server:
TASK [../../roles/airflow : Airflow | Health | DB Bug fix] ********************************************************************************************************************
fatal: [127.0.0.1]: FAILED! => {"changed": true, "cmd": "mysql -u root -e 'alter table airflow.task_instance add column executor_config varchar(15) after task_id;'", "delta": "0:00:00.192266", "end": "2018-12-31 10:35:22.455342", "msg": "non-zero return code", "rc": 1, "start": "2018-12-31 10:35:22.263076", "stderr": "ERROR 1146 (42S02) at line 1: Table 'airflow.task_instance' doesn't exist", "stderr_lines": ["ERROR 1146 (42S02) at line 1: Table 'airflow.task_instance' doesn't exist"], "stdout": "", "stdout_lines": []}
I was following the link below to proceed with the installation, upgrading from 1.8 to 1.10.0:
https://airflow.apache.org/installation.html
Error after suggestions:
TASK [../../roles/airflow : Airflow | Config | Initialize Airflow Database] ***********************************************************************************************************************
fatal: [127.0.0.1]: FAILED! => {"changed": true, "cmd": "/usr/bin/airflow initdb", "delta": "0:00:00.202622", "end": "2018-12-31 16:15:59.082736", "msg": "non-zero return code", "rc": 1, "start": "2018-12-31 16:15:58.880114", "stderr": "Traceback (most recent call last):\n File \"/usr/bin/airflow\", line 21, in <module>\n from airflow import configuration\n File \"/usr/lib/python2.7/site-packages/airflow/__init__.py\", line 35, in <module>\n from airflow import configuration as conf\n File \"/usr/lib/python2.7/site-packages/airflow/configuration.py\", line 506, in <module>\n conf.read(AIRFLOW_CONFIG)\n File \"/usr/lib/python2.7/site-packages/airflow/configuration.py\", line 280, in read\n super(AirflowConfigParser, self).read(filenames)\n File \"/usr/lib/python2.7/site-packages/backports/configparser/__init__.py\", line 705, in read\n self._read(fp, filename)\n File \"/usr/lib/python2.7/site-packages/backports/configparser/__init__.py\", line 1087, in _read\n lineno)\nbackports.configparser.DuplicateSectionError: While reading from '/home/ec2-user/airflow/airflow.cfg' [line 60]: section u'core' already exists", "stderr_lines": ["Traceback (most recent call last):", " File \"/usr/bin/airflow\", line 21, in <module>", " from airflow import configuration", " File \"/usr/lib/python2.7/site-packages/airflow/__init__.py\", line 35, in <module>", " from airflow import configuration as conf", " File \"/usr/lib/python2.7/site-packages/airflow/configuration.py\", line 506, in <module>", " conf.read(AIRFLOW_CONFIG)", " File \"/usr/lib/python2.7/site-packages/airflow/configuration.py\", line 280, in read", " super(AirflowConfigParser, self).read(filenames)", " File \"/usr/lib/python2.7/site-packages/backports/configparser/__init__.py\", line 705, in read", " self._read(fp, filename)", " File \"/usr/lib/python2.7/site-packages/backports/configparser/__init__.py\", line 1087, in _read", " lineno)", "backports.configparser.DuplicateSectionError: While reading from '/home/ec2-user/airflow/airflow.cfg' [line 60]: section u'core' already exists"], "stdout": "", "stdout_lines": []}
New error log after implementing @kaxil's suggestion:
sqlalchemy.exc.InvalidRequestError: This Session's transaction has been rolled back due to a previous exception during flush. To begin a new transaction with this Session, first issue Session.rollback(). Original exception was: (_mysql_exceptions.ProgrammingError) (1146, "Table 'airflow.log' doesn't exist") [SQL: u'INSERT INTO log (dttm, dag_id, task_id, event, execution_date, owner, extra) VALUES (%s, %s, %s, %s, %s, %s, %s)'] [parameters: (datetime.datetime(2019, 1, 2, 10, 49, 11, 49590, tzinfo=<Timezone [UTC]>), None, None, 'cli_webserver', None, 'ec2-user', '{"full_command": "[\'/usr/bin/airflow\', \'webserver\']", "host_name": "ip-10-136-94-144.eu-central-1.compute.internal"}')]
In your config.yml file, can you reorder the below two tasks (Airflow | Config | Initialize Airflow Database and Airflow | Config | Copy basic airflow config file) to:
1. Airflow | Config | Copy basic airflow config file
2. Airflow | Config | Initialize Airflow Database
Basically, your airflow.cfg.j2 file should contain a metadata database connection string like sql_alchemy_conn = my_conn_string in the [core] section, as mentioned in https://airflow.apache.org/howto/set-config.html#setting-configuration-options (double-check it).
Once your config file is copied and initdb is run, it creates all the necessary tables needed by Airflow.
- name: Airflow | Config | Initialize Airflow Database
  shell: "{{ airflow_executable }} initdb"
  args:
    chdir: "{{ airflow_home }}"
    executable: /bin/bash
  become: yes
  become_method: sudo
  become_user: "{{ airflow_user }}"
- name: Airflow | Config | Copy basic airflow config file
  template:
    src: airflow.cfg.j2
    dest: "{{ airflow_home }}/airflow/airflow.cfg"
    owner: "{{ airflow_user }}"
    group: "{{ airflow_group }}"
    mode: 0640
  register: airflow_config
  notify:
    - restart airflow-webserver
    - restart airflow-scheduler
    - restart airflow-worker
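For reference, a minimal sketch of the [core] fragment mentioned above, for airflow.cfg.j2 (the credentials and host are placeholders matching the airflow database and user created in database.yml; adjust to your environment):

[core]
# metadata DB connection string – placeholder values
sql_alchemy_conn = mysql://airflow:airflow@localhost:3306/airflow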

Install mysql-server 5.7 on lv mounted on /var/lib/mysql

For two weeks I have been working on MySQL deployment with Ansible. I have to install MySQL on an LV.
Before the MySQL deployment, the Ansible script creates /var/lib/mysql, creates the LV and mounts it on /var/lib/mysql. Then it creates the mysql user and group and sets 0700 permissions on the MySQL directory. When that is done, Ansible deploys MySQL 5.7.
Part of my Ansible code:
- name: "Group : mysql"
group:
name: "mysql"
state: "present"
tags:
- User mysql
- name: "user : mysql"
user:
name: "mysql"
shell: "mysql"
group: "mysql"
createhome: "no"
append: "True"
state: "present"
tags:
- User
- name: "Set rights on mysql dir "
file:
path: "/var/lib/mysql"
owner: "mysql"
group: "mysql"
mode: 0700
tags:
- mysql dir rights
- name: "mysql root password"
debconf:
name: "mysql-server"
question: "mysql-server/root_password"
value: "{{ password_root_mysql }}"
vtype: "password"
when: password_root_mysql is defined
tags:
- Install
- name: "mysql root password confirmation"
debconf:
name: "mysql-server"
question: "mysql-server/root_password_again"
value: "{{ password_root_mysql }}"
vtype: "password"
when: password_root_mysql is defined
tags:
- Install mysql
- name: "Install : MySQL Server"
apt:
update_cache: "True"
name: "mysql-server"
install_recommends: "True"
tags:
- Install mysql
notify:
- stop mysql
- name: "Copie du template root.cnf.j2 vers root/.my.cnf "
template:
src: "{{ mysql_template_rootcnf }}"
dest: "~/.my.cnf"
owner: "root"
mode: "0600"
tags:
- Install mysql
So when I try to install mysql-server without any LV and directory preparation, it works. But when I prepare the MySQL directory with the right permissions, the installation doesn't work, whether the deployment is manual or automated.
Any ideas?
Ubuntu 16.04 with MySQL 5.7.
Ansible v2.7.
OK, I've found the problem: the lost+found directory in /var/lib/mysql (the LV is mounted on it) is treated like a database, and MySQL doesn't like that. In my code, I've just added:
- name: "Remove lost+found from {{ mysql_dir }}"
file:
path: "{{ mysql_dir }}/lost+found"
state: absent
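An alternative sketch is to leave lost+found in place and tell mysqld to ignore it via the ignore-db-dir option (valid for MySQL 5.6/5.7 only; the config path below assumes the stock Ubuntu 16.04 layout):

- name: "Alternative: make mysqld ignore lost+found"
  lineinfile:
    path: /etc/mysql/mysql.conf.d/mysqld.cnf   # stock Ubuntu 16.04 location (assumption)
    insertafter: '^\[mysqld\]'
    line: "ignore-db-dir=lost+found"

MySQL then needs a restart to pick up the change.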

Ansible similar role refactoring

Installing munin plugins is very similar each time: create some symlinks and template a config file.
For example:
Role munin_plugin_nginx:
---
- name: create symlink for plugin
  file:
    src="/usr/share/munin/plugins/{{ item }}"
    dest="/etc/munin/plugins/{{ item }}"
    state=link
  with_items:
    - "nginx_request"
    - "nginx_status"
- name: template /etc/munin/plugin-conf.d/nginx
  template:
    src: etc/munin/plugin-conf.d/nginx.j2
    dest: /etc/munin/plugin-conf.d/nginx
    owner: root
    group: root
    mode: 0644
  notify: restart munin-node
Role munin_plugin_httpd:
---
- name: create symlink for plugin
  file:
    src="/usr/share/munin/plugins/{{ item }}"
    dest="/etc/munin/plugins/{{ item }}"
    state=link
  with_items:
    - "apache_accesses"
    - "apache_processes"
    - "apache_volume"
- name: template /etc/munin/plugin-conf.d/httpd
  template:
    src: etc/munin/plugin-conf.d/httpd.j2
    dest: /etc/munin/plugin-conf.d/httpd
    owner: root
    group: root
    mode: 0644
  notify: restart munin-node
Other munin plugins have similar steps too.
How can I refactor these roles to avoid copy-pasted code?
One possible way:
Add /roles/munin_plugin/vars/main.yml:
---
munin_plugins_list:
  nginx:
    symlinks:
      - nginx_request
      - nginx_status
  httpd:
    symlinks:
      - apache_accesses
      - apache_processes
      - apache_volume
And /roles/munin_plugin/tasks/main.yml:
---
- name: check server type
  fail:
    msg: "Unknown server type \"{{ server_type }}\" – should be one of {{ munin_plugins_list.keys() }}"
  when: munin_plugins_list[server_type] is not defined
- name: create symlinks for plugin
  file:
    src: "/usr/share/munin/plugins/{{ item }}"
    dest: "/etc/munin/plugins/{{ item }}"
    state: link
  with_items: "{{ munin_plugins_list[server_type]['symlinks'] }}"
- name: template config file
  template:
    src: "etc/munin/plugin-conf.d/{{ server_type }}.j2"
    dest: "/etc/munin/plugin-conf.d/{{ server_type }}"
    owner: root
    group: root
    mode: 0644
  notify: restart munin-node
So you can apply the role like this:
roles:
  - role: munin_plugin
    server_type: nginx
  - role: munin_plugin
    server_type: httpd
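The same data-driven role can also be applied from a task list with include_role, looping over the server types (the host group name below is hypothetical):

- hosts: monitored    # hypothetical group name
  tasks:
    - name: apply munin_plugin once per server type
      include_role:
        name: munin_plugin
      vars:
        server_type: "{{ item }}"
      with_items:
        - nginx
        - httpd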

Can't access register variable in a loop

I've been following this example playbook to create Rackspace servers using Ansible:
http://nicholaskuechler.com/2015/01/09/build-rackspace-cloud-servers-ansible-virtualenv/
It works great, but only on one server at a time, so I am trying to make it more dynamic, using with_items to loop over the servers I want to build:
tasks:
  - name: Rackspace cloud server build request
    local_action:
      module: rax
      credentials: "{{ credentials }}"
      name: "{{ item }}"
      flavor: "{{ flavor }}"
      image: "{{ image }}"
      region: "{{ region }}"
      files: "{{ files }}"
      wait: yes
      state: present
      networks:
        - private
        - public
    with_items:
      - server-app-01
      - server-app-02
    register: rax
This creates the servers fine, but when I try to add them to the deploy group using the method from the link, I get an error. That is expected, since there is now a 'results' key. I've tried all kinds of ways to target this in the way I understand the documentation to suggest:
- name: Add new cloud server to host group
  local_action:
    module: add_host
    hostname: "{{ item.success.name }}"
    ansible_ssh_host: "{{ item.success.rax_accessipv4 }}"
    ansible_ssh_user: root
    groupname: deploy
  with_items: rax.results
(I've also tried many other ways to target this.)
But I get 'One or more undefined variables: 'list object' has no attribute 'rax_accessipv4''.
This is a stripped-down version of the object I get back from rax, through debug (these servers don't exist anymore):
http://pastebin.com/NRvM7anS
Can anyone tell me where I'm going wrong? I'm starting to go a bit mad.
If you notice, the type of success inside each element of rax.results is a list.
So this: hostname: "{{ item.success.name }}"
should be
hostname: "{{ item.success[0].name }}" or
hostname: "{{ item['success'][0]['name'] }}"
{
    "changed": true,
    "msg": "All items completed",
    "results": [
        {
            "instances": [
                {
                    "name": "server-app-01",
                    "rax_accessipv4": "134.213.51.171",
                    "rax_accessipv6": "2a00:1a48:7808:101:be76:4eff:fe08:5251",
                }
            ],
            "item": "server-app-01",
            "success": [
                {
                    "name": "server-app-01",
                    "rax_accessipv4": "134.213.51.171",
                    "rax_accessipv6": "2a00:1a48:7808:101:be76:4eff:fe08:5251",
                }
            ],
            "timeout": []
        },
        ......
    ]
}
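Applied to the original task, the corrected add_host call would look roughly like this (note that success can be an empty list for a failed build):

- name: Add new cloud server to host group
  local_action:
    module: add_host
    hostname: "{{ item.success[0].name }}"
    ansible_ssh_host: "{{ item.success[0].rax_accessipv4 }}"
    ansible_ssh_user: root
    groupname: deploy
  with_items: "{{ rax.results }}"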
I was just wrestling with this Friday. Here is my solution:
---
- name: Provision rackspace webheads
  hosts: localhost
  gather_facts: false
  max_fail_percentage: 10
  tasks:
    - name: Provision a set of instances
      local_action:
        group: servers
        count: 5
        exact_count: yes
        credentials: cred.ini
        flavor: <FLAVOR ID>
        group: raxhosts
        image: <IMAGE ID>
        key_name: <SSH KEYNAME>
        module: rax
        name: webheads
        state: present
        wait: yes
      register: rax
    - name: Add new instances to the group 'raxhosts'
      local_action:
        module: add_host
        hostname: "{{ item.name }}"
        ansible_ssh_host: "{{ item.rax_accessipv4 }}"
        ansible_ssh_pass: "{{ item.rax_adminpass }}"
        groupname: raxhosts
      with_items: rax.success
      when: rax.action == 'create'
    - name: Wait for hosts
      local_action: wait_for host={{ item.rax_accessipv4 }} port=22 delay=60 timeout=600 state=started
      with_items: rax.success
Here is what my cred.ini looks like:
[rackspace_cloud]
username =
api_key =
Run it like so:
RAX_CREDS_FILE=cred.ini RAX_REGION=DFW ansible-playbook <playbook>.yml