summaryrefslogtreecommitdiffstats
path: root/deploy
diff options
context:
space:
mode:
Diffstat (limited to 'deploy')
-rwxr-xr-x[-rw-r--r--]deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml6
-rw-r--r--deploy/adapters/ansible/kubernetes/roles/post-k8s/tasks/main.yml16
-rw-r--r--deploy/adapters/ansible/roles/config-osa/files/chrony.conf.j2104
-rw-r--r--deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml25
-rw-r--r--deploy/adapters/ansible/roles/config-osa/tasks/fix_rescue.yml10
-rwxr-xr-xdeploy/adapters/ansible/roles/config-osa/tasks/main.yml31
-rw-r--r--deploy/adapters/ansible/roles/config-osa/templates/openstack_user_config.yml.j28
-rw-r--r--deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j22
-rw-r--r--deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml2
-rw-r--r--deploy/client.py17
-rwxr-xr-xdeploy/compass_vm.sh34
-rw-r--r--deploy/conf/base.conf1
-rw-r--r--deploy/conf/compass.conf36
-rw-r--r--deploy/config_parse.py1
-rwxr-xr-xdeploy/launch.sh5
-rw-r--r--deploy/playbook_done.py7
-rw-r--r--deploy/status_callback.py78
17 files changed, 274 insertions, 109 deletions
diff --git a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
index 9f20cdbc..eb80066e 100644..100755
--- a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
+++ b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
@@ -30,3 +30,9 @@
max_fail_percentage: 0
roles:
- kargo
+
+- hosts: kube_master
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - post-k8s
diff --git a/deploy/adapters/ansible/kubernetes/roles/post-k8s/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/post-k8s/tasks/main.yml
new file mode 100644
index 00000000..3feca3e5
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/post-k8s/tasks/main.yml
@@ -0,0 +1,16 @@
+##############################################################################
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: fetch admin.conf
+ fetch:
+ src: /etc/kubernetes/admin.conf
+ dest: /opt/admin.conf
+ flat: "yes"
+ when: inventory_hostname == groups['kube_master'][0]
diff --git a/deploy/adapters/ansible/roles/config-osa/files/chrony.conf.j2 b/deploy/adapters/ansible/roles/config-osa/files/chrony.conf.j2
new file mode 100644
index 00000000..1c2443e0
--- /dev/null
+++ b/deploy/adapters/ansible/roles/config-osa/files/chrony.conf.j2
@@ -0,0 +1,104 @@
+# {{ ansible_managed }}
+#
+# This the default chrony.conf file for the Debian chrony package. After
+# editing this file use the command 'invoke-rc.d chrony restart' to make
+# your changes take effect. John Hasler <jhasler@debian.org> 1998-2008
+
+# See www.pool.ntp.org for an explanation of these servers. Please
+# consider joining the project if possible. If you can't or don't want to
+# use these servers I suggest that you try your ISP's nameservers. We mark
+# the servers 'offline' so that chronyd won't try to connect when the link
+# is down. Scripts in /etc/ppp/ip-up.d and /etc/ppp/ip-down.d use chronyc
+# commands to switch it on when a dialup link comes up and off when it goes
+# down. Code in /etc/init.d/chrony attempts to determine whether or not
+# the link is up at boot time and set the online status accordingly. If
+# you have an always-on connection such as cable omit the 'offline'
+# directive and chronyd will default to online.
+#
+# Note that if Chrony tries to go "online" and dns lookup of the servers
+# fails they will be discarded. Thus under some circumstances it is
+# better to use IP numbers than host names.
+
+{% for ntp_server in security_ntp_servers %}
+server {{ ntp_server }} maxpoll 10 minpoll 8
+{% endfor %}
+
+# Look here for the admin password needed for chronyc. The initial
+# password is generated by a random process at install time. You may
+# change it if you wish.
+
+keyfile /etc/chrony/chrony.keys
+
+# Set runtime command key. Note that if you change the key (not the
+# password) to anything other than 1 you will need to edit
+# /etc/ppp/ip-up.d/chrony, /etc/ppp/ip-down.d/chrony, /etc/init.d/chrony
+# and /etc/cron.weekly/chrony as these scripts use it to get the password.
+
+commandkey 1
+
+# I moved the driftfile to /var/lib/chrony to comply with the Debian
+# filesystem standard.
+
+driftfile /var/lib/chrony/chrony.drift
+
+# Comment this line out to turn off logging.
+
+log tracking measurements statistics
+logdir /var/log/chrony
+
+# Stop bad estimates upsetting machine clock.
+
+maxupdateskew 100.0
+
+# Dump measurements when daemon exits.
+
+dumponexit
+
+# Specify directory for dumping measurements.
+
+dumpdir /var/lib/chrony
+
+# Let computer be a server when it is unsynchronised.
+
+local stratum 10
+
+# Allow computers on the unrouted nets to use the server.
+
+{% for subnet in security_allowed_ntp_subnets %}
+allow {{ subnet }}
+{% endfor %}
+
+# This directive forces `chronyd' to send a message to syslog if it
+# makes a system clock adjustment larger than a threshold value in seconds.
+
+logchange 0.5
+
+# This directive defines an email address to which mail should be sent
+# if chronyd applies a correction exceeding a particular threshold to the
+# system clock.
+
+# mailonchange root@localhost 0.5
+
+# This directive tells chrony to regulate the real-time clock and tells it
+# Where to store related data. It may not work on some newer motherboards
+# that use the HPET real-time clock. It requires enhanced real-time
+# support in the kernel. I've commented it out because with certain
+# combinations of motherboard and kernel it is reported to cause lockups.
+
+# rtcfile /var/lib/chrony/chrony.rtc
+
+# If the last line of this file reads 'rtconutc' chrony will assume that
+# the CMOS clock is on UTC (GMT). If it reads '# rtconutc' or is absent
+# chrony will assume local time. The line (if any) was written by the
+# chrony postinst based on what it found in /etc/default/rcS. You may
+# change it if necessary.
+rtconutc
+
+{% if security_ntp_bind_local_interfaces_only | bool %}
+# Listen for NTP requests only on local interfaces.
+port 0
+bindcmdaddress 127.0.0.1
+{% if not security_disable_ipv6 | bool %}
+bindcmdaddress ::1
+{% endif %}
+{% endif %}
diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml b/deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml
new file mode 100644
index 00000000..61d263b4
--- /dev/null
+++ b/deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml
@@ -0,0 +1,25 @@
+# #############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# #############################################################################
+---
+- name: copy the repo_fix_pandas.yml
+ template:
+ src: repo_fix_pandas.yml
+ dest: /etc/ansible/roles/repo_build/tasks/repo_fix_pandas.yml
+
+- name: fix the python-ldap version
+ lineinfile:
+ dest: /etc/ansible/roles/os_keystone/defaults/main.yml
+ regexp: '^ - python-ldap'
+ line: ' - python-ldap==2.5.2'
+
+- name: add pkgconfig in gnocchi requires pip packages
+ lineinfile:
+ dest: /etc/ansible/roles/repo_build/defaults/main.yml
+ insertafter: "repo_pip_packages:"
+ line: ' - pkgconfig'
diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/fix_rescue.yml b/deploy/adapters/ansible/roles/config-osa/tasks/fix_rescue.yml
index eea06b48..ff7d4250 100644
--- a/deploy/adapters/ansible/roles/config-osa/tasks/fix_rescue.yml
+++ b/deploy/adapters/ansible/roles/config-osa/tasks/fix_rescue.yml
@@ -41,3 +41,13 @@
dest: "/opt/openstack-ansible/playbooks/lxc-hosts-setup.yml"
regexp: "max_fail_percentage*"
state: absent
+
+- name: fix rescue problem for setup-openstack
+ blockinfile:
+ dest: "/opt/openstack-ansible/playbooks/setup-openstack.yml"
+ block: |
+ - hosts: localhost
+ user: root
+ tasks:
+ - name: Mark setup-openstack completed
+ shell: echo "Setup openstack completed!"
diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
index 49e4e26d..f9eef749 100755
--- a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
@@ -7,11 +7,24 @@
# http://www.apache.org/licenses/LICENSE-2.0
# #############################################################################
---
+- name: remove osa log directory if exist
+ file:
+ path: /var/log/osa/
+ state: absent
+
- name: create osa log directory
file:
path: /var/log/osa/
state: directory
+- name: remove osa user secrets if exist
+ shell: cp -rf /opt/openstack-ansible/etc/openstack_deploy/user_secrets.yml /etc/openstack_deploy/
+
+- name: generate the osa password
+ command: python pw-token-gen.py --file /etc/openstack_deploy/user_secrets.yml
+ args:
+ chdir: "/opt/openstack-ansible/scripts"
+
- name: disable kernel update in rt_kvm scenario
lineinfile:
dest: /etc/ansible/roles/openstack_hosts/vars/ubuntu-16.04.yml
@@ -88,6 +101,11 @@
delay: 10
when: hostvars[hostvars[inventory_hostname]['groups']['controller'][0]]['local_mirror'] == 'CentOS'
+- name: copy chrony.conf
+ copy:
+ src: chrony.conf.j2
+ dest: /etc/ansible/roles/ansible-hardening/templates/
+
- name: update the directory of chrony key
lineinfile:
dest: /etc/ansible/roles/ansible-hardening/templates/chrony.conf.j2
@@ -314,11 +332,6 @@
when:
- "{{ hostvars[inventory_hostname]['groups']['controller'] | length < 2 }}"
-- name: copy the repo_fix_andas.yml
- template:
- src: repo_fix_pandas.yml
- dest: /etc/ansible/roles/repo_build/tasks/repo_fix_pandas.yml
-
# - name: change repore build
# lineinfile:
# dest: /etc/ansible/roles/repo_build/tasks/main.yml
@@ -327,10 +340,8 @@
- include: meters.yml
-- name: fix the python-ldap version
- lineinfile:
- dest: /etc/ansible/roles/os_keystone/defaults/main.yml
- regexp: '^ - python-ldap'
- line: ' - python-ldap==2.5.2'
+# upstream has fixed this issue so it was commented out;
+# it may be needed again in the future
+- include: fix_pip_version.yml
- include: fix_rescue.yml
diff --git a/deploy/adapters/ansible/roles/config-osa/templates/openstack_user_config.yml.j2 b/deploy/adapters/ansible/roles/config-osa/templates/openstack_user_config.yml.j2
index be119fbe..a4f54b43 100644
--- a/deploy/adapters/ansible/roles/config-osa/templates/openstack_user_config.yml.j2
+++ b/deploy/adapters/ansible/roles/config-osa/templates/openstack_user_config.yml.j2
@@ -123,9 +123,11 @@ haproxy_hosts:
{% endfor %}
# rsyslog server
-#log_hosts:
- # log1:
- # ip: 10.1.0.53
+log_hosts:
+{% for host in groups.controller%}
+ {{host}}:
+ ip: {{ hostvars[host]['ansible_ssh_host'] }}
+{% endfor %}
###
### OpenStack
diff --git a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
index 5fa999a5..130b5ad1 100644
--- a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
+++ b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
@@ -65,4 +65,4 @@ neutron_provider_networks:
security_sshd_permit_root_login: yes
security_ntp_servers:
- - 45.79.111.114
+ - {{ ntp_server }}
diff --git a/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml
index a55b1a01..a6ecb82f 100644
--- a/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml
@@ -20,7 +20,7 @@
- fail:
msg: "some task failed when setup openstack."
- when: setup_openstack_result.stdout.find('failed=1') != -1
+ when: setup_openstack_result.stdout.find('Mark setup-openstack completed') == -1
- fail:
msg: "some host are unreachable."
diff --git a/deploy/client.py b/deploy/client.py
index 910aa7f8..891e632c 100644
--- a/deploy/client.py
+++ b/deploy/client.py
@@ -399,17 +399,20 @@ class CompassClient(object):
except:
raise RuntimeError('subnet %s format is invalid' % subnet)
- if CONF.expansion == "false":
+ subnet_exist = False
+ for subnet_in_db in subnets_in_db:
+ if subnet == subnet_in_db['subnet']:
+ subnet_mapping[subnet] = subnet_in_db['id']
+ subnet_exist = True
+ break
+
+ if not subnet_exist:
status, resp = self.client.add_subnet(subnet)
LOG.info('add subnet %s status %s response %s',
subnet, status, resp)
if not self.is_ok(status):
raise RuntimeError('failed to add subnet %s' % subnet)
subnet_mapping[resp['subnet']] = resp['id']
- else:
- for subnet_in_db in subnets_in_db:
- if subnet == subnet_in_db['subnet']:
- subnet_mapping[subnet] = subnet_in_db['id']
self.subnet_mapping = subnet_mapping
@@ -475,8 +478,8 @@ class CompassClient(object):
if host['hostname'] in hostnames:
self.host_mapping[host['hostname']] = host['id']
- if CONF.expansion == "false":
- assert(len(self.host_mapping) == len(machines))
+ # if CONF.expansion == "false":
+ # assert(len(self.host_mapping) == len(machines))
def set_cluster_os_config(self, cluster_id):
"""set cluster os config."""
diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh
index cc0471ae..cf215f3b 100755
--- a/deploy/compass_vm.sh
+++ b/deploy/compass_vm.sh
@@ -10,6 +10,26 @@
compass_vm_dir=$WORK_DIR/vm/compass
rsa_file=$compass_vm_dir/boot.rsa
ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file"
+
+function check_container_alive() {
+ docker exec -it compass-deck bash -c "exit" 1>/dev/null 2>&1
+ local deck_state=$?
+ docker exec -it compass-tasks bash -c "exit" 1>/dev/null 2>&1
+ local tasks_state=$?
+ docker exec -it compass-cobbler bash -c "exit" 1>/dev/null 2>&1
+ local cobbler_state=$?
+ docker exec -it compass-db bash -c "exit" 1>/dev/null 2>&1
+ local db_state=$?
+ docker exec -it compass-mq bash -c "exit" 1>/dev/null 2>&1
+ local mq_state=$?
+
+    if [ $((deck_state||tasks_state||cobbler_state||db_state||mq_state)) == 0 ]; then
+ echo "true"
+ else
+ echo "false"
+ fi
+}
+
function tear_down_compass() {
sudo virsh destroy compass > /dev/null 2>&1
sudo virsh undefine compass > /dev/null 2>&1
@@ -27,11 +47,11 @@ function install_compass_core() {
}
function set_compass_machine() {
- local config_file=$WORK_DIR/installer/compass-docker-compose/group_vars/all
+ local config_file=$WORK_DIR/installer/docker-compose/group_vars/all
sed -i '/pxe_boot_macs/d' $config_file
echo "pxe_boot_macs: [${machines}]" >> $config_file
- ansible-playbook $WORK_DIR/installer/compass-docker-compose/add_machine.yml
+ ansible-playbook $WORK_DIR/installer/docker-compose/add_machine.yml
}
function install_compass() {
@@ -129,13 +149,9 @@ function wait_ok() {
}
function launch_compass() {
- local group_vars=$WORK_DIR/installer/compass-docker-compose/group_vars/all
+ local group_vars=$WORK_DIR/installer/docker-compose/group_vars/all
sed -i "s#^\(compass_dir:\).*#\1 $COMPASS_DIR#g" $group_vars
- sed -i "s#^\(compass_deck:\).*#\1 $COMPASS_DECK#g" $group_vars
- sed -i "s#^\(compass_tasks:\).*#\1 $COMPASS_TASKS#g" $group_vars
- sed -i "s#^\(compass_cobbler:\).*#\1 $COMPASS_COBBLER#g" $group_vars
- sed -i "s#^\(compass_db:\).*#\1 $COMPASS_DB#g" $group_vars
- sed -i "s#^\(compass_mq:\).*#\1 $COMPASS_MQ#g" $group_vars
+ sed -i "s#^\(compose_images:\).*#\1 $COMPOSE_IMAGES#g" $group_vars
if [[ $OFFLINE_DEPLOY == "Enable" ]]; then
sed -i "s#.*\(compass_repo:\).*#\1 $COMPASS_REPO#g" $group_vars
@@ -150,7 +166,7 @@ function launch_compass() {
sed -i "s#^\(deck_port:\).*#\1 $COMPASS_DECK_PORT#g" $group_vars
sed -i "s#^\(repo_port:\).*#\1 $COMPASS_REPO_PORT#g" $group_vars
- ansible-playbook $WORK_DIR/installer/compass-docker-compose/bring_up_compass.yml
+ ansible-playbook $WORK_DIR/installer/docker-compose/bring_up_compass.yml
}
function recover_compass() {
diff --git a/deploy/conf/base.conf b/deploy/conf/base.conf
index c2bf5291..5395405d 100644
--- a/deploy/conf/base.conf
+++ b/deploy/conf/base.conf
@@ -8,7 +8,6 @@ export EXT_NAT_GW=${EXT_NAT_GW:-192.16.1.1}
export EXT_NAT_IP_START=${EXT_NAT_IP_START:-192.16.1.3}
export EXT_NAT_IP_END=${EXT_NAT_IP_END:-192.16.1.254}
export EXTERNAL_NIC=${EXTERNAL_NIC:-eth0}
-export CLUSTER_NAME="opnfv2"
export DOMAIN="ods.com"
export PARTITIONS="/=30%,/home=5%,/tmp=5%,/var=60%"
export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
diff --git a/deploy/conf/compass.conf b/deploy/conf/compass.conf
index 78dd141b..9d9145f1 100644
--- a/deploy/conf/compass.conf
+++ b/deploy/conf/compass.conf
@@ -17,38 +17,4 @@ export NTP_SERVER="$COMPASS_SERVER"
export NAMESERVERS=${USER_NAMESERVER:-"$COMPASS_SERVER"}
export COMPASS_REPO_PORT="5151"
export OFFLINE_DEPLOY=${OFFLINE_DEPLOY:-'Disable'}
-
-if [ "$COMPASS_ARCH" = "aarch64" ]; then
- # Docker images for aarch64
-
- # Arm images are temporarily held at dockerhub linaro repo
- COMPASS_DOCKER_REPO=${COMPASS_DOCKER_REPO:-linaro}
-
- export COMPASS_DECK="$COMPASS_DOCKER_REPO/compass-deck"
- export COMPASS_TASKS="$COMPASS_DOCKER_REPO/compass-tasks-osa"
-
- if [[ "x"$KUBERNETES_VERSION != "x" ]]; then
- export COMPASS_TASKS="$COMPASS_DOCKER_REPO/compass-tasks-k8s"
- fi
-
- export COMPASS_DB="$COMPASS_DOCKER_REPO/compass-db"
- export COMPASS_MQ="$COMPASS_DOCKER_REPO/compass-mq"
- export COMPASS_REPO="$COMPASS_DOCKER_REPO/compass-repo-osa-ubuntu:euphrates"
- export COMPASS_COBBLER="$COMPASS_DOCKER_REPO/compass-cobbler"
-
-else
- # Docker images for x86_64
-
- export COMPASS_DECK="compass4nfv/compass-deck"
- export COMPASS_TASKS="yifei0van0xue/compass-tasks-osa"
-
- if [[ "x"$KUBERNETES_VERSION != "x" ]]; then
- export COMPASS_TASKS="compass4nfv/compass-tasks-k8s"
- fi
-
- export COMPASS_DB="compass4nfv/compass-db"
- export COMPASS_MQ="compass4nfv/compass-mq"
- export COMPASS_REPO="compass4nfv/compass-repo-osa-ubuntu:euphrates"
- export COMPASS_COBBLER="yifei0van0xue/compass-cobbler"
-
-fi
+export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks,compass-cobbler]"
diff --git a/deploy/config_parse.py b/deploy/config_parse.py
index 3d8dedc5..1575ca37 100644
--- a/deploy/config_parse.py
+++ b/deploy/config_parse.py
@@ -104,6 +104,7 @@ def export_dha_file(dha, dha_file, ofile):
plugin_list.append(plugin_str)
env.update({'plugins': ','.join(plugin_list)})
+ env.update({'CLUSTER_NAME': dha.get('NAME', "opnfv")})
env.update({'TYPE': dha.get('TYPE', "virtual")})
env.update({'FLAVOR': dha.get('FLAVOR', "cluster")})
env.update({'HOSTNAMES': hostnames(dha, ',')})
diff --git a/deploy/launch.sh b/deploy/launch.sh
index 6cbad1fa..98d9e4d6 100755
--- a/deploy/launch.sh
+++ b/deploy/launch.sh
@@ -55,7 +55,8 @@ if [[ "$EXPANSION" == "false" ]]; then
export machines
- if [[ "$DEPLOY_COMPASS" == "true" ]]; then
+ CONTAINER_ALIVE=$(check_container_alive)
+ if [[ "$DEPLOY_COMPASS" == "true" && "$CONTAINER_ALIVE" == "false" ]]; then
if ! prepare_env;then
echo "prepare_env failed"
exit 1
@@ -71,7 +72,7 @@ if [[ "$EXPANSION" == "false" ]]; then
log_error "launch_compass failed"
exit 1
fi
- else
+ elif [[ "$DEPLOY_COMPASS" == "true" && "$CONTAINER_ALIVE" == "true" ]]; then
refresh_compass_core
fi
else
diff --git a/deploy/playbook_done.py b/deploy/playbook_done.py
index 24c8c55b..6b1043d4 100644
--- a/deploy/playbook_done.py
+++ b/deploy/playbook_done.py
@@ -15,16 +15,15 @@
# limitations under the License.
"""Ansible playbook callback after a playbook run has completed."""
-import os
import sys
from ansible.plugins.callback import CallbackBase
-current_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(current_dir + '/..')
+compass_bin = "/opt/compass/bin"
+sys.path.append(compass_bin)
+import switch_virtualenv # noqa: F401
-# import switch_virtualenv # noqa
from compass.apiclient.restful import Client # noqa: E402
from compass.utils import flags # noqa: E402
diff --git a/deploy/status_callback.py b/deploy/status_callback.py
index 4bbbc321..6169b87f 100644
--- a/deploy/status_callback.py
+++ b/deploy/status_callback.py
@@ -8,7 +8,7 @@
##############################################################################
import httplib
-import json
+import simplejson as json
import sys # noqa:F401
from ansible.plugins.callback import CallbackBase
@@ -16,13 +16,13 @@ from ansible.plugins.callback import CallbackBase
COMPASS_HOST = "compass-deck"
-def task_error(display, host, data):
- display.display("task_error: host=%s,data=%s" % (host, data))
-
-# if isinstance(data, dict):
-# invocation = data.pop('invocation', {})
-
- notify_host(display, COMPASS_HOST, host, "failed")
+# def task_error(display, host, data):
+# display.display("task_error: host=%s,data=%s" % (host, data))
+#
+# if isinstance(data, dict):
+# invocation = data.pop('invocation', {})
+#
+# notify_host(display, COMPASS_HOST, host, "failed")
class CallbackModule(CallbackBase):
@@ -101,39 +101,47 @@ class CallbackModule(CallbackBase):
def v2_playbook_on_stats(self, stats):
self._display.display("playbook_on_stats enter")
+ all_vars = self.play.get_variable_manager().get_vars(self.loader)
+ host_vars = all_vars["hostvars"]
hosts = sorted(stats.processed.keys())
- failures = False
- unreachable = False
-
- for host in hosts:
- summary = stats.summarize(host)
- # self._display.display("host: %s \nsummary: %s\n" % (host, summary)) # noqa
-
- if summary['failures'] > 0:
- failures = True
- if summary['unreachable'] > 0:
- unreachable = True
+ cluster_name = host_vars[hosts[0]]['cluster_name']
headers = {"Content-type": "application/json",
"Accept": "*/*"}
-
conn = httplib.HTTPConnection(COMPASS_HOST, 80)
token = auth(conn)
headers["X-Auth-Token"] = token
- get_url = "/api/hosts"
+ get_url = "/api/clusterhosts"
conn.request("GET", get_url, "", headers)
resp = conn.getresponse()
raise_for_status(resp)
- host_data = json.loads(resp.read())
- clusterhosts = [item["name"] for item in host_data]
-
- if failures or unreachable:
- host_status = "error"
- else:
- host_status = "succ"
+ clusterhost_data = json.loads(resp.read())
+ clusterhost_mapping = {}
+ for item in clusterhost_data:
+ if item["clustername"] == cluster_name:
+ clusterhost_mapping.update({item["hostname"]:
+ item["clusterhost_id"]})
+
+ force_error = False
+ if "localhost" in hosts:
+ summary = stats.summarize("localhost")
+ if summary['failures'] > 0 or summary['unreachable'] > 0:
+ force_error = True
+
+ for hostname, hostid in clusterhost_mapping.iteritems():
+ if hostname not in hosts:
+ continue
+
+ summary = stats.summarize(hostname)
+ # self._display.display("host: %s \nsummary: %s\n" % (host, summary)) # noqa
- for host in clusterhosts:
- notify_host(self._display, "compass-deck", host, host_status)
+ if summary['failures'] > 0 or summary['unreachable'] > 0 \
+ or force_error:
+ status = "error"
+ else:
+ status = "succ"
+ self._display.display("hostname: %s" % hostname)
+ notify_host(self._display, COMPASS_HOST, hostid, status)
def raise_for_status(resp):
@@ -157,17 +165,15 @@ def auth(conn):
return json.loads(resp.read())["token"]
-def notify_host(display, compass_host, host, status):
- display.display("hostname: %s" % host)
- host = host.strip("host")
- url = "/api/clusterhosts/%s/state" % host
+def notify_host(display, compass_host, hostid, status):
+ url = "/api/clusterhosts/%s/state" % hostid
if status == "succ":
body = {"state": "SUCCESSFUL"}
elif status == "error":
body = {"state": "ERROR"}
else:
- display.error("notify_host: host %s with status %s is not supported"
- % (host, status))
+ display.error("notify_host: hostid %s with status %s is not supported"
+ % (hostid, status))
return
headers = {"Content-type": "application/json",