author     Tomi Juvonen <tomi.juvonen@nokia.com>    2018-12-21 12:43:57 +0200
committer  Tomi Juvonen <tomi.juvonen@nokia.com>    2019-03-26 15:53:28 +0200
commit     73605c5c34b97ab56306bfa9af0f5888f3c7e46d (patch)
tree       7175ebaec5ed949d32ee62b7ac412729b366446e
parent     33293e9c23a21ad3228f46d2063f18c915eb2b79 (diff)
Support Fenix as admin tool
If ADMIN_TOOL_TYPE=fenix, maintenance testing is run with Fenix instead of
the sample implementation. The test builds the Fenix Docker container from
the latest master branch and generates its configuration from the
controller host.

JIRA: DOCTOR-131

Change-Id: I84c566b7afc3c4e488aeed63b5cf5c75046d1427
Signed-off-by: Tomi Juvonen <tomi.juvonen@nokia.com>
-rw-r--r--  doctor_tests/admin_tool/fenix/Dockerfile      33
-rwxr-xr-x  doctor_tests/admin_tool/fenix/run             32
-rw-r--r--  doctor_tests/common/constants.py               4
-rw-r--r--  doctor_tests/installer/apex.py                21
-rw-r--r--  doctor_tests/installer/base.py                 7
-rw-r--r--  doctor_tests/installer/common/set_fenix.sh    52
-rw-r--r--  doctor_tests/scenario/maintenance.py          41
7 files changed, 177 insertions, 13 deletions
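The switch between the two admin tools is driven by the admin tool type in the
test configuration. A minimal Python sketch of that selection: only is_fenix()
and conf.admin_tool.type appear in the diff below; the mapping from the
ADMIN_TOOL_TYPE environment variable and the Conf classes are illustrative
assumptions, not the real doctor_tests configuration code.

    import os

    class AdminTool(object):
        def __init__(self, admin_type):
            self.type = admin_type              # 'sample' or 'fenix'

    class Conf(object):
        def __init__(self):
            # assumption: ADMIN_TOOL_TYPE is what ends up in conf.admin_tool.type
            self.admin_tool = AdminTool(os.environ.get('ADMIN_TOOL_TYPE',
                                                       'sample'))

    def is_fenix(conf):
        # same helper that this change adds to doctor_tests/common/constants.py
        return conf.admin_tool.type == 'fenix'

    conf = Conf()
    if is_fenix(conf):
        print('maintenance test: build and use the Fenix container')
    else:
        print('maintenance test: use the sample admin tool')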
diff --git a/doctor_tests/admin_tool/fenix/Dockerfile b/doctor_tests/admin_tool/fenix/Dockerfile
new file mode 100644
index 00000000..90039b0d
--- /dev/null
+++ b/doctor_tests/admin_tool/fenix/Dockerfile
@@ -0,0 +1,33 @@
+FROM gliderlabs/alpine:3.5
+
+ARG BRANCH=master
+ARG OPENSTACK=master
+
+EXPOSE 12347
+
+RUN echo "Building Fenix container against OpenStack $OPENSTACK" && \
+ echo "Building Fenix with $BRANCH" && \
+ mkdir /etc/fenix && \
+ mkdir -p /var/tmp/fenix
+WORKDIR /var/tmp/fenix
+COPY fenix*.conf /etc/fenix/
+RUN apk --no-cache add ca-certificates && \
+ apk --no-cache add --update python3 sshpass py-pip git curl && \
+ apk --no-cache add --virtual .build-deps --update \
+ python-dev python3-dev build-base linux-headers libffi-dev \
+ openssl-dev libjpeg-turbo-dev && \
+ curl https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=$OPENSTACK > upper-constraints.txt && \
+ pip install --upgrade pip && \
+ pip install alembic aodhclient ast decorator \
+ eventlet flask Flask-RESTful importlib \
+ keystoneauth1 logging python-novaclient oslo.config oslo.db \
+ oslo.log oslo.messaging oslo.serialization oslo.service \
+ oslotest oslo.utils pbr pymysql setuptools six sqlalchemy \
+ wsgiref -cupper-constraints.txt && \
+ git clone https://git.openstack.org/openstack/fenix -b $BRANCH /fenix && \
+ rm -fr /var/tmp/fenix
+COPY run /fenix
+COPY overcloudrc /fenix
+WORKDIR /fenix
+RUN python setup.py install
+CMD ./run
diff --git a/doctor_tests/admin_tool/fenix/run b/doctor_tests/admin_tool/fenix/run
new file mode 100755
index 00000000..2a2e37cd
--- /dev/null
+++ b/doctor_tests/admin_tool/fenix/run
@@ -0,0 +1,32 @@
+#!/bin/sh
+. overcloudrc
+
+# Start the first process
+nohup python /fenix/fenix/cmd/engine.py > /var/log/fenix-engine.log&
+status=$?
+if [ $status -ne 0 ]; then
+ echo "Failed to start engine.py: $status"
+ exit $status
+fi
+
+# Start the second process
+nohup python /fenix/fenix/cmd/api.py > /var/log/fenix-api.log&
+status=$?
+if [ $status -ne 0 ]; then
+ echo "Failed to start api.py: $status"
+ exit $status
+fi
+
+echo "started Fenix: engine and api"
+while sleep 60; do
+ ps aux |grep "cmd/engine.py" |grep -q -v grep
+ PROCESS_1_STATUS=$?
+ ps aux |grep "cmd/api.py" |grep -q -v grep
+ PROCESS_2_STATUS=$?
+ # If the greps above find anything, they exit with 0 status
+ # If they are not both 0, then something is wrong
+ if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then
+ echo "One of the processes has already exited."
+ exit 1
+ fi
+done
diff --git a/doctor_tests/common/constants.py b/doctor_tests/common/constants.py
index 088ff633..201f3fc4 100644
--- a/doctor_tests/common/constants.py
+++ b/doctor_tests/common/constants.py
@@ -12,6 +12,10 @@ from collections import namedtuple
Host = namedtuple('Host', ['name', 'ip'])
+def is_fenix(conf):
+ return conf.admin_tool.type == 'fenix'
+
+
class Inspector(object):
CONGRESS = 'congress'
SAMPLE = 'sample'
diff --git a/doctor_tests/installer/apex.py b/doctor_tests/installer/apex.py
index 79c59e9a..3ec2100c 100644
--- a/doctor_tests/installer/apex.py
+++ b/doctor_tests/installer/apex.py
@@ -9,6 +9,8 @@
import time
from doctor_tests.common.constants import Inspector
+from doctor_tests.common.constants import is_fenix
+from doctor_tests.common.utils import get_doctor_test_root_dir
from doctor_tests.common.utils import SSHClient
from doctor_tests.installer.base import BaseInstaller
@@ -19,6 +21,7 @@ class ApexInstaller(BaseInstaller):
cm_set_script = 'set_config.py'
nc_set_compute_script = 'set_compute_config.py'
cg_set_script = 'set_congress.py'
+ fe_set_script = 'set_fenix.sh'
cm_restore_script = 'restore_config.py'
nc_restore_compute_script = 'restore_compute_config.py'
cg_restore_script = 'restore_congress.py'
@@ -40,6 +43,8 @@ class ApexInstaller(BaseInstaller):
self.log.info('Setup Apex installer start......')
self.key_file = self.get_ssh_key_from_installer()
self._get_overcloud_conf()
+ if is_fenix(self.conf):
+ self._copy_overcloudrc_to_controllers()
self.create_flavor()
self.set_apply_patches()
self.setup_stunnel()
@@ -53,6 +58,11 @@ class ApexInstaller(BaseInstaller):
key_path = '/home/stack/.ssh/id_rsa'
return self._get_ssh_key(self.client, key_path)
+ def _copy_overcloudrc_to_controllers(self):
+ for ip in self.controllers:
+ cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
+ self._run_cmd_remote(self.client, cmd)
+
def _get_overcloud_conf(self):
self.log.info('Get overcloud config details from Apex installer'
'......')
@@ -90,6 +100,7 @@ class ApexInstaller(BaseInstaller):
def set_apply_patches(self):
self.log.info('Set apply patches start......')
+ fenix_files = None
set_scripts = [self.cm_set_script]
@@ -104,6 +115,10 @@ class ApexInstaller(BaseInstaller):
if self.conf.test_case != 'fault_management':
if self.use_containers:
restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+ if is_fenix(self.conf):
+ set_scripts.append(self.fe_set_script)
+ testdir = get_doctor_test_root_dir()
+ fenix_files = ["Dockerfile", "run"]
else:
restart_cmd += ' openstack-nova-scheduler.service'
set_scripts.append(self.nc_set_compute_script)
@@ -118,6 +133,12 @@ class ApexInstaller(BaseInstaller):
for node_ip in self.controllers:
client = SSHClient(node_ip, self.node_user_name,
key_filename=self.key_file)
+ if fenix_files is not None:
+ for fenix_file in fenix_files:
+ src_file = '{0}/{1}/{2}'.format(testdir,
+ 'admin_tool/fenix',
+ fenix_file)
+ client.scp(src_file, fenix_file)
self._run_apply_patches(client,
restart_cmd,
set_scripts,
diff --git a/doctor_tests/installer/base.py b/doctor_tests/installer/base.py
index df781ee1..7e2658e5 100644
--- a/doctor_tests/installer/base.py
+++ b/doctor_tests/installer/base.py
@@ -209,8 +209,13 @@ class BaseInstaller(object):
except:
client.scp(script_abs_path, script_name)
try:
- cmd = 'sudo %s %s' % (python, script_name)
+ if ".py" in script_name:
+ cmd = 'sudo %s %s' % (python, script_name)
+ else:
+ cmd = 'sudo chmod 700 %s;sudo ./%s' % (script_name,
+ script_name)
ret, output = client.ssh(cmd)
+ self.log.info('Command %s output %s' % (cmd, output))
except:
ret, output = client.ssh(cmd)
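Because set_fenix.sh is a shell script, the dispatch above no longer assumes
every patch script is Python. A minimal sketch of the resulting commands,
using script names from the Apex installer above:

    def build_cmd(script_name, python='python'):
        # mirrors the branch added to _run_apply_patches() above
        if '.py' in script_name:
            return 'sudo %s %s' % (python, script_name)
        return 'sudo chmod 700 %s;sudo ./%s' % (script_name, script_name)

    print(build_cmd('set_config.py'))  # sudo python set_config.py
    print(build_cmd('set_fenix.sh'))   # sudo chmod 700 set_fenix.sh;sudo ./set_fenix.sh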
diff --git a/doctor_tests/installer/common/set_fenix.sh b/doctor_tests/installer/common/set_fenix.sh
new file mode 100644
index 00000000..a660af79
--- /dev/null
+++ b/doctor_tests/installer/common/set_fenix.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+##############################################################################
+# Copyright (c) 2018 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Config files
+echo "[DEFAULT]" > fenix.conf
+echo "[DEFAULT]" > fenix-api.conf
+echo "port = 12347" >> fenix.conf
+echo "port = 12347" >> fenix-api.conf
+grep -m1 "^transport" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf >> fenix.conf
+grep -m1 "^transport" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf >> fenix-api.conf
+echo "[database]" >> fenix.conf
+MYSQLIP=`grep -m1 "^connection=mysql" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf | sed -e "s/.*@//;s/\/.*//"`
+echo "connection=mysql+pymysql://fenix:fenix@$MYSQLIP/fenix?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf" >> fenix.conf
+
+# Mysql pw
+MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.json | grep mysql | grep root_password | awk -F": " '{print $2}' | awk -F"\"" '{print $2}'`
+
+# Fenix DB
+[ `mysql -uroot -p$MYSQLPW -e "SELECT host, user FROM mysql.user;" | grep fenix | wc -l` -eq 0 ] && {
+ mysql -uroot -p$MYSQLPW -hlocalhost -e "CREATE USER 'fenix'@'localhost' IDENTIFIED BY 'fenix';"
+ mysql -uroot -p$MYSQLPW -hlocalhost -e "GRANT ALL PRIVILEGES ON fenix.* TO 'fenix'@'' identified by 'fenix';FLUSH PRIVILEGES;"
+}
+mysql -ufenix -pfenix -hlocalhost -e "DROP DATABASE IF EXISTS fenix;"
+mysql -ufenix -pfenix -hlocalhost -e "CREATE DATABASE fenix CHARACTER SET utf8;"
+
+# Remove previous container
+for img in `docker image list | grep "^fenix" | awk '{print $1}'`; do
+ for dock in `docker ps --all -f "ancestor=$img" | grep "$img" | awk '{print $1}'`; do
+ docker stop $dock; docker rm $dock;
+ done;
+ docker image rm $img;
+done
+
+# Build Fenix container and run it
+chmod 700 run
+docker build --build-arg OPENSTACK=master --build-arg BRANCH=master --network host /home/heat-admin -t fenix | tail -1
+docker run --network host -d --name fenix -p 12347:12347 -ti fenix
+if [ $? -eq 0 ]; then
+ echo "Fenix start: OK"
+else
+ echo "Fenix start: FAILED"
+fi
+# To debug check log from fenix container
+# docker exec -ti fenix tail -f /var/log/fenix-engine.log
diff --git a/doctor_tests/scenario/maintenance.py b/doctor_tests/scenario/maintenance.py
index a2129f61..7c2c17e0 100644
--- a/doctor_tests/scenario/maintenance.py
+++ b/doctor_tests/scenario/maintenance.py
@@ -142,22 +142,39 @@ class Maintenance(object):
(self.conf.admin_tool.ip,
self.conf.admin_tool.port,
self.endpoint))
-
- # let's start maintenance 20sec from now, so projects will have
- # time to ACK to it before that
- maintenance_at = (datetime.datetime.utcnow() +
- datetime.timedelta(seconds=30)
- ).strftime('%Y-%m-%d %H:%M:%S')
- data = {'hosts': maintenance_hosts,
- 'state': 'MAINTENANCE',
- 'maintenance_at': maintenance_at,
- 'metadata': {'openstack_version': 'Rocky'},
- 'workflow': 'default'}
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'}
- ret = requests.post(url, data=json.dumps(data), headers=headers)
+ retries = 12
+ while retries > 0:
+            # let's start maintenance 30sec from now, so projects will have
+            # time to ACK it before that
+ maintenance_at = (datetime.datetime.utcnow() +
+ datetime.timedelta(seconds=30)
+ ).strftime('%Y-%m-%d %H:%M:%S')
+
+ data = {'state': 'MAINTENANCE',
+ 'maintenance_at': maintenance_at,
+ 'metadata': {'openstack_version': 'Rocky'},
+ 'workflow': 'default'}
+
+ if self.conf.admin_tool.type == 'sample':
+ data['hosts'] = maintenance_hosts
+ else:
+ data['hosts'] = []
+ try:
+ ret = requests.post(url, data=json.dumps(data),
+ headers=headers)
+ except:
+                if retries == 1:
+ raise Exception('admin tool did not respond in 120s')
+ else:
+ self.log.info('admin tool not ready, retry in 10s')
+ retries = retries - 1
+ time.sleep(10)
+ continue
+ break
if ret.status_code != 200:
raise Exception(ret.text)
return ret.json()['session_id']
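For reference, an example of the request body the retry loop above posts to
the admin tool. The host names and the maintenance_at timestamp are invented
for this sketch; the field names come from the code above.

    import json

    payload = {
        'state': 'MAINTENANCE',
        'maintenance_at': '2019-03-26 12:00:30',
        'metadata': {'openstack_version': 'Rocky'},
        'workflow': 'default',
        # the sample admin tool gets the maintenance hosts listed here;
        # for Fenix the list is sent empty, as in the code above
        'hosts': ['overcloud-novacompute-0', 'overcloud-novacompute-1'],
    }
    print(json.dumps(payload, indent=2))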