Diffstat (limited to 'doctor_tests/installer')
-rw-r--r--  doctor_tests/installer/__init__.py                          8
-rw-r--r--  doctor_tests/installer/apex.py                             98
-rw-r--r--  doctor_tests/installer/base.py                             58
-rw-r--r--  doctor_tests/installer/common/restore_compute_config.py   22
-rw-r--r--  doctor_tests/installer/common/set_compute_config.py       34
-rw-r--r--  doctor_tests/installer/common/set_config.py                 1
-rw-r--r--  doctor_tests/installer/common/set_fenix.sh                106
-rw-r--r--  doctor_tests/installer/devstack.py                        151
-rw-r--r--  doctor_tests/installer/local.py                           118
-rw-r--r--  doctor_tests/installer/mcp.py                             179
10 files changed, 520 insertions, 255 deletions
diff --git a/doctor_tests/installer/__init__.py b/doctor_tests/installer/__init__.py
index 2b9ad83d..00a01667 100644
--- a/doctor_tests/installer/__init__.py
+++ b/doctor_tests/installer/__init__.py
@@ -13,8 +13,8 @@ from oslo_utils import importutils
OPTS = [
cfg.StrOpt('type',
- default=os.environ.get('INSTALLER_TYPE', 'local'),
- choices=['local', 'apex', 'daisy', 'fuel'],
+ default=os.environ.get('INSTALLER_TYPE', 'devstack'),
+ choices=['apex', 'daisy', 'fuel', 'devstack'],
help='the type of installer',
required=True),
cfg.StrOpt('ip',
@@ -28,10 +28,10 @@ OPTS = [
_installer_name_class_mapping = {
- 'local': 'doctor_tests.installer.local.LocalInstaller',
'apex': 'doctor_tests.installer.apex.ApexInstaller',
'daisy': 'doctor_tests.installer.daisy.DaisyInstaller',
- 'fuel': 'doctor_tests.installer.mcp.McpInstaller'
+ 'fuel': 'doctor_tests.installer.mcp.McpInstaller',
+ 'devstack': 'doctor_tests.installer.devstack.DevstackInstaller'
}
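
For context, the `type` option above is resolved to a concrete installer class through `_installer_name_class_mapping`. A minimal sketch of how such a mapping is typically consumed with `oslo_utils.importutils` (the actual factory lives elsewhere in doctor_tests and may differ):

```python
# Sketch only: assumes a factory along these lines exists in doctor_tests.
from oslo_utils import importutils

def get_installer(conf, log):
    # e.g. 'devstack' -> 'doctor_tests.installer.devstack.DevstackInstaller'
    installer_class = _installer_name_class_mapping[conf.installer.type]
    return importutils.import_object(installer_class, conf, log)
```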
diff --git a/doctor_tests/installer/apex.py b/doctor_tests/installer/apex.py
index 2aa81ff9..3ec2100c 100644
--- a/doctor_tests/installer/apex.py
+++ b/doctor_tests/installer/apex.py
@@ -6,10 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import re
import time
from doctor_tests.common.constants import Inspector
+from doctor_tests.common.constants import is_fenix
+from doctor_tests.common.utils import get_doctor_test_root_dir
from doctor_tests.common.utils import SSHClient
from doctor_tests.installer.base import BaseInstaller
@@ -20,6 +21,7 @@ class ApexInstaller(BaseInstaller):
cm_set_script = 'set_config.py'
nc_set_compute_script = 'set_compute_config.py'
cg_set_script = 'set_congress.py'
+ fe_set_script = 'set_fenix.sh'
cm_restore_script = 'restore_config.py'
nc_restore_compute_script = 'restore_compute_config.py'
cg_restore_script = 'restore_congress.py'
@@ -36,13 +38,13 @@ class ApexInstaller(BaseInstaller):
self.key_file = None
self.controllers = list()
self.computes = list()
- self.controller_clients = list()
- self.compute_clients = list()
def setup(self):
self.log.info('Setup Apex installer start......')
self.key_file = self.get_ssh_key_from_installer()
self._get_overcloud_conf()
+ if is_fenix(self.conf):
+ self._copy_overcloudrc_to_controllers()
self.create_flavor()
self.set_apply_patches()
self.setup_stunnel()
@@ -56,6 +58,11 @@ class ApexInstaller(BaseInstaller):
key_path = '/home/stack/.ssh/id_rsa'
return self._get_ssh_key(self.client, key_path)
+ def _copy_overcloudrc_to_controllers(self):
+ for ip in self.controllers:
+ cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
+ self._run_cmd_remote(self.client, cmd)
+
def _get_overcloud_conf(self):
self.log.info('Get overcloud config details from Apex installer'
'......')
@@ -83,26 +90,6 @@ class ApexInstaller(BaseInstaller):
host_ips = self._run_cmd_remote(self.client, command)
return host_ips[0]
- def get_transport_url(self):
- client = SSHClient(self.controllers[0], self.node_user_name,
- key_filename=self.key_file)
- if self.use_containers:
- ncbase = "/var/lib/config-data/puppet-generated/nova"
- else:
- ncbase = ""
- command = 'sudo grep "^transport_url" %s/etc/nova/nova.conf' % ncbase
-
- ret, url = client.ssh(command)
- if ret:
- raise Exception('Exec command to get host ip from controller(%s)'
- 'in Apex installer failed, ret=%s, output=%s'
- % (self.controllers[0], ret, url))
- # need to use ip instead of hostname
- ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
- url[0].split("=", 1)[1]))
- self.log.debug('get_transport_url %s' % ret)
- return ret
-
def _set_docker_restart_cmd(self, service):
# There can be multiple instances running so need to restart all
cmd = "for container in `sudo docker ps | grep "
@@ -113,6 +100,7 @@ class ApexInstaller(BaseInstaller):
def set_apply_patches(self):
self.log.info('Set apply patches start......')
+ fenix_files = None
set_scripts = [self.cm_set_script]
@@ -127,6 +115,10 @@ class ApexInstaller(BaseInstaller):
if self.conf.test_case != 'fault_management':
if self.use_containers:
restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+ if is_fenix(self.conf):
+ set_scripts.append(self.fe_set_script)
+ testdir = get_doctor_test_root_dir()
+ fenix_files = ["Dockerfile", "run"]
else:
restart_cmd += ' openstack-nova-scheduler.service'
set_scripts.append(self.nc_set_compute_script)
@@ -141,29 +133,34 @@ class ApexInstaller(BaseInstaller):
for node_ip in self.controllers:
client = SSHClient(node_ip, self.node_user_name,
key_filename=self.key_file)
- self.controller_clients.append(client)
+ if fenix_files is not None:
+ for fenix_file in fenix_files:
+ src_file = '{0}/{1}/{2}'.format(testdir,
+ 'admin_tool/fenix',
+ fenix_file)
+ client.scp(src_file, fenix_file)
self._run_apply_patches(client,
restart_cmd,
set_scripts,
python=self.python)
+ time.sleep(5)
+
+        self.log.info('Set apply patches for computes start......')
if self.conf.test_case != 'fault_management':
if self.use_containers:
- restart_cmd = self._set_docker_restart_cmd("nova-compute")
+ restart_cmd = self._set_docker_restart_cmd("nova")
else:
restart_cmd = 'sudo systemctl restart' \
' openstack-nova-compute.service'
for node_ip in self.computes:
client = SSHClient(node_ip, self.node_user_name,
key_filename=self.key_file)
- self.compute_clients.append(client)
self._run_apply_patches(client,
restart_cmd,
[self.nc_set_compute_script],
python=self.python)
-
- if self.conf.test_case != 'fault_management':
- time.sleep(10)
+ time.sleep(5)
def restore_apply_patches(self):
self.log.info('restore apply patches start......')
@@ -192,39 +189,22 @@ class ApexInstaller(BaseInstaller):
restart_cmd += ' openstack-congress-server.service'
restore_scripts.append(self.cg_restore_script)
- for client, node_ip in zip(self.controller_clients, self.controllers):
- retry = 0
- while retry < 2:
- try:
- self._run_apply_patches(client,
- restart_cmd,
- restore_scripts,
- python=self.python)
- except Exception:
- if retry > 0:
- raise Exception("SSHClient to %s feiled" % node_ip)
- client = SSHClient(node_ip, self.node_user_name,
- key_filename=self.key_file)
- retry += 1
- break
+ for node_ip in self.controllers:
+ client = SSHClient(node_ip, self.node_user_name,
+ key_filename=self.key_file)
+ self._run_apply_patches(client,
+ restart_cmd,
+ restore_scripts,
+ python=self.python)
+
if self.conf.test_case != 'fault_management':
if self.use_containers:
restart_cmd = self._set_docker_restart_cmd("nova-compute")
else:
restart_cmd = 'sudo systemctl restart' \
' openstack-nova-compute.service'
- for client, node_ip in zip(self.compute_clients, self.computes):
- retry = 0
- while retry < 2:
- try:
- self._run_apply_patches(
- client, restart_cmd,
- [self.nc_restore_compute_script],
- python=self.python)
- except Exception:
- if retry > 0:
- raise Exception("SSHClient to %s feiled" % node_ip)
- client = SSHClient(node_ip, self.node_user_name,
- key_filename=self.key_file)
- retry += 1
- break
+        for node_ip in self.computes:
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
+            self._run_apply_patches(
+                client, restart_cmd,
+                [self.nc_restore_compute_script],
+                python=self.python)
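
The `_set_docker_restart_cmd` helper seen above assembles a shell loop that restarts every container matching a service name. Reconstructed from the string concatenation, `_set_docker_restart_cmd("nova")` evaluates to roughly the following (line-continuation whitespace omitted):

```python
# Reconstructed value of _set_docker_restart_cmd("nova"); illustrative only.
cmd = ("for container in `sudo docker ps | grep nova"
       " | awk '{print $1}'`; do sudo docker restart $container; done;")
```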
diff --git a/doctor_tests/installer/base.py b/doctor_tests/installer/base.py
index 30435931..de4d2f2e 100644
--- a/doctor_tests/installer/base.py
+++ b/doctor_tests/installer/base.py
@@ -14,8 +14,9 @@ import pwd
import six
import stat
import subprocess
+import time
-from doctor_tests.common.utils import get_doctor_test_root_dir
+from doctor_tests.common import utils
from doctor_tests.identity_auth import get_session
from doctor_tests.os_clients import nova_client
@@ -75,7 +76,7 @@ class BaseInstaller(object):
cmd = ("ssh -o UserKnownHostsFile=/dev/null"
" -o StrictHostKeyChecking=no"
" -i %s %s@%s -R %s:localhost:%s"
- " sleep %s > ssh_tunnel.%s"
+ " sleep %s > ssh_tunnel.%s.%s"
" 2>&1 < /dev/null "
% (self.key_file,
self.node_user_name,
@@ -83,9 +84,28 @@ class BaseInstaller(object):
port,
port,
tunnel_uptime,
- node_ip))
+ node_ip,
+ port))
server = subprocess.Popen('exec ' + cmd, shell=True)
self.servers.append(server)
+        if self.conf.admin_tool.type == 'fenix':
+            # Fenix runs on the controller, so open a local forward (-L)
+            # to it instead of the reverse tunnel (-R) used above
+            port = self.conf.admin_tool.port
+            self.log.info('tunnel for port %s' % port)
+ cmd = ("ssh -o UserKnownHostsFile=/dev/null"
+ " -o StrictHostKeyChecking=no"
+ " -i %s %s@%s -L %s:localhost:%s"
+ " sleep %s > ssh_tunnel.%s.%s"
+ " 2>&1 < /dev/null "
+ % (self.key_file,
+ self.node_user_name,
+ node_ip,
+ port,
+ port,
+ tunnel_uptime,
+ node_ip,
+ port))
+ server = subprocess.Popen('exec ' + cmd, shell=True)
+ self.servers.append(server)
def _get_ssh_key(self, client, key_path):
self.log.info('Get SSH keys from %s installer......'
@@ -96,7 +116,8 @@ class BaseInstaller(object):
% self.conf.installer.type)
return self.key_file
- ssh_key = '{0}/{1}'.format(get_doctor_test_root_dir(), 'instack_key')
+ ssh_key = '{0}/{1}'.format(utils.get_doctor_test_root_dir(),
+ 'instack_key')
client.scp(key_path, ssh_key, method='get')
user = getpass.getuser()
uid = pwd.getpwnam(user).pw_uid
@@ -105,6 +126,10 @@ class BaseInstaller(object):
os.chmod(ssh_key, stat.S_IREAD)
return ssh_key
+ @abc.abstractmethod
+ def get_transport_url(self):
+ pass
+
def _run_cmd_remote(self, client, command):
self.log.info('Run command=%s in %s installer......'
% (command, self.conf.installer.type))
@@ -131,19 +156,36 @@ class BaseInstaller(object):
ret = False
return ret
+ @utils.run_async
def _run_apply_patches(self, client, restart_cmd, script_names,
python='python3'):
installer_dir = os.path.dirname(os.path.realpath(__file__))
-
if isinstance(script_names, list):
for script_name in script_names:
script_abs_path = '{0}/{1}/{2}'.format(installer_dir,
'common', script_name)
- client.scp(script_abs_path, script_name)
- cmd = 'sudo %s %s' % (python, script_name)
- ret, output = client.ssh(cmd)
+ if self.conf.installer.type == "devstack":
+ script_name = "/opt/stack/%s" % script_name
+                try:
+                    client.scp(script_abs_path, script_name)
+                except Exception:
+                    # scp can fail transiently right after the tunnel
+                    # comes up, so retry once before giving up
+                    client.scp(script_abs_path, script_name)
+                try:
+                    if script_name.endswith(".py"):
+                        cmd = 'sudo %s %s' % (python, script_name)
+                    else:
+                        cmd = 'sudo chmod 700 %s; sudo ./%s' % (script_name,
+                                                                script_name)
+                    ret, output = client.ssh(cmd)
+                    self.log.info('Command %s output %s' % (cmd, output))
+                except Exception:
+                    # Retry the command once if the SSH channel failed
+                    ret, output = client.ssh(cmd)
+                    self.log.info('Command %s output %s' % (cmd, output))
if ret:
raise Exception('Do the command in remote'
' node failed, ret=%s, cmd=%s, output=%s'
% (ret, cmd, output))
+ if 'nova' in restart_cmd or 'devstack@n-' in restart_cmd:
+ # Make sure scheduler has proper cpu_allocation_ratio
+ time.sleep(5)
client.ssh(restart_cmd)
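
`_run_apply_patches` is now decorated with `utils.run_async`, and the mcp.py changes below collect its return values and call `join()` on them, which suggests the decorator starts the call in a thread and returns that thread. A minimal sketch under that assumption (the real helper lives in doctor_tests.common.utils):

```python
# Hypothetical sketch of utils.run_async, inferred from the thr.join()
# usage in mcp.py; the actual implementation may differ.
import threading
from functools import wraps

def run_async(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        thread.start()
        return thread  # callers may join() to wait for completion
    return wrapper
```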
diff --git a/doctor_tests/installer/common/restore_compute_config.py b/doctor_tests/installer/common/restore_compute_config.py
index 0e9939fd..82e10a66 100644
--- a/doctor_tests/installer/common/restore_compute_config.py
+++ b/doctor_tests/installer/common/restore_compute_config.py
@@ -11,18 +11,16 @@ import shutil
def restore_cpu_allocation_ratio():
- nova_base = "/var/lib/config-data/puppet-generated/nova"
- if not os.path.isdir(nova_base):
- nova_base = ""
- nova_file = nova_base + '/etc/nova/nova.conf'
- nova_file_bak = nova_base + '/etc/nova/nova.bak'
-
- if not os.path.isfile(nova_file_bak):
- print('Bak_file:%s does not exist.' % nova_file_bak)
- else:
- print('restore: %s' % nova_file)
- shutil.copyfile(nova_file_bak, nova_file)
- os.remove(nova_file_bak)
+ for nova_file_bak in ["/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.bak", # noqa
+ "/var/lib/config-data/puppet-generated/nova/etc/nova/nova.bak", # noqa
+ "/etc/nova/nova.bak"]:
+ if os.path.isfile(nova_file_bak):
+ nova_file = nova_file_bak.replace(".bak", ".conf")
+ print('restoring nova.bak.')
+ shutil.copyfile(nova_file_bak, nova_file)
+ os.remove(nova_file_bak)
+ return
+ print('nova.bak does not exist.')
return
restore_cpu_allocation_ratio()
diff --git a/doctor_tests/installer/common/set_compute_config.py b/doctor_tests/installer/common/set_compute_config.py
index 86266085..615f1895 100644
--- a/doctor_tests/installer/common/set_compute_config.py
+++ b/doctor_tests/installer/common/set_compute_config.py
@@ -10,37 +10,25 @@ import os
import shutil
-def make_initial_config(service, dest):
- for mk in ["", "/etc", "/%s" % service]:
- dest += mk
- os.mkdir(dest)
- src = "/etc/%s/%s.conf" % (service, service)
- dest += "/%s.conf" % service
- shutil.copyfile(src, dest)
-
-
def set_cpu_allocation_ratio():
- docker_conf_base_dir = "/var/lib/config-data/puppet-generated"
- if not os.path.isdir(docker_conf_base_dir):
- nova_base = ""
- else:
- nova_base = "%s/nova" % docker_conf_base_dir
- if not os.path.isdir(nova_base):
- # nova.conf to be used might not exist
- make_initial_config("nova", nova_base)
- nova_file = nova_base + '/etc/nova/nova.conf'
- nova_file_bak = nova_base + '/etc/nova/nova.bak'
+ nova_file_bak = None
+ for nova_file in ["/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf", # noqa
+ "/var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf", # noqa
+ "/etc/nova/nova.conf"]:
+ if os.path.isfile(nova_file):
+ nova_file_bak = nova_file.replace(".conf", ".bak")
+ break
- if not os.path.isfile(nova_file):
- raise Exception("File doesn't exist: %s." % nova_file)
+ if nova_file_bak is None:
+ raise Exception("Could not find nova.conf")
# TODO (tojuvone): Unfortunately ConfigParser did not produce working conf
fcheck = open(nova_file)
found_list = ([ca for ca in fcheck.readlines() if "cpu_allocation_ratio"
in ca])
fcheck.close()
+ change = False
+ found = False
if found_list and len(found_list):
- change = False
- found = False
for car in found_list:
if car.startswith('#'):
continue
diff --git a/doctor_tests/installer/common/set_config.py b/doctor_tests/installer/common/set_config.py
index 3dc6cd9a..e66d4c2c 100644
--- a/doctor_tests/installer/common/set_config.py
+++ b/doctor_tests/installer/common/set_config.py
@@ -125,6 +125,7 @@ def set_event_definitions():
'reply_url': {'fields': 'payload.reply_url'},
'actions_at': {'fields': 'payload.actions_at',
'type': 'datetime'},
+ 'reply_at': {'fields': 'payload.reply_at', 'type': 'datetime'},
'state': {'fields': 'payload.state'},
'session_id': {'fields': 'payload.session_id'},
'project_id': {'fields': 'payload.project_id'},
diff --git a/doctor_tests/installer/common/set_fenix.sh b/doctor_tests/installer/common/set_fenix.sh
new file mode 100644
index 00000000..bd1eae47
--- /dev/null
+++ b/doctor_tests/installer/common/set_fenix.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+
+##############################################################################
+# Copyright (c) 2019 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Config files
+docker -v >/dev/null || {
+echo "Fenix needs docker to be installed..."
+ver=`grep "UBUNTU_CODENAME" /etc/os-release | cut -d '=' -f 2`
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $ver stable"
+apt install -y apt-transport-https ca-certificates curl software-properties-common
+apt update
+apt-cache policy docker-ce
+apt-get install -y docker-ce docker-ce-cli containerd.io
+dpkg -r --force-depends golang-docker-credential-helpers
+}
+
+docker ps | grep fenix -q && {
+REMOTE=`git ls-remote https://opendev.org/x/fenix HEAD | awk '{ print $1}'`
+LOCAL=`docker exec -t fenix git rev-parse @`
+if [[ "$LOCAL" =~ "$REMOTE" ]]; then
+    # The two strings have different line endings, so an exact equality
+    # test would never match; a substring match is used instead
+ echo "Fenix start: Already running latest $LOCAL equals $REMOTE"
+ exit 0
+else
+ echo "Fenix container needs to be recreated $LOCAL not $REMOTE"
+ # Remove previous container
+ for img in `docker image list | grep "^fenix" | awk '{print $1}'`; do
+ for dock in `docker ps --all -f "ancestor=$img" | grep "$img" | awk '{print $1}'`; do
+ docker stop $dock; docker rm $dock;
+ done;
+ docker image rm $img;
+ done
+fi
+} || echo "Fenix container needs to be created..."
+
+cp /root/keystonercv3 .
+
+transport=`grep -m1 "^transport" /etc/nova/nova.conf`
+. keystonercv3
+
+echo "[DEFAULT]" > fenix.conf
+echo "port = 12347" >> fenix.conf
+echo $transport >> fenix.conf
+
+echo "[database]" >> fenix.conf
+MYSQLIP=`grep -m1 "^connection" /etc/nova/nova.conf | sed -e "s/.*@//;s/\/.*//"`
+echo "connection = mysql+pymysql://fenix:fenix@$MYSQLIP/fenix" >> fenix.conf
+
+echo "[service_user]" >> fenix.conf
+echo "os_auth_url = $OS_AUTH_URL" >> fenix.conf
+echo "os_username = $OS_USERNAME" >> fenix.conf
+echo "os_password = $OS_PASSWORD" >> fenix.conf
+echo "os_user_domain_name = $OS_USER_DOMAIN_NAME" >> fenix.conf
+echo "os_project_name = $OS_PROJECT_NAME" >> fenix.conf
+echo "os_project_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix.conf
+
+echo "[DEFAULT]" > fenix-api.conf
+echo "port = 12347" >> fenix-api.conf
+echo $transport >> fenix-api.conf
+
+echo "[keystone_authtoken]" >> fenix-api.conf
+echo "auth_url = $OS_AUTH_URL" >> fenix-api.conf
+echo "auth_type = password" >> fenix-api.conf
+echo "project_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix-api.conf
+echo "project_name = $OS_PROJECT_NAME" >> fenix-api.conf
+echo "user_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix-api.conf
+echo "password = $OS_PASSWORD" >> fenix-api.conf
+echo "username = $OS_USERNAME" >> fenix-api.conf
+echo "cafile = /opt/stack/data/ca-bundle.pem" >> fenix-api.conf
+
+openstack service list | grep -q maintenance || {
+openstack service create --name fenix --enable maintenance
+openstack endpoint create --region $OS_REGION_NAME --enable fenix public http://localhost:12347/v1
+}
+
+# Mysql pw
+# MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.json | grep mysql | grep root_password | awk -F": " '{print $2}' | awk -F"\"" '{print $2}'`
+MYSQLPW=root
+
+# Fenix DB
+[ `mysql -uroot -p$MYSQLPW -e "SELECT host, user FROM mysql.user;" | grep fenix | wc -l` -eq 0 ] && {
+ mysql -uroot -p$MYSQLPW -hlocalhost -e "CREATE USER 'fenix'@'localhost' IDENTIFIED BY 'fenix';"
+ mysql -uroot -p$MYSQLPW -hlocalhost -e "GRANT ALL PRIVILEGES ON fenix.* TO 'fenix'@'' identified by 'fenix';FLUSH PRIVILEGES;"
+}
+mysql -ufenix -pfenix -hlocalhost -e "DROP DATABASE IF EXISTS fenix;"
+mysql -ufenix -pfenix -hlocalhost -e "CREATE DATABASE fenix CHARACTER SET utf8;"
+
+# Build Fenix container and run it
+chmod 700 run
+docker build --build-arg OPENSTACK=master --build-arg BRANCH=master --network host $PWD -t fenix | tail -1
+docker run --network host -d --name fenix -p 12347:12347 -ti fenix
+if [ $? -eq 0 ]; then
+ echo "Fenix start: OK"
+else
+ echo "Fenix start: FAILED"
+fi
+# To debug check log from fenix container
+# docker exec -ti fenix tail -f /var/log/fenix-engine.log
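
For reference, the fenix.conf generated by the script would look roughly like this; every value below is hypothetical and depends on the local keystonercv3 and nova.conf:

```ini
# Illustrative output only; credentials and addresses are invented.
[DEFAULT]
port = 12347
transport_url = rabbit://openstack:secret@192.0.2.10:5672/

[database]
connection = mysql+pymysql://fenix:fenix@192.0.2.10/fenix

[service_user]
os_auth_url = http://192.0.2.10/identity/v3
os_username = admin
os_password = secret
os_user_domain_name = Default
os_project_name = admin
os_project_domain_name = Default
```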
diff --git a/doctor_tests/installer/devstack.py b/doctor_tests/installer/devstack.py
new file mode 100644
index 00000000..02f3601a
--- /dev/null
+++ b/doctor_tests/installer/devstack.py
@@ -0,0 +1,151 @@
+##############################################################################
+# Copyright (c) 2019 Nokia Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import os
+import socket
+import time
+
+from doctor_tests.common.utils import SSHClient
+from doctor_tests.common.utils import LocalSSH
+from doctor_tests.identity_auth import get_session
+from doctor_tests.installer.base import BaseInstaller
+from doctor_tests.os_clients import nova_client
+
+
+class DevstackInstaller(BaseInstaller):
+ node_user_name = None
+ cm_set_script = 'set_config.py'
+ nc_set_compute_script = 'set_compute_config.py'
+ cm_restore_script = 'restore_config.py'
+ nc_restore_compute_script = 'restore_compute_config.py'
+ ac_restart_script = 'restart_aodh.py'
+ ac_restore_script = 'restore_aodh.py'
+ python = 'python'
+
+ def __init__(self, conf, log):
+ super(DevstackInstaller, self).__init__(conf, log)
+        # Run Doctor under the user's home: sudo hides env params needed here
+ home, self.node_user_name = (iter(os.environ.get('VIRTUAL_ENV')
+ .split('/', 3)[1:3]))
+ # Migration needs to work so ssh should have proper key defined
+ self.key_file = '/%s/%s/.ssh/id_rsa' % (home, self.node_user_name)
+ self.log.info('ssh uses: %s and %s' % (self.node_user_name,
+ self.key_file))
+ self.controllers = ([ip for ip in
+ socket.gethostbyname_ex(socket.gethostname())[2]
+ if not ip.startswith('127.')] or
+ [[(s.connect(('8.8.8.8', 53)),
+ s.getsockname()[0], s.close())
+ for s in [socket.socket(socket.AF_INET,
+ socket.SOCK_DGRAM)]][0][1]])
+ conf.admin_tool.ip = self.controllers[0]
+ self.computes = list()
+ self.nova = nova_client(conf.nova_version, get_session())
+
+ def setup(self):
+ self.log.info('Setup Devstack installer start......')
+ self._get_devstack_conf()
+ self.create_flavor()
+ self.set_apply_patches()
+
+ def cleanup(self):
+ self.restore_apply_patches()
+
+ def get_ssh_key_from_installer(self):
+ return self.key_file
+
+ def get_transport_url(self):
+ client = LocalSSH(self.log)
+ cmd = 'sudo grep -m1 "^transport_url" /etc/nova/nova.conf'
+ ret, url = client.ssh(cmd)
+ url = url.split("= ", 1)[1][:-1]
+ self.log.info('get_transport_url %s' % url)
+ return url
+
+ def get_host_ip_from_hostname(self, hostname):
+ return [hvisor.__getattr__('host_ip') for hvisor in self.hvisors
+ if hvisor.__getattr__('hypervisor_hostname') == hostname][0]
+
+ def _get_devstack_conf(self):
+ self.log.info('Get devstack config details for Devstack installer'
+ '......')
+ self.hvisors = self.nova.hypervisors.list(detailed=True)
+ self.log.info('checking hypervisors.......')
+ self.computes = [hvisor.__getattr__('host_ip') for hvisor in
+ self.hvisors]
+ self.use_containers = False
+ self.log.info('controller_ips:%s' % self.controllers)
+ self.log.info('compute_ips:%s' % self.computes)
+ self.log.info('use_containers:%s' % self.use_containers)
+
+ def _set_docker_restart_cmd(self, service):
+ # There can be multiple instances running so need to restart all
+ cmd = "for container in `sudo docker ps | grep "
+ cmd += service
+ cmd += " | awk '{print $1}'`; do sudo docker restart $container; \
+ done;"
+ return cmd
+
+ def set_apply_patches(self):
+ self.log.info('Set apply patches start......')
+
+ set_scripts = [self.cm_set_script]
+
+ restart_cmd = 'sudo systemctl restart' \
+ ' devstack@ceilometer-anotification.service'
+
+ client = LocalSSH(self.log)
+ self._run_apply_patches(client,
+ restart_cmd,
+ set_scripts,
+ python=self.python)
+ time.sleep(7)
+
+        self.log.info('Set apply patches for computes start......')
+
+ if self.conf.test_case != 'fault_management':
+ restart_cmd = 'sudo systemctl restart' \
+ ' devstack@n-cpu.service'
+ for node_ip in self.computes:
+ client = SSHClient(node_ip, self.node_user_name,
+ key_filename=self.key_file)
+ self._run_apply_patches(client,
+ restart_cmd,
+ [self.nc_set_compute_script],
+ python=self.python)
+ time.sleep(7)
+
+ def restore_apply_patches(self):
+ self.log.info('restore apply patches start......')
+
+ restore_scripts = [self.cm_restore_script]
+
+ restart_cmd = 'sudo systemctl restart' \
+ ' devstack@ceilometer-anotification.service'
+
+ if self.conf.test_case != 'fault_management':
+ restart_cmd += ' devstack@n-sch.service'
+ restore_scripts.append(self.nc_restore_compute_script)
+
+ client = LocalSSH(self.log)
+ self._run_apply_patches(client,
+ restart_cmd,
+ restore_scripts,
+ python=self.python)
+
+ if self.conf.test_case != 'fault_management':
+ restart_cmd = 'sudo systemctl restart' \
+ ' devstack@n-cpu.service'
+ for node_ip in self.computes:
+ client = SSHClient(node_ip, self.node_user_name,
+ key_filename=self.key_file)
+ self._run_apply_patches(
+ client, restart_cmd,
+ [self.nc_restore_compute_script],
+ python=self.python)
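
The controller-address detection in `__init__` above uses a common idiom: if `gethostbyname_ex` yields only loopback addresses, a UDP socket is "connected" to a public address, which selects the outbound interface without sending any packets. The same trick, standalone:

```python
# Standalone version of the local-IP discovery used by DevstackInstaller.
import socket

def local_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 53))  # UDP connect sends no traffic
        return s.getsockname()[0]   # IP of the outbound interface
    finally:
        s.close()
```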
diff --git a/doctor_tests/installer/local.py b/doctor_tests/installer/local.py
deleted file mode 100644
index fee14f33..00000000
--- a/doctor_tests/installer/local.py
+++ /dev/null
@@ -1,118 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import shutil
-import subprocess
-
-from doctor_tests.installer.base import BaseInstaller
-from doctor_tests.installer.common.vitrage import \
- set_vitrage_host_down_template
-from doctor_tests.common.constants import Inspector
-from doctor_tests.common.utils import load_json_file
-from doctor_tests.common.utils import write_json_file
-
-
-class LocalInstaller(BaseInstaller):
- node_user_name = 'root'
-
- nova_policy_file = '/etc/nova/policy.json'
- nova_policy_file_backup = '%s%s' % (nova_policy_file, '.bak')
-
- def __init__(self, conf, log):
- super(LocalInstaller, self).__init__(conf, log)
- self.policy_modified = False
- self.add_policy_file = False
-
- def setup(self):
- self.get_ssh_key_from_installer()
- self.set_apply_patches()
-
- def cleanup(self):
- self.restore_apply_patches()
-
- def get_ssh_key_from_installer(self):
- self.log.info('Assuming SSH keys already exchanged with computer'
- 'for local installer type')
- return None
-
- def get_host_ip_from_hostname(self, hostname):
- self.log.info('Get host ip from host name in local installer......')
-
- cmd = "getent hosts %s | awk '{ print $1 }'" % (hostname)
- server = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
- stdout, stderr = server.communicate()
- host_ip = stdout.strip().decode("utf-8")
-
- self.log.info('Get host_ip:%s from host_name:%s in local installer'
- % (host_ip, hostname))
- return host_ip
-
- def set_apply_patches(self):
- self._set_nova_policy()
- if self.conf.inspector.type == Inspector.VITRAGE:
- set_vitrage_host_down_template()
- os.system('sudo systemctl restart devstack@vitrage-graph.service')
-
- def restore_apply_patches(self):
- self._restore_nova_policy()
-
- def _set_nova_policy(self):
- host_status_policy = 'os_compute_api:servers:show:host_status'
- host_status_rule = 'rule:admin_or_owner'
- policy_data = {
- 'context_is_admin': 'role:admin',
- 'owner': 'user_id:%(user_id)s',
- 'admin_or_owner': 'rule:context_is_admin or rule:owner',
- host_status_policy: host_status_rule
- }
-
- if os.path.isfile(self.nova_policy_file):
- data = load_json_file(self.nova_policy_file)
- if host_status_policy in data:
- rule_origion = data[host_status_policy]
- if host_status_rule == rule_origion:
- self.log.info('Do not need to modify nova policy.')
- self.policy_modified = False
- else:
- # update the host_status_policy
- data[host_status_policy] = host_status_rule
- self.policy_modified = True
- else:
- # add the host_status_policy, if the admin_or_owner is not
- # defined, add it also
- for policy, rule in policy_data.items():
- if policy not in data:
- data[policy] = rule
- self.policy_modified = True
- if self.policy_modified:
- self.log.info('Nova policy is Modified.')
- shutil.copyfile(self.nova_policy_file,
- self.nova_policy_file_backup)
- else:
- # file does not exit, create a new one and add the policy
- self.log.info('Nova policy file not exist. Creating a new one')
- data = policy_data
- self.add_policy_file = True
-
- if self.policy_modified or self.add_policy_file:
- write_json_file(self.nova_policy_file, data)
- os.system('sudo systemctl restart devstack@n-api.service')
-
- def _restore_nova_policy(self):
- if self.policy_modified:
- shutil.copyfile(self.nova_policy_file_backup,
- self.nova_policy_file)
- os.remove(self.nova_policy_file_backup)
- elif self.add_policy_file:
- os.remove(self.nova_policy_file)
-
- if self.add_policy_file or self.policy_modified:
- os.system('sudo systemctl restart devstack@n-api.service')
- self.add_policy_file = False
- self.policy_modified = False
diff --git a/doctor_tests/installer/mcp.py b/doctor_tests/installer/mcp.py
index 9cfff92d..7659c9e2 100644
--- a/doctor_tests/installer/mcp.py
+++ b/doctor_tests/installer/mcp.py
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2018 ZTE Corporation and others.
+# Copyright (c) 2019 ZTE Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -7,15 +7,26 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from os.path import isfile
+import re
+import time
+from doctor_tests.common.constants import is_fenix
+from doctor_tests.common.utils import get_doctor_test_root_dir
from doctor_tests.common.utils import SSHClient
from doctor_tests.installer.base import BaseInstaller
class McpInstaller(BaseInstaller):
node_user_name = 'ubuntu'
- cm_set_script = 'set_ceilometer.py'
- cm_restore_script = 'restore_ceilometer.py'
+
+ cm_set_script = 'set_config.py'
+ nc_set_compute_script = 'set_compute_config.py'
+ fe_set_script = 'set_fenix.sh'
+ cm_restore_script = 'restore_config.py'
+ nc_restore_compute_script = 'restore_compute_config.py'
+ ac_restart_script = 'restart_aodh.py'
+ ac_restore_script = 'restore_aodh.py'
+ python = 'python3'
def __init__(self, conf, log):
super(McpInstaller, self).__init__(conf, log)
@@ -26,40 +37,87 @@ class McpInstaller(BaseInstaller):
look_for_keys=True)
self.controllers = list()
self.controller_clients = list()
+ self.computes = list()
def setup(self):
self.log.info('Setup MCP installer start......')
-
- self.controllers = self.get_controller_ips()
+ self.get_node_ips()
self.create_flavor()
- self.set_apply_patches()
+ if is_fenix(self.conf):
+ self.set_apply_patches()
self.setup_stunnel()
def cleanup(self):
- self.restore_apply_patches()
+ if is_fenix(self.conf):
+ self.restore_apply_patches()
for server in self.servers:
server.terminate()
def get_ssh_key_from_installer(self):
self.log.info('Get SSH keys from MCP......')
- # Assuming mcp.rsa is already mapped to functest container
- # if not, only the test runs on jumphost can get the ssh_key
- # default in path /var/lib/opnfv/mcp.rsa
+ # Default in path /var/lib/opnfv/mcp.rsa
ssh_key = '/root/.ssh/id_rsa'
mcp_key = '/var/lib/opnfv/mcp.rsa'
- return ssh_key if isfile(ssh_key) else mcp_key
-
- def get_controller_ips(self):
- self.log.info('Get controller ips from Mcp installer......')
-
- command = "sudo salt --out yaml 'ctl*' " \
- "pillar.get _param:openstack_control_address |" \
- "awk '{print $2}'"
- controllers = self._run_cmd_remote(self.client, command)
- self.log.info('Get controller_ips:%s from Mcp installer'
- % controllers)
- return controllers
+ return mcp_key if isfile(mcp_key) else ssh_key
+
+ def get_transport_url(self):
+ client = SSHClient(self.controllers[0], self.node_user_name,
+ key_filename=self.key_file)
+ try:
+ cmd = 'sudo grep -m1 "^transport_url" /etc/nova/nova.conf'
+ ret, url = client.ssh(cmd)
+
+ if ret:
+ raise Exception('Exec command to get transport from '
+ 'controller(%s) in MCP installer failed, '
+ 'ret=%s, output=%s'
+ % (self.controllers[0], ret, url))
+            elif self.controllers[0] not in url[0]:
+ # need to use ip instead of hostname
+ url = (re.sub("@.*:", "@%s:" % self.controllers[0],
+ url[0].split("=", 1)[1]))
+ except Exception:
+ cmd = 'grep -i "^rabbit" /etc/nova/nova.conf'
+ ret, lines = client.ssh(cmd)
+ if ret:
+ raise Exception('Exec command to get transport from '
+ 'controller(%s) in MCP installer failed, '
+ 'ret=%s, output=%s'
+                                % (self.controllers[0], ret, lines))
+ else:
+ for line in lines.split('\n'):
+ if line.startswith("rabbit_userid"):
+ rabbit_userid = line.split("=")
+ if line.startswith("rabbit_port"):
+ rabbit_port = line.split("=")
+ if line.startswith("rabbit_password"):
+ rabbit_password = line.split("=")
+ url = "rabbit://%s:%s@%s:%s/?ssl=0" % (rabbit_userid,
+ rabbit_password,
+ self.controllers[0],
+ rabbit_port)
+ self.log.info('get_transport_url %s' % url)
+ return url
+
+ def _copy_overcloudrc_to_controllers(self):
+ for ip in self.controllers:
+ cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
+ self._run_cmd_remote(self.client, cmd)
+
+ def get_node_ips(self):
+ self.log.info('Get node ips from Mcp installer......')
+
+ command = 'sudo salt "*" --out yaml pillar.get _param:single_address'
+ node_details = self._run_cmd_remote(self.client, command)
+
+ self.controllers = [line.split()[1] for line in node_details
+ if line.startswith("ctl")]
+ self.computes = [line.split()[1] for line in node_details
+ if line.startswith("cmp")]
+
+ self.log.info('controller_ips:%s' % self.controllers)
+ self.log.info('compute_ips:%s' % self.computes)
def get_host_ip_from_hostname(self, hostname):
command = "sudo salt --out yaml '%s*' " \
@@ -70,21 +128,80 @@ class McpInstaller(BaseInstaller):
def set_apply_patches(self):
self.log.info('Set apply patches start......')
+ fenix_files = None
+ set_scripts = [self.cm_set_script]
+ thrs = []
+
+ restart_cmd = 'sudo systemctl restart' \
+ ' ceilometer-agent-notification.service'
+
+ if self.conf.test_case != 'fault_management':
+ if is_fenix(self.conf):
+ set_scripts.append(self.fe_set_script)
+ testdir = get_doctor_test_root_dir()
+ fenix_files = ["Dockerfile", "run"]
+ restart_cmd += ' nova-scheduler.service'
+ set_scripts.append(self.nc_set_compute_script)
- restart_cm_cmd = 'sudo service ceilometer-agent-notification restart'
for node_ip in self.controllers:
client = SSHClient(node_ip, self.node_user_name,
key_filename=self.key_file)
- self.controller_clients.append(client)
- self._run_apply_patches(client,
- restart_cm_cmd,
- [self.cm_set_script])
+ if fenix_files is not None:
+ for fenix_file in fenix_files:
+ src_file = '{0}/{1}/{2}'.format(testdir,
+ 'admin_tool/fenix',
+ fenix_file)
+ client.scp(src_file, fenix_file)
+ thrs.append(self._run_apply_patches(client,
+ restart_cmd,
+ set_scripts,
+ python=self.python))
+ time.sleep(5)
+
+        self.log.info('Set apply patches for computes start......')
+
+ if self.conf.test_case != 'fault_management':
+ restart_cmd = 'sudo systemctl restart nova-compute.service'
+ for node_ip in self.computes:
+ client = SSHClient(node_ip, self.node_user_name,
+ key_filename=self.key_file)
+ thrs.append(self._run_apply_patches(
+ client,
+ restart_cmd,
+ [self.nc_set_compute_script],
+ python=self.python))
+ time.sleep(5)
+        # If a Fenix container is being built, it needs to be ready
+        # before continuing
+ for thr in thrs:
+ thr.join()
def restore_apply_patches(self):
self.log.info('restore apply patches start......')
- restart_cm_cmd = 'sudo service ceilometer-agent-notification restart'
- for client in self.controller_clients:
+ restore_scripts = [self.cm_restore_script]
+
+ restore_scripts.append(self.ac_restore_script)
+ restart_cmd = 'sudo systemctl restart' \
+ ' ceilometer-agent-notification.service'
+
+ if self.conf.test_case != 'fault_management':
+ restart_cmd += ' nova-scheduler.service'
+ restore_scripts.append(self.nc_restore_compute_script)
+
+ for node_ip in self.controllers:
+ client = SSHClient(node_ip, self.node_user_name,
+ key_filename=self.key_file)
self._run_apply_patches(client,
- restart_cm_cmd,
- [self.cm_restore_script])
+ restart_cmd,
+ restore_scripts,
+ python=self.python)
+
+ if self.conf.test_case != 'fault_management':
+ restart_cmd = 'sudo systemctl restart nova-compute.service'
+ for node_ip in self.computes:
+ client = SSHClient(node_ip, self.node_user_name,
+ key_filename=self.key_file)
+ self._run_apply_patches(
+ client, restart_cmd,
+ [self.nc_restore_compute_script],
+ python=self.python)
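
`get_node_ips` assumes each line of the salt output pairs a minion id with its `single_address` value. An illustration of the parsing with invented hostnames and addresses:

```python
# Illustrative input for get_node_ips(); hostnames and IPs are invented.
node_details = [
    "ctl01.mcp.local: 172.16.10.101",
    "ctl02.mcp.local: 172.16.10.102",
    "cmp001.mcp.local: 172.16.10.105",
]
controllers = [line.split()[1] for line in node_details
               if line.startswith("ctl")]  # ['172.16.10.101', '172.16.10.102']
computes = [line.split()[1] for line in node_details
            if line.startswith("cmp")]     # ['172.16.10.105']
```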