From a0528f67abe01f0bb4be3565cfef4fd049afa1fa Mon Sep 17 00:00:00 2001
From: Tomi Juvonen
Date: Thu, 7 Nov 2019 13:09:45 +0200
Subject: Maintenance support for latest Fenix, python3 and Fuel

JIRA: DOCTOR-134

Signed-off-by: Tomi Juvonen
Change-Id: I51a93637f30b0eece2075a8277616fb97a1b230e
---
 doctor_tests/installer/base.py             |   4 +-
 doctor_tests/installer/common/set_fenix.sh |  82 +++++++++++++++----
 doctor_tests/installer/mcp.py              | 126 ++++++++++++++++++++++++-----
 3 files changed, 175 insertions(+), 37 deletions(-)

diff --git a/doctor_tests/installer/base.py b/doctor_tests/installer/base.py
index 7e2658e5..b2270654 100644
--- a/doctor_tests/installer/base.py
+++ b/doctor_tests/installer/base.py
@@ -139,10 +139,10 @@ class BaseInstaller(object):
             ret, url = client.ssh(cmd)
             if ret:
                 raise Exception('Exec command to get transport from '
-                                'controller(%s) in Apex installer failed, '
+                                'controller(%s) failed, '
                                 'ret=%s, output=%s'
                                 % (self.controllers[0], ret, url))
-            else:
+            elif self.controllers[0] not in url:
                 # need to use ip instead of hostname
                 ret = (re.sub("@.*:", "@%s:" % self.controllers[0],
                        url[0].split("=", 1)[1]))
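For context, the new "elif" branch only rewrites the transport URL when the controller IP is not already in it; the re.sub that follows swaps the hostname for that IP. A minimal sketch of the substitution, with a hypothetical nova.conf grep result and controller address (both illustrative assumptions, not values from this change):

    import re

    # Hypothetical ssh output: the matching "transport_url=..." line from
    # nova.conf; host and credentials are made up for illustration.
    url = ["transport_url=rabbit://openstack:secret@ctl01.mcp.local:5672/"]
    controller_ip = "192.0.2.10"  # stands in for self.controllers[0]

    # Same expression as base.py: strip the "transport_url=" key, then
    # replace everything between "@" and the port with the known IP.
    ret = re.sub("@.*:", "@%s:" % controller_ip,
                 url[0].split("=", 1)[1])
    print(ret)  # rabbit://openstack:secret@192.0.2.10:5672/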
diff --git a/doctor_tests/installer/common/set_fenix.sh b/doctor_tests/installer/common/set_fenix.sh
index a660af79..aac376cd 100644
--- a/doctor_tests/installer/common/set_fenix.sh
+++ b/doctor_tests/installer/common/set_fenix.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 ##############################################################################
-# Copyright (c) 2018 Nokia Corporation and others.
+# Copyright (c) 2019 Nokia Corporation and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -10,18 +10,74 @@
 ##############################################################################

 # Config files
+docker -v >/dev/null || {
+echo "Fenix needs docker to be installed..."
+ver=`grep "UBUNTU_CODENAME" /etc/os-release | cut -d '=' -f 2`
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $ver stable"
+apt install apt-transport-https ca-certificates curl software-properties-common
+apt update
+apt-cache policy docker-ce
+apt-get install -y docker-ce docker-ce-cli containerd.io
+dpkg -r --force-depends golang-docker-credential-helpers
+}
+
+docker ps | grep fenix >/dev/null && {
+REMOTE=`docker exec -ti fenix git rev-parse origin/master`
+LOCAL=`docker exec -ti fenix git rev-parse @`
+if [ $LOCAL = $REMOTE ]; then
+    echo "Fenix start: Already running latest"
+    exit 0
+else
+    echo "Fenix container needs to be recreated..."
+    # Remove previous container
+    for img in `docker image list | grep "^fenix" | awk '{print $1}'`; do
+        for dock in `docker ps --all -f "ancestor=$img" | grep "$img" | awk '{print $1}'`; do
+            docker stop $dock; docker rm $dock;
+        done;
+        docker image rm $img;
+    done
+fi
+} || echo "Fenix container needs to be created..."
+
+cp /root/keystonercv3 .
+
+transport=`grep -m1 "^transport" /etc/nova/nova.conf`
+. keystonercv3
+
 echo "[DEFAULT]" > fenix.conf
-echo "[DEFAULT]" > fenix-api.conf
 echo "port = 12347" >> fenix.conf
-echo "port = 12347" >> fenix-api.conf
-grep -m1 "^transport" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf >> fenix.conf
-grep -m1 "^transport" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf >> fenix-api.conf
+echo $transport >> fenix.conf
+
 echo "[database]" >> fenix.conf
-MYSQLIP=`grep -m1 "^connection=mysql" /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf | sed -e "s/.*@//;s/\/.*//"`
-echo "connection=mysql+pymysql://fenix:fenix@$MYSQLIP/fenix?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf" >> fenix.conf
+MYSQLIP=`grep -m1 "^connection" /etc/nova/nova.conf | sed -e "s/.*@//;s/\/.*//"`
+echo "connection = mysql+pymysql://fenix:fenix@$MYSQLIP/fenix" >> fenix.conf
+
+echo "[service_user]" >> fenix.conf
+echo "os_auth_url = $OS_AUTH_URL" >> fenix.conf
+echo "os_username = $OS_USERNAME" >> fenix.conf
+echo "os_password = $OS_PASSWORD" >> fenix.conf
+echo "os_user_domain_name = $OS_USER_DOMAIN_NAME" >> fenix.conf
+echo "os_project_name = $OS_PROJECT_NAME" >> fenix.conf
+echo "os_project_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix.conf
+
+echo "[DEFAULT]" > fenix-api.conf
+echo "port = 12347" >> fenix-api.conf
+echo $transport >> fenix-api.conf
+
+echo "[keystone_authtoken]" >> fenix-api.conf
+echo "auth_url = $OS_AUTH_URL" >> fenix-api.conf
+echo "auth_type = password" >> fenix-api.conf
+echo "project_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix-api.conf
+echo "project_name = $OS_PROJECT_NAME" >> fenix-api.conf
+echo "user_domain_name = $OS_PROJECT_DOMAIN_NAME" >> fenix-api.conf
+echo "password = $OS_PASSWORD" >> fenix-api.conf
+echo "username = $OS_USERNAME" >> fenix-api.conf
+echo "cafile = /opt/stack/data/ca-bundle.pem" >> fenix-api.conf

 # Mysql pw
-MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.json | grep mysql | grep root_password | awk -F": " '{print $2}' | awk -F"\"" '{print $2}'`
+# MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.json | grep mysql | grep root_password | awk -F": " '{print $2}' | awk -F"\"" '{print $2}'`
+MYSQLPW=root

 # Fenix DB
 [ `mysql -uroot -p$MYSQLPW -e "SELECT host, user FROM mysql.user;" | grep fenix | wc -l` -eq 0 ] && {
@@ -31,17 +87,9 @@ MYSQLPW=`cat /var/lib/config-data/mysql/etc/puppet/hieradata/service_configs.jso
 mysql -ufenix -pfenix -hlocalhost -e "DROP DATABASE IF EXISTS fenix;"
 mysql -ufenix -pfenix -hlocalhost -e "CREATE DATABASE fenix CHARACTER SET utf8;"

-# Remove previous container
-for img in `docker image list | grep "^fenix" | awk '{print $1}'`; do
-    for dock in `docker ps --all -f "ancestor=$img" | grep "$img" | awk '{print $1}'`; do
-        docker stop $dock; docker rm $dock;
-    done;
-    docker image rm $img;
-done
-
 # Build Fenix container and run it
 chmod 700 run
-docker build --build-arg OPENSTACK=master --build-arg BRANCH=master --network host /home/heat-admin -t fenix | tail -1
+docker build --build-arg OPENSTACK=master --build-arg BRANCH=master --network host $PWD -t fenix | tail -1
 docker run --network host -d --name fenix -p 12347:12347 -ti fenix
 if [ $? -eq 0 ]; then
     echo "Fenix start: OK"
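For reference, with the echo lines above the generated fenix.conf comes out roughly as follows. Every value is an illustrative placeholder: the transport line is copied from nova.conf and the [service_user] values come from the sourced keystonercv3, so they are environment-specific:

    [DEFAULT]
    port = 12347
    transport_url = rabbit://openstack:secret@192.0.2.10:5672/

    [database]
    connection = mysql+pymysql://fenix:fenix@192.0.2.10/fenix

    [service_user]
    os_auth_url = http://192.0.2.11:5000/v3
    os_username = admin
    os_password = secret
    os_user_domain_name = Default
    os_project_name = admin
    os_project_domain_name = Default

fenix-api.conf is assembled the same way: the same [DEFAULT] block, plus a [keystone_authtoken] section in place of [database] and [service_user].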
diff --git a/doctor_tests/installer/mcp.py b/doctor_tests/installer/mcp.py
index 80e559ed..65c8ed70 100644
--- a/doctor_tests/installer/mcp.py
+++ b/doctor_tests/installer/mcp.py
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2018 ZTE Corporation and others.
+# Copyright (c) 2019 ZTE Corporation and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -7,7 +7,10 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 from os.path import isfile
+import time

+from doctor_tests.common.constants import is_fenix
+from doctor_tests.common.utils import get_doctor_test_root_dir
 from doctor_tests.common.utils import SSHClient
 from doctor_tests.installer.base import BaseInstaller

@@ -15,6 +18,15 @@ from doctor_tests.installer.base import BaseInstaller
 class McpInstaller(BaseInstaller):

     node_user_name = 'ubuntu'
+    cm_set_script = 'set_config.py'
+    nc_set_compute_script = 'set_compute_config.py'
+    fe_set_script = 'set_fenix.sh'
+    cm_restore_script = 'restore_config.py'
+    nc_restore_compute_script = 'restore_compute_config.py'
+    ac_restart_script = 'restart_aodh.py'
+    ac_restore_script = 'restore_aodh.py'
+    python = 'python3'
+
     def __init__(self, conf, log):
         super(McpInstaller, self).__init__(conf, log)
         self.key_file = self.get_ssh_key_from_installer()
@@ -24,40 +36,48 @@ class McpInstaller(BaseInstaller):
                                 look_for_keys=True)
         self.controllers = list()
         self.controller_clients = list()
+        self.computes = list()

     def setup(self):
         self.log.info('Setup MCP installer start......')
-
-        self.controllers = self.get_controller_ips()
+        self.get_node_ips()
         self.create_flavor()
-        self.set_apply_patches()
+        if is_fenix(self.conf):
+            self.set_apply_patches()
         self.setup_stunnel()

     def cleanup(self):
-        self.restore_apply_patches()
+        if is_fenix(self.conf):
+            self.restore_apply_patches()
         for server in self.servers:
             server.terminate()

     def get_ssh_key_from_installer(self):
         self.log.info('Get SSH keys from MCP......')
-        # Assuming mcp.rsa is already mapped to functest container
-        # if not, only the test runs on jumphost can get the ssh_key
-        # default in path /var/lib/opnfv/mcp.rsa
+        # Default in path /var/lib/opnfv/mcp.rsa
         ssh_key = '/root/.ssh/id_rsa'
         mcp_key = '/var/lib/opnfv/mcp.rsa'
-        return ssh_key if isfile(ssh_key) else mcp_key
+        return mcp_key if isfile(mcp_key) else ssh_key
+
+    def _copy_overcloudrc_to_controllers(self):
+        for ip in self.controllers:
+            cmd = "scp overcloudrc %s@%s:" % (self.node_user_name, ip)
+            self._run_cmd_remote(self.client, cmd)
+
+    def get_node_ips(self):
+        self.log.info('Get node ips from Mcp installer......')
+
+        command = 'sudo salt "*" --out yaml pillar.get _param:single_address'
+        node_details = self._run_cmd_remote(self.client, command)

-    def get_controller_ips(self):
-        self.log.info('Get controller ips from Mcp installer......')
+        self.controllers = [line.split()[1] for line in node_details
+                            if line.startswith("ctl")]
+        self.computes = [line.split()[1] for line in node_details
+                         if line.startswith("cmp")]

-        command = "sudo salt --out yaml 'ctl*' " \
-                  "pillar.get _param:openstack_control_address |" \
-                  "awk '{print $2}'"
-        controllers = self._run_cmd_remote(self.client, command)
-        self.log.info('Get controller_ips:%s from Mcp installer'
-                      % controllers)
-        return controllers
+        self.log.info('controller_ips:%s' % self.controllers)
+        self.log.info('compute_ips:%s' % self.computes)

     def get_host_ip_from_hostname(self, hostname):
         command = "sudo salt --out yaml '%s*' " \
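For reference, the new get_node_ips() works on the per-minion "name: address" lines that the salt command prints. A minimal sketch of the same comprehensions, with hypothetical minion names and addresses (illustrative, not from a real deployment):

    # Hypothetical output lines of:
    #   sudo salt "*" --out yaml pillar.get _param:single_address
    node_details = [
        "ctl01.mcp.local: 172.16.10.101",
        "ctl02.mcp.local: 172.16.10.102",
        "cmp001.mcp.local: 172.16.10.105",
        "gtw01.mcp.local: 172.16.10.110",  # ignored: neither ctl nor cmp
    ]

    # Same logic as mcp.py: key the role off the minion-name prefix and
    # take the address token after the colon.
    controllers = [line.split()[1] for line in node_details
                   if line.startswith("ctl")]
    computes = [line.split()[1] for line in node_details
                if line.startswith("cmp")]
    print(controllers)  # ['172.16.10.101', '172.16.10.102']
    print(computes)     # ['172.16.10.105']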
@@ -68,6 +88,76 @@

     def set_apply_patches(self):
         self.log.info('Set apply patches start......')
+        fenix_files = None
+
+        set_scripts = [self.cm_set_script]
+
+        restart_cmd = 'sudo systemctl restart' \
+                      ' ceilometer-agent-notification.service'
+
+        if self.conf.test_case != 'fault_management':
+            if is_fenix(self.conf):
+                set_scripts.append(self.fe_set_script)
+                testdir = get_doctor_test_root_dir()
+                fenix_files = ["Dockerfile", "run"]
+            restart_cmd += ' nova-scheduler.service'
+            set_scripts.append(self.nc_set_compute_script)
+
+        for node_ip in self.controllers:
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
+            if fenix_files is not None:
+                for fenix_file in fenix_files:
+                    src_file = '{0}/{1}/{2}'.format(testdir,
+                                                    'admin_tool/fenix',
+                                                    fenix_file)
+                    client.scp(src_file, fenix_file)
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    set_scripts,
+                                    python=self.python)
+            time.sleep(5)
+
+        self.log.info('Set apply patches start......')
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd = 'sudo systemctl restart nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(client,
+                                        restart_cmd,
+                                        [self.nc_set_compute_script],
+                                        python=self.python)
+                time.sleep(5)

     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
+
+        restore_scripts = [self.cm_restore_script]
+
+        restore_scripts.append(self.ac_restore_script)
+        restart_cmd = 'sudo systemctl restart' \
+                      ' ceilometer-agent-notification.service'
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd += ' nova-scheduler.service'
+            restore_scripts.append(self.nc_restore_compute_script)
+
+        for node_ip in self.controllers:
+            client = SSHClient(node_ip, self.node_user_name,
+                               key_filename=self.key_file)
+            self._run_apply_patches(client,
+                                    restart_cmd,
+                                    restore_scripts,
+                                    python=self.python)
+
+        if self.conf.test_case != 'fault_management':
+            restart_cmd = 'sudo systemctl restart nova-compute.service'
+            for node_ip in self.computes:
+                client = SSHClient(node_ip, self.node_user_name,
+                                   key_filename=self.key_file)
+                self._run_apply_patches(
+                    client, restart_cmd,
+                    [self.nc_restore_compute_script],
+                    python=self.python)
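_run_apply_patches() itself is inherited from BaseInstaller and is not part of this diff; judging only from the call sites above, it behaves roughly like the sketch below. The body is an assumption for illustration: the script path layout, the bash-versus-python dispatch, and the internals are guesses, not the real helper.

    def _run_apply_patches(client, restart_cmd, script_names,
                           python='python3'):
        # Assumed behavior: push each maintenance script to the node,
        # execute it (shell scripts via bash, the rest via the requested
        # python interpreter), then restart the services in restart_cmd.
        for script in script_names:
            # Script location under doctor_tests/installer/common/ is an
            # assumption based on where set_fenix.sh lives in this repo.
            client.scp('doctor_tests/installer/common/' + script, script)
            if script.endswith('.sh'):
                client.ssh('sudo bash %s' % script)
            else:
                client.ssh('sudo %s %s' % (python, script))
        client.ssh(restart_cmd)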