author     Tomi Juvonen <tomi.juvonen@nokia.com>    2018-09-28 12:15:43 +0300
committer  Tomi Juvonen <tomi.juvonen@nokia.com>    2018-10-25 13:57:03 +0300
commit     a6575910a137f8932e294f66c9da3194ad937691 (patch)
tree       e11851ae691892fea7f0efb9922f115fbb0521a9 /doctor_tests/installer/apex.py
parent     61eb3927ada784cc3dffb5ddd17f66e47871f708 (diff)
Support Apex with services in containers
Upstream Apex now runs the OpenStack services in containers, so this deployment mode needs to be supported when it is used.
JIRA: DOCTOR-130
Change-Id: I3d73a1699e4fee53b001f043f55d0eeefa7bfb7b
Signed-off-by: Tomi Juvonen <tomi.juvonen@nokia.com>
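
For context, the core of the change is to detect a containerized deployment and pick a container-aware restart command instead of a systemd one. A minimal sketch of the idea, reusing the helpers introduced in the patch below (the service name here is only an example):

    # Detect a containerized Apex deployment from the deploy command
    # recorded on the undercloud.
    command = "grep docker /home/stack/deploy_command"
    self.use_containers = self._check_cmd_remote(self.client, command)

    if self.use_containers:
        # Restart every container whose name matches the service.
        restart_cmd = self._set_docker_restart_cmd("ceilometer-notification")
    else:
        # Bare-metal services keep the old systemd restart.
        restart_cmd = ('sudo systemctl restart'
                       ' openstack-ceilometer-notification.service')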
Diffstat (limited to 'doctor_tests/installer/apex.py')
-rw-r--r--   doctor_tests/installer/apex.py   99
1 file changed, 76 insertions(+), 23 deletions(-)
diff --git a/doctor_tests/installer/apex.py b/doctor_tests/installer/apex.py
index 694adb88..9b0010e4 100644
--- a/doctor_tests/installer/apex.py
+++ b/doctor_tests/installer/apex.py
@@ -16,17 +16,21 @@ from doctor_tests.installer.base import BaseInstaller
 
 class ApexInstaller(BaseInstaller):
     node_user_name = 'heat-admin'
+    installer_username = 'stack'
     cm_set_script = 'set_config.py'
     nc_set_compute_script = 'set_compute_config.py'
     cg_set_script = 'set_congress.py'
     cm_restore_script = 'restore_config.py'
     nc_restore_compute_script = 'restore_compute_config.py'
     cg_restore_script = 'restore_congress.py'
+    ac_restart_script = 'restart_aodh.py'
+    ac_restore_script = 'restore_aodh.py'
+    python = 'python'
 
     def __init__(self, conf, log):
         super(ApexInstaller, self).__init__(conf, log)
         self.client = SSHClient(self.conf.installer.ip,
-                                self.conf.installer.username,
+                                self.installer_username,
                                 key_filename=self.conf.installer.key_file,
                                 look_for_keys=True)
         self.key_file = None
@@ -38,7 +42,7 @@ class ApexInstaller(BaseInstaller):
     def setup(self):
         self.log.info('Setup Apex installer start......')
         self.key_file = self.get_ssh_key_from_installer()
-        self._get_and_set_ips()
+        self._get_overcloud_conf()
         self.create_flavor()
         self.set_apply_patches()
         self.setup_stunnel()
@@ -52,8 +56,8 @@ class ApexInstaller(BaseInstaller):
         key_path = '/home/stack/.ssh/id_rsa'
         return self._get_ssh_key(self.client, key_path)
 
-    def _get_and_set_ips(self):
-        self.log.info('Get controller and compute ips from Apex installer'
+    def _get_overcloud_conf(self):
+        self.log.info('Get overcloud config details from Apex installer'
                       '......')
 
         command = "source stackrc; nova list | grep ' overcloud-'"
@@ -64,8 +68,11 @@ class ApexInstaller(BaseInstaller):
                 self.controllers.append(ip)
             elif 'overcloud-novacompute-' in line:
                 self.computes.append(ip)
+        command = "grep docker /home/stack/deploy_command"
+        self.use_containers = self._check_cmd_remote(self.client, command)
         self.log.info('controller_ips:%s' % self.controllers)
         self.log.info('compute_ips:%s' % self.computes)
+        self.log.info('use_containers:%s' % self.use_containers)
 
     def get_host_ip_from_hostname(self, hostname):
         self.log.info('Get host ip by hostname=%s from Apex installer......'
@@ -79,8 +86,12 @@ class ApexInstaller(BaseInstaller):
 
     def get_transport_url(self):
         client = SSHClient(self.controllers[0], self.node_user_name,
                            key_filename=self.key_file)
+        if self.use_containers:
+            ncbase = "/var/lib/config-data/puppet-generated/nova"
+        else:
+            ncbase = ""
+        command = 'sudo grep "^transport_url" %s/etc/nova/nova.conf' % ncbase
-        command = 'sudo grep "^transport_url" /etc/nova/nova.conf'
         ret, url = client.ssh(command)
         if ret:
             raise Exception('Exec command to get host ip from controller(%s)'
@@ -92,19 +103,39 @@ class ApexInstaller(BaseInstaller):
         self.log.debug('get_transport_url %s' % ret)
         return ret
 
+    def _set_docker_restart_cmd(self, service):
+        # There can be multiple instances running so need to restart all
+        cmd = "for container in `sudo docker ps | grep "
+        cmd += service
+        cmd += " | awk '{print $1}'`; do sudo docker restart $container; \
+               done;"
+        return cmd
+
     def set_apply_patches(self):
         self.log.info('Set apply patches start......')
-        restart_cmd = 'sudo systemctl restart' \
-                      ' openstack-ceilometer-notification.service'
-
         set_scripts = [self.cm_set_script]
 
+        if self.use_containers:
+            restart_cmd = (self._set_docker_restart_cmd(
+                           "ceilometer-notification"))
+            set_scripts.append(self.ac_restart_script)
+        else:
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-ceilometer-notification.service'
+
         if self.conf.test_case != 'fault_management':
-            restart_cmd += ' openstack-nova-scheduler.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+            else:
+                restart_cmd += ' openstack-nova-scheduler.service'
+
             set_scripts.append(self.nc_set_compute_script)
 
         if self.conf.inspector.type == Inspector.CONGRESS:
-            restart_cmd += ' openstack-congress-server.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("congress-server")
+            else:
+                restart_cmd += ' openstack-congress-server.service'
             set_scripts.append(self.cg_set_script)
 
         for node_ip in self.controllers:
@@ -113,18 +144,23 @@ class ApexInstaller(BaseInstaller):
             self.controller_clients.append(client)
             self._run_apply_patches(client,
                                     restart_cmd,
-                                    set_scripts)
+                                    set_scripts,
+                                    python=self.python)
 
         if self.conf.test_case != 'fault_management':
-            restart_cmd = 'sudo systemctl restart' \
-                          ' openstack-nova-compute.service'
+            if self.use_containers:
+                restart_cmd = self._set_docker_restart_cmd("nova-compute")
+            else:
+                restart_cmd = 'sudo systemctl restart' \
+                              ' openstack-nova-compute.service'
             for node_ip in self.computes:
                 client = SSHClient(node_ip, self.node_user_name,
                                    key_filename=self.key_file)
                 self.compute_clients.append(client)
                 self._run_apply_patches(client,
                                         restart_cmd,
-                                        [self.nc_set_compute_script])
+                                        [self.nc_set_compute_script],
+                                        python=self.python)
 
         if self.conf.test_case != 'fault_management':
             time.sleep(10)
@@ -132,27 +168,44 @@ class ApexInstaller(BaseInstaller):
     def restore_apply_patches(self):
         self.log.info('restore apply patches start......')
-        restart_cmd = 'sudo systemctl restart' \
-                      ' openstack-ceilometer-notification.service'
-
         restore_scripts = [self.cm_restore_script]
 
+        if self.use_containers:
+            restart_cmd = (self._set_docker_restart_cmd(
+                           "ceilometer-notification"))
+            restore_scripts.append(self.ac_restore_script)
+        else:
+            restart_cmd = 'sudo systemctl restart' \
+                          ' openstack-ceilometer-notification.service'
+
         if self.conf.test_case != 'fault_management':
-            restart_cmd += ' openstack-nova-scheduler.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("nova-scheduler")
+            else:
+                restart_cmd += ' openstack-nova-scheduler.service'
+
             restore_scripts.append(self.nc_restore_compute_script)
 
         if self.conf.inspector.type == Inspector.CONGRESS:
-            restart_cmd += ' openstack-congress-server.service'
+            if self.use_containers:
+                restart_cmd += self._set_docker_restart_cmd("congress-server")
+            else:
+                restart_cmd += ' openstack-congress-server.service'
             restore_scripts.append(self.cg_restore_script)
 
         for client in self.controller_clients:
             self._run_apply_patches(client,
                                     restart_cmd,
-                                    restore_scripts)
+                                    restore_scripts,
+                                    python=self.python)
 
         if self.conf.test_case != 'fault_management':
-            restart_cmd = 'sudo systemctl restart' \
-                          ' openstack-nova-compute.service'
+            if self.use_containers:
+                restart_cmd = self._set_docker_restart_cmd("nova-compute")
+            else:
+                restart_cmd = 'sudo systemctl restart' \
+                              ' openstack-nova-compute.service'
             for client in self.compute_clients:
                 self._run_apply_patches(client,
                                         restart_cmd,
-                                        [self.nc_restore_compute_script])
+                                        [self.nc_restore_compute_script],
+                                        python=self.python)
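
Two reading aids for the patch above. The command string assembled by _set_docker_restart_cmd restarts every matching container, and with containerized services the nova.conf that get_transport_url greps lives under the puppet-generated config tree on the host rather than directly under /etc/nova. A rough sketch, reusing names from the patch (the service name is only an example):

    # _set_docker_restart_cmd("nova-compute") evaluates to roughly:
    #   for container in `sudo docker ps | grep nova-compute | awk '{print $1}'`;
    #   do sudo docker restart $container; done;

    # Config path used by get_transport_url when services are containerized:
    ncbase = "/var/lib/config-data/puppet-generated/nova"
    command = 'sudo grep "^transport_url" %s/etc/nova/nova.conf' % ncbase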