Diffstat (limited to 'tests')
-rw-r--r--  tests/inspector.py   55
-rwxr-xr-x  tests/run.sh        195
2 files changed, 180 insertions, 70 deletions
diff --git a/tests/inspector.py b/tests/inspector.py
index c1f95697..7195969a 100644
--- a/tests/inspector.py
+++ b/tests/inspector.py
@@ -14,6 +14,7 @@ from flask import request
import json
import logger as doctor_log
import os
+import threading
import time
import novaclient.client as novaclient
@@ -23,19 +24,41 @@ import nova_force_down
LOG = doctor_log.Logger('doctor_inspector').getLogger()
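+# Helper thread that resets a single server's state; each instance gets
+# its own novaclient so that many reset_state calls can run concurrently.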
+class ThreadedResetState(threading.Thread):
+
+ def __init__(self, nova, state, server):
+ threading.Thread.__init__(self)
+ self.nova = nova
+ self.state = state
+ self.server = server
+
+ def run(self):
+ self.nova.servers.reset_state(self.server, self.state)
+ LOG.info('doctor mark vm(%s) error at %s' % (self.server, time.time()))
+
+
class DoctorInspectorSample(object):
- nova_api_version = '2.11'
+ NOVA_API_VERSION = '2.11'
+ NUMBER_OF_CLIENTS = 50
+ # TODO(tojuvone): This could be enhanced in the future with dynamic
+ # reuse of self.novaclients when all threads are in use, and with
+ # self.NUMBER_OF_CLIENTS based on the number of cores or overridden
+ # by an input argument
def __init__(self):
self.servers = collections.defaultdict(list)
- self.nova = novaclient.Client(self.nova_api_version,
- os.environ['OS_USERNAME'],
- os.environ['OS_PASSWORD'],
- os.environ['OS_TENANT_NAME'],
- os.environ['OS_AUTH_URL'],
- connection_pool=True)
- # check nova is available
+ self.novaclients = list()
+ # Pool of novaclients so that API calls can be issued in parallel
+ for i in range(self.NUMBER_OF_CLIENTS):
+ self.novaclients.append(novaclient.Client(self.NOVA_API_VERSION,
+ os.environ['OS_USERNAME'],
+ os.environ['OS_PASSWORD'],
+ os.environ['OS_TENANT_NAME'],
+ os.environ['OS_AUTH_URL'],
+ connection_pool=True))
+ # Normally we use this client for ordinary, non-parallel API calls
+ self.nova = self.novaclients[0]
self.nova.servers.list(detailed=False)
self.init_servers_list()
@@ -52,10 +75,18 @@ class DoctorInspectorSample(object):
LOG.error('can not get hostname from server=%s' % server)
def disable_compute_host(self, hostname):
- for server in self.servers[hostname]:
- self.nova.servers.reset_state(server, 'error')
- LOG.info('doctor mark vm(%s) error at %s' % (server, time.time()))
-
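+ # Pair each server on the failed host with a client from the pool,
+ # reset them to 'error' concurrently, then wait for all threads.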
+ threads = []
+ if len(self.servers[hostname]) > self.NUMBER_OF_CLIENTS:
+ # TODO(tojuvone): This could be enhanced in future with dynamic
+ # reuse of self.novaclients when all threads in use
+ LOG.error('%d servers in %s. Can handle only %d' % (
+ len(self.servers[hostname]), hostname, self.NUMBER_OF_CLIENTS))
+ for nova, server in zip(self.novaclients, self.servers[hostname]):
+ t = ThreadedResetState(nova, "error", server)
+ t.start()
+ threads.append(t)
+ for t in threads:
+ t.join()
# NOTE: We use our own client here instead of this novaclient for a
# workaround. Once keystone provides v2.1 nova api endpoint
# in the service catalog which is configured by OpenStack
diff --git a/tests/run.sh b/tests/run.sh
index 32562221..855ecfc6 100755
--- a/tests/run.sh
+++ b/tests/run.sh
@@ -17,9 +17,11 @@ IMAGE_URL=https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64
IMAGE_NAME=${IMAGE_NAME:-cirros}
IMAGE_FILE="${IMAGE_NAME}.img"
IMAGE_FORMAT=qcow2
-VM_NAME=doctor_vm1
+VM_BASENAME=doctor_vm
VM_FLAVOR=m1.tiny
-ALARM_NAME=doctor_alarm1
+# If VM_COUNT is set, use that many VMs instead of one
+VM_COUNT=${VM_COUNT:-1}
+ALARM_BASENAME=doctor_alarm
INSPECTOR_PORT=12345
CONSUMER_PORT=12346
DOCTOR_USER=doctor
@@ -38,8 +40,8 @@ as_doctor_user="--os-username $DOCTOR_USER --os-password $DOCTOR_PW
# Functions
get_compute_host_info() {
- # get computer host info which VM boot in
- COMPUTE_HOST=$(openstack $as_doctor_user server show $VM_NAME |
+ # get the compute host that the first VM booted on
+ COMPUTE_HOST=$(openstack $as_doctor_user server show ${VM_BASENAME}1 |
grep "OS-EXT-SRV-ATTR:host" | awk '{ print $4 }')
compute_host_in_undercloud=${COMPUTE_HOST%%.*}
die_if_not_set $LINENO COMPUTE_HOST "Failed to get compute hostname"
@@ -113,31 +115,60 @@ create_test_user() {
openstack role add "$DOCTOR_ROLE" --user "$DOCTOR_USER" \
--project "$DOCTOR_PROJECT"
}
+ # tojuvone: openstack quota show is broken, so nova has to be used
+ # https://bugs.launchpad.net/manila/+bug/1652118
+ # Note: while using the openstack client is encouraged, it has proven
+ # quite buggy.
+ # QUOTA=$(openstack quota show $DOCTOR_PROJECT)
+ DOCTOR_QUOTA=$(nova quota-show --tenant $DOCTOR_PROJECT)
+ # Make sure the quota allows the requested number of instances and cores
+ OLD_INSTANCE_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " instances " | \
+ awk '{print $4}')
+ if [ $OLD_INSTANCE_QUOTA -lt $VM_COUNT ]; then
+ openstack quota set --instances $VM_COUNT \
+ $DOCTOR_PROJECT
+ fi
+ OLD_CORES_QUOTA=$(echo "${DOCTOR_QUOTA}" | grep " cores " | \
+ awk '{print $4}')
+ if [ $OLD_CORES_QUOTA -lt $VM_COUNT ]; then
+ openstack quota set --cores $VM_COUNT \
+ $DOCTOR_PROJECT
+ fi
}
boot_vm() {
# test VM done with test user, so can test non-admin
- openstack $as_doctor_user server list | grep -q " $VM_NAME " && return 0
- openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
+ servers=$(openstack $as_doctor_user server list)
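+ # Boot ${VM_BASENAME}1..${VM_BASENAME}${VM_COUNT}, skipping any that
+ # already exist in the server list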
+ for i in `seq $VM_COUNT`; do
+ echo "${servers}" | grep -q " $VM_BASENAME$i " && continue
+ openstack $as_doctor_user server create --flavor "$VM_FLAVOR" \
--image "$IMAGE_NAME" \
- "$VM_NAME"
+ "$VM_BASENAME$i"
+ done
sleep 1
}
create_alarm() {
# get vm_id as test user
- ceilometer $as_doctor_user alarm-list | grep -q " $ALARM_NAME " && return 0
- vm_id=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $2}')
- # TODO(r-mibu): change notification endpoint from localhost to the consumer
- # IP address (functest container).
- ceilometer $as_doctor_user alarm-event-create --name "$ALARM_NAME" \
- --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
- --description "VM failure" \
- --enabled True \
- --repeat-actions False \
- --severity "moderate" \
- --event-type compute.instance.update \
- -q "traits.state=string::error; traits.instance_id=string::$vm_id"
+ alarm_list=$(ceilometer $as_doctor_user alarm-list)
+ vms=$(openstack $as_doctor_user server list)
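+ # One event alarm per VM: fire when that instance goes to 'error' and
+ # notify the consumer endpoint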
+ for i in `seq $VM_COUNT`; do
+ echo "${alarm_list}" | grep -q " $ALARM_BASENAME$i " || {
+ vm_id=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $2}')
+ # TODO(r-mibu): change notification endpoint from localhost to the
+ # consumer IP address (functest container).
+ ceilometer $as_doctor_user alarm-event-create \
+ --name "$ALARM_BASENAME$i" \
+ --alarm-action "http://localhost:$CONSUMER_PORT/failure" \
+ --description "VM failure" \
+ --enabled True \
+ --repeat-actions False \
+ --severity "moderate" \
+ --event-type compute.instance.update \
+ -q "traits.state=string::error; \
+ traits.instance_id=string::$vm_id"
+ }
+ done
}
start_monitor() {
@@ -189,19 +220,31 @@ wait_for_vm_launch() {
count=0
while [[ ${count} -lt 60 ]]
do
- state=$(openstack $as_doctor_user server list | grep " $VM_NAME " | awk '{print $6}')
- if [[ "$state" == "ACTIVE" ]]; then
- # NOTE(cgoncalves): sleeping for a bit to stabilize
- # See python-openstackclient/functional/tests/compute/v2/test_server.py:wait_for_status
- sleep 5
+ active_count=0
+ vms=$(openstack $as_doctor_user server list)
+ for i in `seq $VM_COUNT`; do
+ state=$(echo "${vms}" | grep " $VM_BASENAME$i " | awk '{print $6}')
+ if [[ "$state" == "ACTIVE" ]]; then
+ active_count=$(($active_count+1))
+ elif [[ "$state" == "ERROR" ]]; then
+ die $LINENO "vm state $VM_BASENAME$i is ERROR"
+ else
+ # This VM is not active yet
+ count=$(($count+1))
+ sleep 5
+ continue
+ fi
+ done
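+ # All VMs are ACTIVE: record the compute host of the first VM and count
+ # how many test VMs run on it (checked later by wait_consumer)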
+ [[ $active_count -eq $VM_COUNT ]] && {
+ echo "get computer host info..."
+ get_compute_host_info
+ VMS_ON_FAILED_HOST=$(openstack $as_doctor_user server list --host \
+ $COMPUTE_HOST | grep " ${VM_BASENAME}" | wc -l)
return 0
- fi
- if [[ "$state" == "ERROR" ]]; then
- openstack $as_doctor_user server show $VM_NAME
- die $LINENO "vm state is ERROR"
- fi
+ }
+ # Not all VMs are active yet
count=$(($count+1))
- sleep 1
+ sleep 5
done
die $LINENO "Time out while waiting for VM launch"
}
@@ -227,17 +270,27 @@ END_TXT
triggered=$(date "+%s.%N")
}
-calculate_notification_time() {
- if ! grep -q "doctor consumer notified at" consumer.log ; then
- die $LINENO "Consumer hasn't received fault notification."
- fi
+wait_consumer() {
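+ # Wait up to $1 seconds until consumer.log shows one notification per
+ # VM on the failed compute host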
+ local interval=1
+ local rounds=$(($1 / $interval))
+ for i in `seq $rounds`; do
+ notified_count=$(grep "doctor consumer notified at" consumer.log | wc -l)
+ if [[ $notified_count -eq $VMS_ON_FAILED_HOST ]]; then
+ return 0
+ fi
+ sleep $interval
+ done
+ die $LINENO "Consumer hasn't received fault notification."
+}
+calculate_notification_time() {
+ wait_consumer 60
#keep 'at' as the last keyword just before the value, and
#use regex to get value instead of the fixed column
detected=$(grep "doctor monitor detected at" monitor.log |\
sed -e "s/^.* at //")
notified=$(grep "doctor consumer notified at" consumer.log |\
- sed -e "s/^.* at //")
+ sed -e "s/^.* at //" | tail -1)
echo "$notified $detected" | \
awk '{
@@ -247,17 +300,42 @@ calculate_notification_time() {
}'
}
+wait_ping() {
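+ # Ping $COMPUTE_IP for up to $1 seconds and return as soon as it
+ # answers; gives up quietly on timeout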
+ local interval=5
+ local rounds=$(($1 / $interval))
+ for i in `seq $rounds`; do
+ ping -c 1 "$COMPUTE_IP"
+ if [[ $? -ne 0 ]] ; then
+ sleep $interval
+ continue
+ fi
+ return 0
+ done
+}
+
check_host_status() {
+ # Check host related to first Doctor VM is in wanted state
+ # $1 Expected state
+ # $2 Seconds to wait to have wanted state
expected_state=$1
-
- host_status_line=$(openstack $as_doctor_user --os-compute-api-version 2.16 \
- server show $VM_NAME | grep "host_status")
- host_status=$(echo $host_status_line | awk '{print $4}')
- die_if_not_set $LINENO host_status "host_status not reported by: nova show $VM_NAME"
+ local interval=5
+ local rounds=$(($2 / $interval))
+ for i in `seq $rounds`; do
+ host_status_line=$(openstack $as_doctor_user --os-compute-api-version \
+ 2.16 server show ${VM_BASENAME}1 | grep "host_status")
+ host_status=$(echo $host_status_line | awk '{print $4}')
+ die_if_not_set $LINENO host_status "host_status not reported by: nova show ${VM_BASENAME}1"
+ if [[ "$expected_state" =~ "$host_status" ]] ; then
+ echo "${VM_BASENAME}1 showing host_status: $host_status"
+ return 0
+ else
+ sleep $interval
+ fi
+ done
if [[ "$expected_state" =~ "$host_status" ]] ; then
- echo "$VM_NAME showing host_status: $host_status"
+ echo "${VM_BASENAME}1 showing host_status: $host_status"
else
- die $LINENO "host_status:$host_status not equal to expected_state: $expected_state"
+ die $LINENO "host_status:$host_status not equal to expected_state: $expected_state"
fi
}
@@ -320,18 +398,23 @@ cleanup() {
stop_consumer
unset_forced_down_hosts
- # TODO: We need to make sure the target compute host is back to IP
- # reachable. wait_ping() will be added by tojuvone .
- sleep 110
- if is_set COMPUTE_IP; then
- scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
- fi
- openstack $as_doctor_user server list | grep -q " $VM_NAME " && openstack $as_doctor_user server delete "$VM_NAME"
- sleep 1
- alarm_id=$(ceilometer $as_doctor_user alarm-list | grep " $ALARM_NAME " | awk '{print $2}')
- sleep 1
- [ -n "$alarm_id" ] && ceilometer $as_doctor_user alarm-delete "$alarm_id"
+ wait_ping 120
+
+ scp $ssh_opts_cpu "$COMPUTE_USER@$COMPUTE_IP:disable_network.log" .
+ vms=$(openstack $as_doctor_user server list)
+ vmstodel=""
+ for i in `seq $VM_COUNT`; do
+ echo "${vms}" | grep -q " $VM_BASENAME$i " &&
+ vmstodel+=" $VM_BASENAME$i"
+ done
+ [[ $vmstodel ]] && openstack $as_doctor_user server delete $vmstodel
+ alarm_list=$(ceilometer $as_doctor_user alarm-list)
+ for i in `seq $VM_COUNT`; do
+ alarm_id=$(echo "${alarm_list}" | grep " $ALARM_BASENAME$i " |
+ awk '{print $2}')
+ [ -n "$alarm_id" ] && ceilometer $as_doctor_user alarm-delete "$alarm_id"
+ done
sleep 1
image_id=$(openstack image list | grep " $IMAGE_NAME " | awk '{print $2}')
@@ -373,9 +456,6 @@ echo "creating VM..."
boot_vm
wait_for_vm_launch
-echo "get computer host info..."
-get_compute_host_info
-
echo "creating alarm..."
#TODO: change back to use, network problems depends on infra and installers
#get_consumer_ip
@@ -389,9 +469,8 @@ start_consumer
sleep 60
echo "injecting host failure..."
inject_failure
-sleep 60
-check_host_status "(DOWN|UNKNOWN)"
+check_host_status "(DOWN|UNKNOWN)" 60
calculate_notification_time
collect_logs
run_profiler