-rw-r--r--  .circleci/config.yml | 4
-rw-r--r--  .travis.yml | 53
-rw-r--r--  PI.md | 32
-rw-r--r--  README.md | 207
-rw-r--r--  ansible/site.cntt.yml | 50
-rw-r--r--  ansible/site.gate.yml | 91
-rw-r--r--  ansible/site.yml | 55
-rw-r--r--  build.sh | 37
-rw-r--r--  docker/benchmarking-cntt/Dockerfile | 4
-rw-r--r--  docker/benchmarking-cntt/testcases.yaml | 67
-rw-r--r--  docker/benchmarking/Dockerfile | 11
-rw-r--r--  docker/benchmarking/testcases.yaml | 7
-rw-r--r--  docker/core/Dockerfile | 11
-rw-r--r--  docker/features/Dockerfile | 2
-rw-r--r--  docker/healthcheck/Dockerfile | 15
-rw-r--r--  docker/healthcheck/testcases.yaml | 20
-rw-r--r--  docker/smoke-cntt/Dockerfile | 5
-rw-r--r--  docker/smoke-cntt/tempest_conf.yaml | 94
-rw-r--r--  docker/smoke-cntt/testcases.yaml | 273
-rw-r--r--  docker/smoke/Dockerfile | 35
-rw-r--r--  docker/smoke/testcases.yaml | 93
-rw-r--r--  docker/tempest/Accept-custom-registered-endpoints.patch | 99
-rw-r--r--  docker/tempest/Create-new-server-in-test_create_backup.patch | 84
-rw-r--r--  docker/tempest/Dockerfile | 36
-rw-r--r--  docker/tempest/Fixes-race-condition-in-test_add_remove_fixed_ip.patch | 165
-rw-r--r--  docker/tempest/Switch-to-threading.Thread-for-Rally-tasks.patch | 50
-rw-r--r--  docker/tempest/object-storage-fix-and-cleanup-header-checks.patch | 171
-rw-r--r--  docker/vnf/Dockerfile | 37
-rw-r--r--  docker/vnf/clearwater-heat-singlenet-deps.patch | 122
-rw-r--r--  docker/vnf/testcases.yaml | 9
-rw-r--r--  docs/conf.py | 2
-rw-r--r--  docs/release/release-notes/functest-release.rst | 26
-rw-r--r--  docs/release/release-notes/index.rst | 2
-rw-r--r--  docs/spelling_wordlist.txt | 164
-rw-r--r--  docs/testing/developer/devguide/index.rst | 2
-rw-r--r--  docs/testing/user/configguide/ci.rst | 16
-rw-r--r--  docs/testing/user/configguide/configguide.rst | 209
-rw-r--r--  docs/testing/user/configguide/index.rst | 3
-rw-r--r--  docs/testing/user/configguide/prerequisites.rst | 2
-rw-r--r--  docs/testing/user/userguide/reporting.rst | 4
-rw-r--r--  docs/testing/user/userguide/test_details.rst | 2
-rw-r--r--  docs/testing/user/userguide/test_overview.rst | 6
-rw-r--r--  docs/testing/user/userguide/test_results.rst | 202
-rw-r--r--  docs/testing/user/userguide/troubleshooting.rst | 44
-rw-r--r--  functest/ci/config_aarch64_patch.yaml | 84
-rw-r--r--  functest/ci/config_patch.yaml | 333
-rw-r--r--  functest/ci/download_images.sh | 6
-rw-r--r--  functest/ci/logging.debug.ini | 2
-rw-r--r--  functest/ci/logging.ini | 9
-rw-r--r--  functest/ci/testcases.yaml | 484
-rw-r--r--  functest/core/cloudify.py | 15
-rw-r--r--  functest/core/singlevm.py | 54
-rw-r--r--  functest/core/tenantnetwork.py | 31
-rw-r--r--  functest/opnfv_tests/openstack/api/connection_check.py | 17
-rw-r--r--  functest/opnfv_tests/openstack/cinder/cinder_test.py | 12
-rw-r--r--  functest/opnfv_tests/openstack/rally/blacklist.yaml | 15
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py | 98
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml | 5
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml | 2
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml | 28
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml | 11
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml | 71
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml | 5
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml | 16
-rw-r--r--  functest/opnfv_tests/openstack/rally/task.yaml | 4
-rw-r--r--  functest/opnfv_tests/openstack/shaker/shaker.py | 24
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml | 3
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml | 15
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml | 80
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml | 94
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 217
-rw-r--r--  functest/opnfv_tests/openstack/vmtp/vmtp.py | 13
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_ssh.py | 6
-rw-r--r--  functest/opnfv_tests/vnf/epc/juju_epc.py | 352
-rw-r--r--  functest/opnfv_tests/vnf/ims/heat_ims.py | 2
-rw-r--r--  functest/opnfv_tests/vnf/router/cloudify_vrouter.py | 2
-rw-r--r--  functest/tests/unit/openstack/rally/test_rally.py | 26
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py | 6
-rw-r--r--  functest/tests/unit/openstack/vping/test_vping_ssh.py | 24
-rw-r--r--  functest/tests/unit/vnf/ims/test_clearwater.py | 1
-rw-r--r--  functest/utils/env.py | 11
-rw-r--r--  functest/utils/functest_utils.py | 47
-rw-r--r--  requirements.txt | 2
-rw-r--r--  rtd-requirements.txt | 3
-rw-r--r--  setup.cfg | 3
-rw-r--r--  test-requirements.txt | 6
-rw-r--r--  tox.ini | 6
-rw-r--r--  upper-constraints.txt | 14
88 files changed, 3909 insertions, 973 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index bfdd54dc6..22b2a995c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -8,10 +8,10 @@ jobs:
- checkout
- run:
name: Install dependendencies
- command: sudo apt-get install python2.7-dev
+ command: sudo apt-get update && sudo apt-get install python2.7-dev enchant
- run:
name: Run tox
- command: sudo pip install tox && tox
+ command: sudo pip install tox tox-pip-version && tox
- store_artifacts:
path: api/build
destination: api
diff --git a/.travis.yml b/.travis.yml
index 0f0db3949..e6b405c22 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,11 +9,12 @@ addons:
- libssl1.0.0
before_script:
- - sudo apt-get -y install qemu-user-static
+ - sudo apt-get -y install enchant
+ - sudo docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
- sudo add-apt-repository -y ppa:deadsnakes/ppa
- sudo apt-get update
- sudo apt-get install python3.6 python3.6-dev
- - sudo pip install tox
+ - sudo pip install tox tox-pip-version
- sudo -E docker login -u="${DOCKER_USERNAME}" -p="${DOCKER_PASSWORD}"
- (cd .. && git clone https://github.com/estesp/manifest-tool)
- (cd ../manifest-tool && git checkout v0.9.0)
@@ -192,3 +193,51 @@ jobs:
--platforms linux/amd64,linux/arm,linux/arm64 \
--template ${DOCKER_USERNAME}/functest-vnf:ARCH-hunter \
--target ${DOCKER_USERNAME}/functest-vnf:hunter
+ - stage: build all functest cntt images
+ script: sudo -E bash build.sh
+ env:
+ - REPO="${DOCKER_USERNAME}"
+ - amd64_dirs="docker/smoke-cntt"
+ - arm64_dirs=""
+ - arm_dirs=""
+ - script: sudo -E bash build.sh
+ env:
+ - REPO="${DOCKER_USERNAME}"
+ - amd64_dirs=""
+ - arm64_dirs="docker/smoke-cntt"
+ - arm_dirs=""
+ - script: sudo -E bash build.sh
+ env:
+ - REPO="${DOCKER_USERNAME}"
+ - amd64_dirs=""
+ - arm64_dirs=""
+ - arm_dirs="docker/smoke-cntt"
+ - script: sudo -E bash build.sh
+ env:
+ - REPO="${DOCKER_USERNAME}"
+ - amd64_dirs="docker/benchmarking-cntt"
+ - arm64_dirs=""
+ - arm_dirs=""
+ - script: sudo -E bash build.sh
+ env:
+ - REPO="${DOCKER_USERNAME}"
+ - amd64_dirs=""
+ - arm64_dirs="docker/benchmarking-cntt"
+ - arm_dirs=""
+ - script: sudo -E bash build.sh
+ env:
+ - REPO="${DOCKER_USERNAME}"
+ - amd64_dirs=""
+ - arm64_dirs=""
+ - arm_dirs="docker/benchmarking-cntt"
+ - stage: publish all cntt manifests
+ script: >
+ sudo manifest-tool push from-args \
+ --platforms linux/amd64,linux/arm,linux/arm64 \
+ --template ${DOCKER_USERNAME}/functest-smoke-cntt:ARCH-hunter \
+ --target ${DOCKER_USERNAME}/functest-smoke-cntt:hunter
+ - script: >
+ sudo manifest-tool push from-args \
+ --platforms linux/amd64,linux/arm,linux/arm64 \
+ --template ${DOCKER_USERNAME}/functest-benchmarking-cntt:ARCH-hunter \
+ --target ${DOCKER_USERNAME}/functest-benchmarking-cntt:hunter
diff --git a/PI.md b/PI.md
new file mode 100644
index 000000000..1a695a1b5
--- /dev/null
+++ b/PI.md
@@ -0,0 +1,32 @@
+# Run Functest containers on Raspberry PI
+
+All Functest containers (Hunter and newer) are cross-compiled for arm and arm64
+via [travis-ci](https://travis-ci.org/collivier/functest/branches).
+They are built on top of Alpine armhf to support most Raspberry PI models.
+
+All Docker manifests are published so that these containers can be run with
+the same commands whatever the architecture.
+
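+As an example, the published manifest list can be inspected to check the
+available architectures (a sketch; `docker manifest` may require enabling the
+experimental Docker CLI features depending on the Docker version, and the
+image below is just one of the Functest containers):
+
+```bash
+docker manifest inspect opnfv/functest-healthcheck:hunter | grep architecture
+```
+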
+## Copy the image to the SD card
+
+> https://www.raspberrypi.org/documentation/installation/installing-images/linux.md
+>
+> This is very important, as you will lose all the data on the hard drive if you provide the wrong device name.
+> Make sure the device name is the name of the whole SD card as described above, not just a partition. For example: sdd, not sdds1 or sddp1; mmcblk0, not mmcblk0p1.
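+
+Before writing anything, it is worth double-checking which device is the SD
+card (a minimal sketch; `lsblk` is assumed to be available on the host and the
+output depends on your machine):
+
+```bash
+lsblk -o NAME,SIZE,TYPE,MOUNTPOINT
+```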
+
+Then download the Raspbian image and write it to the SD card:
+
+```bash
+wget https://downloads.raspberrypi.org/raspbian/images/raspbian-2018-11-15/2018-11-13-raspbian-stretch.zip
+unzip 2018-11-13-raspbian-stretch.zip
+sudo dd bs=4M if=2018-11-13-raspbian-stretch.img of=/dev/mmcblk0 conv=fsync
+```
+
+## Install Docker
+
+```bash
+curl -sSL https://get.docker.com | sudo sh
+```
+
+## That's all folks
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..a4b8d9f82
--- /dev/null
+++ b/README.md
@@ -0,0 +1,207 @@
+# Functest
+
+Network virtualization has dramatically changed our architectures, which calls
+for more automation and for powerful testing tools such as Functest, a
+collection of state-of-the-art virtual infrastructure test suites, including
+automatic VNF testing (cf.
+[[1]](https://www.linuxfoundation.org/press-release/2019/05/opnfv-hunter-delivers-test-tools-ci-cd-framework-to-enable-common-nfvi-for-verifying-vnfs/)).
+
+In the context of OPNFV, Functest verifies any kind of OpenStack or Kubernetes
+deployment, including production environments. It conforms to upstream rules
+and smoothly integrates many of the test cases available in the open-source
+market. It includes about 3000 functional tests plus 3 hours of upstream API
+and dataplane benchmarks, and is completed by Virtual Network Function
+deployments and testing (vIMS, vRouter and vEPC) to ensure that the platform
+meets Network Functions Virtualization requirements. Raspberry PI is also
+supported, to verify datacenters at the lowest cost (50 euros, hardware and
+software included).
+
+| Functest releases | OpenStack releases |
+|-------------------|--------------------|
+| **Hunter** | **Rocky** |
+| Iruya | Stein |
+| Jerma | Train |
+| Kali | Ussuri |
+| Leguer | Victoria |
+| Master | next Wallaby |
+
+## Prepare your environment
+
+cat env
+```
+DEPLOY_SCENARIO=XXX # if not os-nosdn-nofeature-noha scenario
+NAMESERVER=XXX # if not 8.8.8.8
+EXTERNAL_NETWORK=XXX # if not first network with router:external=True
+DASHBOARD_URL=XXX # else tempest_horizon will be skipped
+NEW_USER_ROLE=XXX # if not member
+SDN_CONTROLLER_IP=XXX # if odl scenario
+VOLUME_DEVICE_NAME=XXX # if not vdb
+FLAVOR_EXTRA_SPECS=hw:mem_page_size:large # if fdio scenarios
+```
+
+cat openstack.creds
+```
+export OS_AUTH_URL=XXX
+export OS_USER_DOMAIN_NAME=XXX
+export OS_PROJECT_DOMAIN_NAME=XXX
+export OS_USERNAME=XXX
+export OS_PROJECT_NAME=XXX
+export OS_PASSWORD=XXX
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=XXX
+```
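+
+The credentials can be checked from the host before running any container
+(a minimal sketch, assuming python-openstackclient is installed locally; the
+second command also prints the external network name expected in
+EXTERNAL_NETWORK):
+
+```bash
+. openstack.creds
+openstack token issue -f value -c expires
+openstack network list --external -f value -c Name
+```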
+
+mkdir -p images && wget -q -O- https://git.opnfv.org/functest/plain/functest/ci/download_images.sh?h=stable/hunter | bash -s -- images && ls -1 images/*
+```
+images/cirros-0.4.0-aarch64-disk.img
+images/cirros-0.4.0-x86_64-disk.img
+images/cloudify-docker-manager-community-19.01.24.tar
+images/Fedora-Cloud-Base-30-1.2.x86_64.qcow2
+images/shaker-image-1.3.0+stretch.qcow2
+images/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+images/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
+images/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+images/vyos-1.1.8-amd64.qcow2
+```
+
+## Run healthcheck suite
+
+```bash
+sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-healthcheck:hunter
+```
+
+```
++--------------------------+------------------+---------------------+------------------+----------------+
+| TEST CASE | PROJECT | TIER | DURATION | RESULT |
++--------------------------+------------------+---------------------+------------------+----------------+
+| connection_check | functest | healthcheck | 00:03 | PASS |
+| tenantnetwork1 | functest | healthcheck | 00:08 | PASS |
+| tenantnetwork2 | functest | healthcheck | 00:16 | PASS |
+| vmready1 | functest | healthcheck | 00:09 | PASS |
+| vmready2 | functest | healthcheck | 00:10 | PASS |
+| singlevm1 | functest | healthcheck | 00:51 | PASS |
+| singlevm2 | functest | healthcheck | 00:41 | PASS |
+| vping_ssh | functest | healthcheck | 00:56 | PASS |
+| vping_userdata | functest | healthcheck | 00:42 | PASS |
+| cinder_test | functest | healthcheck | 02:19 | PASS |
+| tempest_smoke | functest | healthcheck | 07:02 | PASS |
+| tempest_horizon | functest | healthcheck | 00:52 | PASS |
+| odl | functest | healthcheck | 00:00 | SKIP |
++--------------------------+------------------+---------------------+------------------+----------------+
+```
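+
+A single test case can also be run instead of the whole tier by overriding the
+container command (a sketch; `run_tests -t` accepts any case name defined in
+testcases.yaml):
+
+```bash
+sudo docker run --env-file env \
+  -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+  -v $(pwd)/images:/home/opnfv/functest/images \
+  opnfv/functest-healthcheck:hunter run_tests -t connection_check
+```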
+
+## Run smoke suite
+
+```bash
+sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-smoke:hunter
+```
+
+```
++---------------------------+------------------+---------------+------------------+----------------+
+| TEST CASE | PROJECT | TIER | DURATION | RESULT |
++---------------------------+------------------+---------------+------------------+----------------+
+| tempest_neutron | functest | smoke | 16:49 | PASS |
+| tempest_cinder | functest | smoke | 01:39 | PASS |
+| tempest_keystone | functest | smoke | 00:57 | PASS |
+| tempest_heat | functest | smoke | 24:33 | PASS |
+| rally_sanity | functest | smoke | 18:41 | PASS |
+| refstack_defcore | functest | smoke | 10:38 | PASS |
+| tempest_full | functest | smoke | 55:19 | PASS |
+| tempest_scenario | functest | smoke | 11:06 | PASS |
+| tempest_slow | functest | smoke | 61:39 | PASS |
+| patrole | functest | smoke | 02:46 | PASS |
+| networking-bgpvpn | functest | smoke | 00:00 | SKIP |
+| networking-sfc | functest | smoke | 00:00 | SKIP |
+| tempest_barbican | functest | smoke | 02:30 | PASS |
++---------------------------+------------------+---------------+------------------+----------------+
+```
+
+## Run smoke CNTT suite
+
+```bash
+sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-smoke-cntt:hunter
+```
+
+```
++-------------------------------+------------------+---------------+------------------+----------------+
+| TEST CASE | PROJECT | TIER | DURATION | RESULT |
++-------------------------------+------------------+---------------+------------------+----------------+
+| tempest_neutron_cntt | functest | smoke | 13:54 | PASS |
+| tempest_cinder_cntt | functest | smoke | 01:46 | PASS |
+| tempest_keystone_cntt | functest | smoke | 00:58 | PASS |
+| tempest_heat_cntt | functest | smoke | 25:31 | PASS |
+| rally_sanity_cntt | functest | smoke | 18:50 | PASS |
+| tempest_full_cntt | functest | smoke | 44:32 | PASS |
+| tempest_scenario_cntt | functest | smoke | 11:14 | PASS |
+| tempest_slow_cntt | functest | smoke | 43:55 | PASS |
++-------------------------------+------------------+---------------+------------------+----------------+
+```
+
+## Run benchmarking suite
+
+```bash
+sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-benchmarking:hunter
+```
+
+```
++--------------------+------------------+----------------------+------------------+----------------+
+| TEST CASE | PROJECT | TIER | DURATION | RESULT |
++--------------------+------------------+----------------------+------------------+----------------+
+| rally_full | functest | benchmarking | 108:34 | PASS |
+| rally_jobs | functest | benchmarking | 22:07 | PASS |
+| vmtp | functest | benchmarking | 15:38 | PASS |
+| shaker | functest | benchmarking | 25:12 | PASS |
++--------------------+------------------+----------------------+------------------+----------------+
+```
+
+## Run benchmarking CNTT suite
+
+```bash
+sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-benchmarking-cntt:hunter
+```
+
+```
++-------------------------+------------------+----------------------+------------------+----------------+
+| TEST CASE | PROJECT | TIER | DURATION | RESULT |
++-------------------------+------------------+----------------------+------------------+----------------+
+| rally_full_cntt | functest | benchmarking | 106:60 | PASS |
+| rally_jobs_cntt | functest | benchmarking | 21:16 | PASS |
+| vmtp | functest | benchmarking | 16:15 | PASS |
+| shaker | functest | benchmarking | 25:09 | PASS |
++-------------------------+------------------+----------------------+------------------+----------------+
+```
+
+## Run vnf suite
+
+```bash
+sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-vnf:hunter
+```
+
+```
++----------------------+------------------+--------------+------------------+----------------+
+| TEST CASE | PROJECT | TIER | DURATION | RESULT |
++----------------------+------------------+--------------+------------------+----------------+
+| cloudify | functest | vnf | 04:35 | PASS |
+| cloudify_ims | functest | vnf | 24:16 | PASS |
+| heat_ims | functest | vnf | 30:36 | PASS |
+| vyos_vrouter | functest | vnf | 15:37 | PASS |
+| juju_epc | functest | vnf | 34:39 | PASS |
++----------------------+------------------+--------------+------------------+----------------+
+```
diff --git a/ansible/site.cntt.yml b/ansible/site.cntt.yml
new file mode 100644
index 000000000..cddc1ab1e
--- /dev/null
+++ b/ansible/site.cntt.yml
@@ -0,0 +1,50 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: functest
+ docker_tags:
+ - hunter:
+ branch: stable/hunter
+ suites:
+ - container: functest-healthcheck
+ tests:
+ - connection_check
+ - tenantnetwork1
+ - tenantnetwork2
+ - vmready1
+ - vmready2
+ - singlevm1
+ - singlevm2
+ - vping_ssh
+ - vping_userdata
+ - cinder_test
+ - odl
+ - tempest_smoke
+ - container: functest-smoke-cntt
+ timeout: 2h
+ tests:
+ - tempest_neutron_cntt
+ - tempest_cinder_cntt
+ - tempest_keystone_cntt
+ - tempest_heat_cntt
+ - rally_sanity_cntt
+ - tempest_full_cntt
+ - tempest_scenario_cntt
+ - tempest_slow_cntt
+ - container: functest-benchmarking-cntt
+ timeout: 4h
+ tests:
+ - rally_full_cntt
+ - rally_jobs_cntt
+ - vmtp
+ - shaker
+ - container: functest-vnf
+ timeout: 2h
+ tests:
+ - cloudify
+ - cloudify_ims
+ - heat_ims
+ - vyos_vrouter
+ - juju_epc
diff --git a/ansible/site.gate.yml b/ansible/site.gate.yml
new file mode 100644
index 000000000..d9e9e4f47
--- /dev/null
+++ b/ansible/site.gate.yml
@@ -0,0 +1,91 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: functest
+ gerrit_project: functest
+ docker_tags:
+ - hunter:
+ branch: stable/hunter
+ builds:
+ dependencies:
+ - repo: _
+ dport:
+ container: alpine
+ tag: 3.9
+ steps:
+ - name: build opnfv/functest-core
+ containers:
+ - name: functest-core
+ ref_arg: BRANCH
+ path: docker/core
+ - name: build opnfv/functest-tempest
+ containers:
+ - name: functest-tempest
+ ref_arg: BRANCH
+ path: docker/tempest
+ - name: build containers
+ containers:
+ - name: functest-healthcheck
+ ref_arg: BRANCH
+ path: docker/healthcheck
+ - name: functest-smoke
+ ref_arg: BRANCH
+ path: docker/smoke
+ - name: functest-benchmarking
+ ref_arg: BRANCH
+ path: docker/benchmarking
+ - name: functest-vnf
+ ref_arg:
+ path: docker/vnf
+ - name: functest-features
+ ref_arg: BRANCH
+ path: docker/features
+ suites:
+ - container: functest-healthcheck
+ tests:
+ - connection_check
+ - tenantnetwork1
+ - tenantnetwork2
+ - vmready1
+ - vmready2
+ - singlevm1
+ - singlevm2
+ - vping_ssh
+ - vping_userdata
+ - cinder_test
+ - odl
+ - tempest_smoke
+ - tempest_horizon
+ - container: functest-smoke
+ timeout: 2h
+ tests:
+ - tempest_neutron
+ - tempest_cinder
+ - tempest_keystone
+ - tempest_heat
+ - rally_sanity
+ - refstack_defcore
+ - tempest_full
+ - tempest_scenario
+ - tempest_slow
+ - patrole
+ - networking-bgpvpn
+ - networking-sfc
+ - barbican
+ - container: functest-benchmarking
+ timeout: 4h
+ tests:
+ - rally_full
+ - rally_jobs
+ - vmtp
+ - shaker
+ - container: functest-vnf
+ timeout: 2h
+ tests:
+ - cloudify
+ - cloudify_ims
+ - heat_ims
+ - vyos_vrouter
+ - juju_epc
diff --git a/ansible/site.yml b/ansible/site.yml
index bb0977e04..92bde9134 100644
--- a/ansible/site.yml
+++ b/ansible/site.yml
@@ -4,44 +4,9 @@
roles:
- role: collivier.xtesting
project: functest
- gerrit_project: functest
docker_tags:
- hunter:
branch: stable/hunter
- builds:
- dependencies:
- - repo: _
- dport:
- container: alpine
- tag: 3.9
- steps:
- - name: build opnfv/functest-core
- containers:
- - name: functest-core
- ref_arg: BRANCH
- path: docker/core
- - name: build opnfv/functest-tempest
- containers:
- - name: functest-tempest
- ref_arg: BRANCH
- path: docker/tempest
- - name: build containers
- containers:
- - name: functest-healthcheck
- ref_arg: BRANCH
- path: docker/healthcheck
- - name: functest-smoke
- ref_arg: BRANCH
- path: docker/smoke
- - name: functest-benchmarking
- ref_arg: BRANCH
- path: docker/benchmarking
- - name: functest-vnf
- ref_arg:
- path: docker/vnf
- - name: functest-features
- ref_arg: BRANCH
- path: docker/features
suites:
- container: functest-healthcheck
tests:
@@ -57,37 +22,35 @@
- cinder_test
- odl
- tempest_smoke
+ - tempest_horizon
- container: functest-smoke
+ timeout: 2h
tests:
- - neutron-tempest-plugin-api
+ - tempest_neutron
+ - tempest_cinder
+ - tempest_keystone
+ - tempest_heat
- rally_sanity
- refstack_defcore
- tempest_full
- tempest_scenario
+ - tempest_slow
- patrole
- - neutron_trunk
- networking-bgpvpn
- networking-sfc
- barbican
- container: functest-benchmarking
+ timeout: 4h
tests:
- rally_full
- rally_jobs
- vmtp
- shaker
- container: functest-vnf
+ timeout: 2h
tests:
- cloudify
- cloudify_ims
- heat_ims
- vyos_vrouter
- juju_epc
- properties:
- execution-type: SEQUENTIALLY
- - container: functest-features
- tests:
- - doctor-notification
- - functest-odl-sfc
- - barometercollectd
- - vgpu
- - stor4nfv_os
diff --git a/build.sh b/build.sh
index ce6f9deea..831f944d6 100644
--- a/build.sh
+++ b/build.sh
@@ -10,14 +10,10 @@ docker/healthcheck \
docker/smoke \
docker/benchmarking \
docker/features \
-docker/vnf"}
-arm_dirs=${arm_dirs-"\
-docker/core \
-docker/tempest \
-docker/healthcheck \
-docker/smoke \
-docker/benchmarking \
-docker/features"}
+docker/vnf \
+docker/smoke-cntt \
+docker/benchmarking-cntt"}
+arm_dirs=${arm_dirs-${amd64_dirs}}
arm64_dirs=${arm64_dirs-${amd64_dirs}}
build_opts=("--pull=true" --no-cache "--force-rm=true")
@@ -27,6 +23,12 @@ find . -name Dockerfile -exec sed -i \
-e \
"s|opnfv/functest-tempest:hunter|${repo}/functest-tempest:amd64-hunter|g" \
{} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-smoke:hunter|${repo}/functest-smoke:amd64-hunter|g" \
+ {} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-benchmarking:hunter|\
+${repo}/functest-benchmarking:amd64-hunter|g" {} +
for dir in ${amd64_dirs}; do
(cd "${dir}" &&
docker build "${build_opts[@]}" \
@@ -40,13 +42,19 @@ done
find . -name Dockerfile -exec git checkout {} +
find . -name Dockerfile -exec sed -i \
- -e "s|alpine:3.9|multiarch/alpine:arm64-v3.9|g" {} +
+ -e "s|alpine:3.9|arm64v8/alpine:3.9|g" {} +
find . -name Dockerfile -exec sed -i \
-e "s|opnfv/functest-core:hunter|${repo}/functest-core:arm64-hunter|g" {} +
find . -name Dockerfile -exec sed -i \
-e \
"s|opnfv/functest-tempest:hunter|${repo}/functest-tempest:arm64-hunter|g" \
{} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-smoke:hunter|${repo}/functest-smoke:arm64-hunter|g" \
+ {} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-benchmarking:hunter|\
+${repo}/functest-benchmarking:arm64-hunter|g" {} +
for dir in ${arm64_dirs}; do
(cd "${dir}" && docker build "${build_opts[@]}" \
-t "${repo}/functest-${dir##**/}:arm64-hunter" .)
@@ -56,17 +64,22 @@ for dir in ${arm64_dirs}; do
done
[ -n "${arm64_dirs}" ] &&
(docker rmi "${repo}/functest-core:arm64-hunter" \
- multiarch/alpine:arm64-v3.9 || true)
+ arm64v8/alpine:3.9 || true)
find . -name Dockerfile -exec git checkout {} +
find . -name Dockerfile -exec sed -i \
- -e "s|alpine:3.9|multiarch/alpine:armhf-v3.9|g" {} +
+ -e "s|alpine:3.9|arm32v6/alpine:3.9|g" {} +
find . -name Dockerfile -exec sed -i \
-e "s|opnfv/functest-core:hunter|${repo}/functest-core:arm-hunter|g" {} +
find . -name Dockerfile -exec sed -i \
-e \
"s|opnfv/functest-tempest:hunter|${repo}/functest-tempest:arm-hunter|g" \
{} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-smoke:hunter|${repo}/functest-smoke:arm-hunter|g" {} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-benchmarking:hunter|\
+${repo}/functest-benchmarking:arm-hunter|g" {} +
for dir in ${arm_dirs}; do
(cd "${dir}" && docker build "${build_opts[@]}" \
-t "${repo}/functest-${dir##**/}:arm-hunter" .)
@@ -76,7 +89,7 @@ for dir in ${arm_dirs}; do
done
[ -n "${arm_dirs}" ] &&
(docker rmi "${repo}/functest-core:arm-hunter" \
- multiarch/alpine:armhf-v3.9 || true)
+ arm32v6/alpine:3.9 || true)
find . -name Dockerfile -exec git checkout {} +
exit $?
diff --git a/docker/benchmarking-cntt/Dockerfile b/docker/benchmarking-cntt/Dockerfile
new file mode 100644
index 000000000..f4a9642b2
--- /dev/null
+++ b/docker/benchmarking-cntt/Dockerfile
@@ -0,0 +1,4 @@
+FROM opnfv/functest-benchmarking:hunter
+
+COPY testcases.yaml /usr/lib/python2.7/site-packages/xtesting/ci/testcases.yaml
+CMD ["run_tests", "-t", "all"]
diff --git a/docker/benchmarking-cntt/testcases.yaml b/docker/benchmarking-cntt/testcases.yaml
new file mode 100644
index 000000000..30eb3e631
--- /dev/null
+++ b/docker/benchmarking-cntt/testcases.yaml
@@ -0,0 +1,67 @@
+---
+tiers:
+ -
+ name: benchmarking_cntt
+ description: >-
+ Run several OpenStack performance tools
+ https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
+ testcases:
+ -
+ case_name: rally_full_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the full suite of scenarios of the
+ OpenStack Rally suite using several threads and iterations.
+ run:
+ name: rally_full
+ args:
+ tests:
+ - 'authenticate'
+ - 'glance'
+ - 'cinder'
+ - 'heat'
+ - 'keystone'
+ - 'neutron'
+ - 'nova'
+ - 'quotas'
+ - 'swift'
+
+ -
+ case_name: rally_jobs_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs a group of Rally jobs used in
+ OpenStack gating
+ run:
+ name: rally_jobs
+ args:
+ tests:
+ - 'neutron'
+ -
+ case_name: vmtp
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ VMTP is a small python application that will automatically
+ perform ping connectivity, round trip time measurement
+ (latency) and TCP/UDP throughput
+ run:
+ name: vmtp
+
+ -
+ case_name: shaker
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ Shaker wraps around popular system network testing tools
+ like iperf, iperf3 and netperf (with help of flent). Shaker
+ is able to deploy OpenStack instances and networks in
+ different topologies.
+ run:
+ name: shaker
diff --git a/docker/benchmarking/Dockerfile b/docker/benchmarking/Dockerfile
index 43a8fb783..b2ff3e1e8 100644
--- a/docker/benchmarking/Dockerfile
+++ b/docker/benchmarking/Dockerfile
@@ -10,10 +10,14 @@ RUN apk --no-cache add --update libxml2 libxslt && \
python-dev build-base linux-headers libffi-dev \
openssl-dev libjpeg-turbo-dev libxml2-dev libxslt-dev && \
wget -q -O- https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK_TAG/upper-constraints.txt > upper-constraints.txt && \
- sed -i -E s/^tempest==+.*$/-e\ git+https:\\/\\/opendev.org\\/openstack\\/tempest@$TEMPEST_TAG#egg=tempest/ upper-constraints.txt && \
+ sed -i -E /#egg=tempest/d upper-constraints.txt && \
+ sed -i -E /^ujson==+.*$/d upper-constraints.txt && \
+ sed -i -E /^kubernetes==+.*$/d upper-constraints.txt && \
case $(uname -m) in aarch*|arm*) sed -i -E /^PyNaCl=/d upper-constraints.txt ;; esac && \
wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH > upper-constraints.opnfv.txt && \
sed -i -E /#egg=functest/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=rally/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=xrally-kubernetes/d upper-constraints.opnfv.txt && \
case $(uname -m) in aarch*|arm*) CFLAGS="-O0" pip install --no-cache-dir -cupper-constraints.txt -cupper-constraints.opnfv.txt lxml ;; esac && \
git init /src/vmtp && \
(cd /src/vmtp && \
@@ -22,12 +26,13 @@ RUN apk --no-cache add --update libxml2 libxslt && \
update-requirements -s --source /src/openstack-requirements /src/vmtp/ && \
pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
/src/vmtp && \
- mkdir -p /home/opnfv/functest/data/rally/neutron && \
+ mkdir -p /home/opnfv/functest/data/rally/neutron/rally-jobs && \
git init /src/neutron && \
(cd /src/neutron && \
git fetch --tags https://opendev.org/openstack/neutron.git $OPENSTACK_TAG && \
git checkout FETCH_HEAD) && \
- cp -r /src/neutron/rally-jobs /home/opnfv/functest/data/rally/neutron/rally-jobs && \
+ sed "s/NeutronTrunks.create_and_list_trunk_subports/NeutronTrunks.create_and_list_trunks/g" \
+ /src/neutron/rally-jobs/task-neutron.yaml > /home/opnfv/functest/data/rally/neutron/rally-jobs/task-neutron.yaml && \
rm -r upper-constraints.txt upper-constraints.opnfv.txt /src/vmtp /src/neutron && \
apk del .build-deps
COPY testcases.yaml /usr/lib/python2.7/site-packages/xtesting/ci/testcases.yaml
diff --git a/docker/benchmarking/testcases.yaml b/docker/benchmarking/testcases.yaml
index eeb209113..12b3ec57a 100644
--- a/docker/benchmarking/testcases.yaml
+++ b/docker/benchmarking/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: benchmarking
- order: 2
description: >-
Run several OpenStack performance tools
https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
@@ -29,6 +28,8 @@ tiers:
description: >-
This test case runs a group of Rally jobs used in
OpenStack gating
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: rally_jobs
args:
@@ -45,7 +46,7 @@ tiers:
perform ping connectivity, round trip time measurement
(latency) and TCP/UDP throughput
dependencies:
- - POD_ARCH: '^(?!aarch64$)'
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: vmtp
@@ -59,5 +60,7 @@ tiers:
like iperf, iperf3 and netperf (with help of flent). Shaker
is able to deploy OpenStack instances and networks in
different topologies.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: shaker
diff --git a/docker/core/Dockerfile b/docker/core/Dockerfile
index 26ffdccf7..f2fd8555f 100644
--- a/docker/core/Dockerfile
+++ b/docker/core/Dockerfile
@@ -3,16 +3,23 @@ FROM alpine:3.9
ARG BRANCH=stable/hunter
ARG OPENSTACK_TAG=stable/rocky
-RUN apk --no-cache add --update \
+RUN apk -U upgrade && \
+ apk --no-cache add --update libcurl --repository=http://dl-cdn.alpinelinux.org/alpine/v3.11/main && \
+ apk --no-cache add --update \
python libffi openssl libjpeg-turbo py-pip bash \
- grep sed wget ca-certificates git openssh-client qemu-img iputils && \
+ grep sed wget ca-certificates git openssh-client qemu-img iputils coreutils mailcap \
+ libstdc++ && \
apk --no-cache add --virtual .build-deps --update \
python-dev build-base linux-headers libffi-dev \
openssl-dev libjpeg-turbo-dev && \
wget -q -O- https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK_TAG/upper-constraints.txt > upper-constraints.txt && \
+ sed -i -E /^ujson==+.*$/d upper-constraints.txt && \
+ sed -i -E /^kubernetes==+.*$/d upper-constraints.txt && \
case $(uname -m) in aarch*|arm*) sed -i -E /^PyNaCl=/d upper-constraints.txt && apk add --no-cache py-pynacl ;; esac && \
wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH > upper-constraints.opnfv.txt && \
sed -i -E /#egg=functest/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=rally/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=xrally-kubernetes/d upper-constraints.opnfv.txt && \
pip install --no-cache-dir --src /src -cupper-constraints.opnfv.txt -cupper-constraints.txt \
-e git+https://opendev.org/openstack/requirements@$OPENSTACK_TAG#egg=openstack_requirements && \
git init /src/functest && \
diff --git a/docker/features/Dockerfile b/docker/features/Dockerfile
index b0123d0ce..6e8c0881f 100644
--- a/docker/features/Dockerfile
+++ b/docker/features/Dockerfile
@@ -14,6 +14,8 @@ RUN apk --no-cache add --update python3 sshpass && \
case $(uname -m) in aarch*|arm*) sed -i -E /^PyNaCl=/d upper-constraints.txt ;; esac && \
wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH > upper-constraints.opnfv.txt && \
sed -i -E /#egg=functest/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=rally/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=xrally-kubernetes/d upper-constraints.opnfv.txt && \
pip install --no-cache-dir --src /src -cupper-constraints.txt \
-cupper-constraints.opnfv.txt \
-rthirdparty-requirements.txt && \
diff --git a/docker/healthcheck/Dockerfile b/docker/healthcheck/Dockerfile
index 5611da4df..d1154ea79 100644
--- a/docker/healthcheck/Dockerfile
+++ b/docker/healthcheck/Dockerfile
@@ -4,20 +4,29 @@ ARG BRANCH=stable/hunter
ARG OPENSTACK_TAG=stable/rocky
ARG TEMPEST_TAG=21.0.0
ARG ODL_TAG=85448c9d97b89989488e675b29b38ac42d8674e4
+ARG TEMPEST_HORIZON_TAG=0.1.0
COPY thirdparty-requirements.txt thirdparty-requirements.txt
RUN wget -q -O- https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK_TAG/upper-constraints.txt > upper-constraints.txt && \
- sed -i -E s/^tempest==+.*$/-e\ git+https:\\/\\/opendev.org\\/openstack\\/tempest@$TEMPEST_TAG#egg=tempest/ upper-constraints.txt && \
+ sed -i -E /#egg=tempest/d upper-constraints.txt && \
+ sed -i -E /^kubernetes==+.*$/d upper-constraints.txt && \
case $(uname -m) in aarch*|arm*) sed -i -E /^PyNaCl=/d upper-constraints.txt ;; esac && \
wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH > upper-constraints.opnfv.txt && \
sed -i -E /#egg=functest/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=rally/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=xrally-kubernetes/d upper-constraints.opnfv.txt && \
+ git init /src/tempest-horizon && \
+ (cd /src/tempest-horizon && \
+ git fetch --tags https://opendev.org/openstack/tempest-horizon.git $TEMPEST_HORIZON_TAG && \
+ git checkout FETCH_HEAD) && \
+ update-requirements -s --source /src/openstack-requirements /src/tempest-horizon/ && \
pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
- -rthirdparty-requirements.txt && \
+ /src/tempest-horizon/ -rthirdparty-requirements.txt && \
git init /src/odl_test && \
(cd /src/odl_test && \
git fetch --tags https://git.opendaylight.org/gerrit/integration/test $ODL_TAG && \
git checkout FETCH_HEAD) && \
- rm -r /src/odl_test/.git thirdparty-requirements.txt upper-constraints.txt \
+ rm -r /src/odl_test/.git /src/tempest-horizon/ thirdparty-requirements.txt upper-constraints.txt \
upper-constraints.opnfv.txt
COPY testcases.yaml /usr/lib/python2.7/site-packages/xtesting/ci/testcases.yaml
CMD ["run_tests", "-t", "all"]
diff --git a/docker/healthcheck/testcases.yaml b/docker/healthcheck/testcases.yaml
index 5aac7010e..3ebe8cb67 100644
--- a/docker/healthcheck/testcases.yaml
+++ b/docker/healthcheck/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: healthcheck
- order: 0
description: >-
First tier to be executed to verify the basic
operations in the VIM.
@@ -28,6 +27,8 @@ tiers:
It creates and configures all tenant network ressources
required by advanced testcases (subnet, network and
router).
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: tenantnetwork1
@@ -40,6 +41,8 @@ tiers:
It creates new user/project before creating and configuring
all tenant network ressources required by a testcase
(subnet, network and router).
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: tenantnetwork2
@@ -162,3 +165,18 @@ tiers:
mode: '(?=.*\[.*\bsmoke\b.*\])(^tempest\.api)'
option:
- '--concurrency=4'
+
+ -
+ case_name: tempest_horizon
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Horizon project.
+ dependencies:
+ - DASHBOARD_URL: '^(?!\s*$).+'
+ run:
+ name: tempest_horizon
+ args:
+ mode: '^tempest_horizon.'
diff --git a/docker/smoke-cntt/Dockerfile b/docker/smoke-cntt/Dockerfile
new file mode 100644
index 000000000..20d7717ca
--- /dev/null
+++ b/docker/smoke-cntt/Dockerfile
@@ -0,0 +1,5 @@
+FROM opnfv/functest-smoke:hunter
+
+COPY testcases.yaml /usr/lib/python2.7/site-packages/xtesting/ci/testcases.yaml
+COPY tempest_conf.yaml /usr/lib/python2.7/site-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
+CMD ["run_tests", "-t", "all"]
diff --git a/docker/smoke-cntt/tempest_conf.yaml b/docker/smoke-cntt/tempest_conf.yaml
new file mode 100644
index 000000000..20ca43ed9
--- /dev/null
+++ b/docker/smoke-cntt/tempest_conf.yaml
@@ -0,0 +1,94 @@
+---
+compute:
+ max_microversion: 2.53
+compute-feature-enabled:
+ attach_encrypted_volume: false
+ block_migration_for_live_migration: false
+ block_migrate_cinder_iscsi: false
+ change_password: false
+ cold_migration: true
+ config_drive: true
+ console_output: true
+ disk_config: true
+ enable_instance_password: true
+ interface_attach: true
+ live_migration: true
+ live_migrate_back_and_forth: false
+ metadata_service: true
+ pause: true
+ personality: false
+ rdp_console: false
+ rescue: true
+ resize: true
+ scheduler_available_filters: "RetryFilter,AvailabilityZoneFilter,\
+ ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,\
+ ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,\
+ DifferentHostFilter"
+ serial_console: false
+ shelve: true
+ snapshot: true
+ spice_console: false
+ suspend: true
+ swap_volume: false
+ vnc_console: true
+ volume_backed_live_migration: false
+ volume_multiattach: false
+identity:
+ auth_version: v3
+ user_unique_last_password_count: 2
+ user_lockout_duration: 10
+ user_lockout_failure_attempts: 2
+identity-feature-enabled:
+ trust: true
+ api_v2: false
+ api_v2_admin: false
+ security_compliance: true
+ federation: false
+ external_idp: false
+ project_tags: true
+ application_credentials: true
+image-feature-enabled:
+ api_v2: true
+ api_v1: false
+network-feature-enabled:
+ port_admin_state_change: true
+ port_security: true
+placement:
+ max_microversion: "1.10"
+validation:
+ image_ssh_user: cirros
+ ssh_timeout: 196
+ ip_version_for_ssh: 4
+ run_validation: true
+volume:
+ max_microversion: 3.43
+ storage_protocol: ceph
+ manage_volume_ref: source-name,volume-%s
+ manage_snapshot_ref: source-name,snapshot-%s
+volume-feature-enabled:
+ multi_backend: false
+ backup: true
+ snapshot: true
+ clone: true
+ manage_snapshot: false
+ manage_volume: true
+ extend_attached_volume: false
+ consistency_group: false
+ volume_revert: true
+neutron_plugin_options:
+ agent_availability_zone: nova
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+ provider_vlans: foo,
+object-storage-feature-enabled:
+ discoverable_apis: "account_quotas,formpost,bulk_upload,bulk_delete,\
+ tempurl,crossdomain,container_quotas,staticweb,account_quotas,slo"
+ object_versioning: true
+ discoverability: true
+heat_plugin:
+ skip_functional_test_list: RemoteStackTest,EncryptionVolTypeTest
+ skip_scenario_test_list: "AodhAlarmTest,SoftwareConfigIntegrationTest,\
+ VolumeBackupRestoreIntegrationTest,CfnInitIntegrationTest,\
+ LoadBalancerTest,RemoteDeeplyNestedStackTest"
+ auth_version: 3
+heat_features_enabled:
+ multi_cloud: false
diff --git a/docker/smoke-cntt/testcases.yaml b/docker/smoke-cntt/testcases.yaml
new file mode 100644
index 000000000..50608fc90
--- /dev/null
+++ b/docker/smoke-cntt/testcases.yaml
@@ -0,0 +1,273 @@
+---
+tiers:
+ -
+ name: smoke_cntt
+ description: >-
+ Set of basic Functional tests to validate the OPNFV scenarios.
+ testcases:
+ -
+ case_name: tempest_neutron_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 478
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Neutron project. The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*admin.test_agent_availability_zone)\
+ (?!.*admin.test_dhcp_agent_scheduler)\
+ (?!.*admin.test_l3_agent_scheduler)\
+ (?!.*admin.test_logging)\
+ (?!.*admin.test_logging_negative)\
+ (?!.*admin.test_network_segment_range)\
+ (?!.*admin.test_ports.PortTestCasesAdmin.test_regenerate_mac_address)\
+ (?!.*admin.test_ports.PortTestCasesResourceRequest)\
+ (?!.*admin.test_routers_dvr)\
+ (?!.*admin.test_routers_flavors)\
+ (?!.*admin.test_routers_ha)\
+ (?!.*test_conntrack_helper)\
+ (?!.*test_floating_ips.FloatingIPPoolTestJSON)\
+ (?!.*test_floating_ips.FloatingIPTestJSON.test_create_update_floatingip_port_details)\
+ (?!.*test_metering_extensions)\
+ (?!.*test_metering_negative)\
+ (?!.*test_networks.NetworksSearchCriteriaTest.test_list_validation_filters)\
+ (?!.*test_networks.NetworksTestAdmin.test_create_tenant_network_vxlan)\
+ (?!.*test_networks.NetworksTestJSON.test_create_update_network_dns_domain)\
+ (?!.*test_ports.PortsTestJSON.test_create_port_with_propagate_uplink_status)\
+ (?!.*test_ports.PortsTestJSON.test_create_port_without_propagate_uplink_status)\
+ (?!.*test_ports.PortsTestJSON.test_create_update_port_with_dns_domain)\
+ (?!.*test_ports.PortsTestJSON.test_create_update_port_with_dns_name)\
+ (?!.*test_ports.PortsTestJSON.test_create_update_port_with_no_dns_name)\
+ (?!.*test_qos.QosMinimumBandwidthRuleTestJSON)\
+ (?!.*test_revisions.TestRevisions.test_update_dns_domain_bumps_revision)\
+ (?!.*test_revisions.TestRevisions.test_update_router_extra_attributes_bumps_revision)\
+ (?!.*test_router_interface_fip)\
+ (?!.*test_routers.DvrRoutersTest)\
+ (?!.*test_routers.HaRoutersTest)\
+ (?!.*test_routers.RoutersIpV6Test.test_extra_routes_atomic)\
+ (?!.*test_routers.RoutersTest.test_extra_routes_atomic)\
+ (?!.*test_routers_negative.DvrRoutersNegativeTest)\
+ (?!.*test_routers_negative.DvrRoutersNegativeTestExtended)\
+ (?!.*test_routers_negative.HaRoutersNegativeTest)\
+ (?!.*test_security_groups.RbacSharedSecurityGroupTest)\
+ (?!.*test_subnetpools.SubnetPoolsSearchCriteriaTest.test_list_validation_filters)\
+ (?!.*test_subnets.SubnetsSearchCriteriaTest.test_list_validation_filters)\
+ (?!.*test_timestamp.TestTimeStamp.test_segment_with_timestamp)\
+ (?!.*test_trunk.TrunkTestInheritJSONBase.test_add_subport)\
+ (?!.*test_trunk.TrunkTestMtusJSON)\
+ (?!.*test_trunk_negative.TrunkTestJSON.test_create_subport_invalid_inherit_network_segmentation_type)\
+ (?!.*test_trunk_negative.TrunkTestMtusJSON)\
+ (^neutron_tempest_plugin.api)"
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_cinder_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 7
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Cinder project.
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*test_incremental_backup)\
+ (?!.*test_consistencygroups)\
+ (^cinder_tempest_plugin.)"
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_keystone_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 27
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Keystone project.
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*api.identity.v3.test_oauth1_tokens)\
+ (?!.*scenario.test_federated_authentication)\
+ keystone_tempest_plugin."
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_heat_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 112
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Heat project.
+ run:
+ name: tempest_heat
+ args:
+ mode: "(?!.*functional.test_lbaasv2)\
+ (?!.*functional.test_encryption_vol_type)\
+ (?!.*functional.test_remote_stack.RemoteStackTest)\
+ (?!.*RemoteStackTest.test_stack_create_with_cloud_credential)\
+ (?!.*scenario.test_aodh_alarm)\
+ (?!.*tests.scenario.test_autoscaling_lb)\
+ (?!.*scenario.test_autoscaling_lbv2)\
+ (?!.*scenario.test_remote_deeply_nested.RemoteDeeplyNestedStackTest)\
+ (?!.*scenario.test_server_software_config)\
+ (?!.*test_volumes.VolumeBackupRestoreIntegrationTest)\
+ (?!.*scenario.test_octavia_lbaas)\
+ (?!.*scenario.test_server_cfn_init)\
+ ^heat_tempest_plugin.tests"
+ option:
+ - '--concurrency=1'
+
+ -
+ case_name: rally_sanity_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs a sub group of tests of the OpenStack
+ Rally suite in smoke mode.
+ run:
+ name: rally_sanity
+ args:
+ tests:
+ - 'authenticate'
+ - 'glance'
+ - 'cinder'
+ - 'heat'
+ - 'keystone'
+ - 'neutron'
+ - 'nova'
+ - 'quotas'
+ - 'swift'
+
+ -
+ case_name: tempest_full_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 1280
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L83
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*test_fixed_ips)\
+ (?!.*test_fixed_ips_negative)\
+ (?!.*test_auto_allocate_network)(?!.*test_floating_ips_bulk)\
+ (?!.*test_flavors_microversions.FlavorsV255TestJSON)\
+ (?!.*test_flavors_microversions.FlavorsV261TestJSON)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_volume_backed_live_migration)\
+ (?!.*test_live_migration.LiveMigrationTest.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveMigrationTest.test_volume_backed_live_migration)\
+ (?!.*test_live_migration.LiveMigrationRemoteConsolesV26Test)\
+ (?!.*test_quotas.QuotasAdminTestV257)\
+ (?!.*certificates.test_certificates)\
+ (?!.*test_quotas_negative.QuotasSecurityGroupAdminNegativeTest)\
+ (?!.*test_novnc)(?!.*test_server_personality)\
+ (?!.*test_servers.ServerShowV263Test.test_show_update_rebuild_list_server)\
+ (?!.*test_servers_microversions.ServerShowV254Test)\
+ (?!.*test_servers_microversions.ServerShowV257Test)\
+ (?!.*test_servers_negative.ServersNegativeTestJSON.test_personality_file_contents_not_encoded)\
+ (?!.*servers.test_virtual_interfaces)\
+ (?!.*test_server_actions.ServerActionsTestJSON.test_change_server_password)\
+ (?!.*test_server_actions.ServerActionsTestJSON.test_get_vnc_console)\
+ (?!.*test_server_actions.ServerActionsTestJSON.test_reboot_server_soft)\
+ (?!.*test_security_group_default_rules)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_duplicate_name)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_invalid_group_description)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_invalid_group_name)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_des)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_id)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_name)\
+ (?!.*test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex)\
+ (?!.*compute.test_virtual_interfaces)(?!.*compute.test_virtual_interfaces_negative)\
+ (?!.*compute.test_networks)\
+ (?!.*test_attach_volume.AttachVolumeMultiAttach)\
+ (?!.*identity.admin.v2)(?!.*identity.v2)\
+ (?!.*image.v1)\
+ (?!.*admin.test_dhcp_agent_scheduler)\
+ (?!.*admin.test_routers_dvr)\
+ (?!.*test_metering_extensions)(?!.*network.test_tags)\
+ (?!.*test_routers_negative.DvrRoutersNegativeTest)\
+ (?!.*test_routers.RoutersIpV6Test.test_create_router_set_gateway_with_fixed_ip)\
+ (?!.*test_routers.RoutersTest.test_create_router_set_gateway_with_fixed_ip)\
+ (?!.*test_group_snapshots.GroupSnapshotsV319Test.test_reset_group_snapshot_status)\
+ (?!.*test_multi_backend)\
+ (?!.*test_snapshot_manage)\
+ (?!.*test_volume_retype.VolumeRetypeWithMigrationTest)\
+ (?!.*test_volume_delete_cascade.VolumesDeleteCascade.test_volume_from_snapshot_cascade_delete)\
+ (?!.*test_volumes_backup.VolumesBackupsTest.test_volume_backup_create_get_detailed_list_restore_delete)\
+ (?!.*test_volumes_extend.VolumesExtendAttachedTest.test_extend_attached_volume)\
+ (?!.*\\[.*\\bslow\\b.*\\])(^tempest.api)"
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_scenario_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 9
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
+ run:
+ name: tempest_common
+ args:
+ mode: "\
+ (?!.*test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks)\
+ (?!.*\\[.*\\bslow\\b.*\\])(^tempest.scenario)"
+ option:
+ - '--concurrency=1'
+
+ -
+ case_name: tempest_slow_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 43
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*test_volume_swap)\
+ (?!.*test_server_personality)\
+ (?!.*test_container_sync.ContainerSyncTest.test_container_synchronization)\
+ (?!.*test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization)\
+ (?!.*test_encrypted_cinder_volumes)\
+ (?!.*test_minbw_allocation_placement)\
+ (?!.*test_network_basic_ops.TestNetworkBasicOps.test_router_rescheduling)\
+ (?!.*test_stamp_pattern.TestStampPattern.test_stamp_pattern)\
+ (?!.*test_volume_migrate_attached)\
+ (?=.*\\[.*\\bslow\\b.*\\])(^tempest.)"
+ option:
+ - '--concurrency=1'
diff --git a/docker/smoke/Dockerfile b/docker/smoke/Dockerfile
index 22e3f85f2..75413f1d7 100644
--- a/docker/smoke/Dockerfile
+++ b/docker/smoke/Dockerfile
@@ -6,16 +6,24 @@ ARG TEMPEST_TAG=21.0.0
ARG REFSTACK_TARGET=2018.11
ARG PATROLE_TAG=0.4.0
ARG NEUTRON_TEMPEST_TAG=0.3.0
+ARG CINDER_TEMPEST_TAG=0.2.0
+ARG KEYSTONE_TEMPEST_TAG=0.2.0
ARG BARBICAN_TAG=0.1.0
+ARG HEAT_TEMPEST_TAG=1.0.0
-RUN apk --no-cache add --virtual .build-deps --update \
+RUN apk --no-cache add --update libxml2 libxslt libpcre16 libpcre32 && \
+ apk --no-cache add --virtual .build-deps --update \
python-dev build-base linux-headers libffi-dev \
- openssl-dev libjpeg-turbo-dev && \
+ openssl-dev libjpeg-turbo-dev libxml2-dev libxslt-dev pcre-dev && \
wget -q -O- https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK_TAG/upper-constraints.txt > upper-constraints.txt && \
- sed -i -E s/^tempest==+.*$/-e\ git+https:\\/\\/opendev.org\\/openstack\\/tempest@$TEMPEST_TAG#egg=tempest/ upper-constraints.txt && \
+ sed -i -E /^tempest==+.*$/d upper-constraints.txt && \
+ sed -i -E /^ujson==+.*$/d upper-constraints.txt && \
case $(uname -m) in aarch*|arm*) sed -i -E /^PyNaCl=/d upper-constraints.txt ;; esac && \
wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH > upper-constraints.opnfv.txt && \
sed -i -E /#egg=functest/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=rally/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=xrally-kubernetes/d upper-constraints.opnfv.txt && \
+ case $(uname -m) in aarch*|arm*) CFLAGS="-O0" pip install --no-cache-dir -cupper-constraints.txt -cupper-constraints.opnfv.txt lxml ;; esac && \
git init /src/patrole && \
(cd /src/patrole && \
git fetch --tags https://opendev.org/openstack/patrole.git $PATROLE_TAG && \
@@ -26,16 +34,33 @@ RUN apk --no-cache add --virtual .build-deps --update \
git fetch --tags https://opendev.org/openstack/neutron-tempest-plugin.git $NEUTRON_TEMPEST_TAG && \
git checkout FETCH_HEAD) && \
update-requirements -s --source /src/openstack-requirements /src/neutron-tempest-plugin && \
+ git init /src/cinder-tempest-plugin && \
+ (cd /src/cinder-tempest-plugin && \
+ git fetch --tags https://opendev.org/openstack/cinder-tempest-plugin.git $CINDER_TEMPEST_TAG && \
+ git checkout FETCH_HEAD) && \
+ update-requirements -s --source /src/openstack-requirements /src/cinder-tempest-plugin && \
+ git init /src/keystone-tempest-plugin && \
+ (cd /src/keystone-tempest-plugin && \
+ git fetch --tags https://opendev.org/openstack/keystone-tempest-plugin.git $KEYSTONE_TEMPEST_TAG && \
+ git checkout FETCH_HEAD) && \
+ update-requirements -s --source /src/openstack-requirements /src/keystone-tempest-plugin && \
git init /src/barbican-tempest-plugin && \
(cd /src/barbican-tempest-plugin && \
git fetch --tags https://opendev.org/openstack/barbican-tempest-plugin.git $BARBICAN_TAG && \
git checkout FETCH_HEAD) && \
update-requirements -s --source /src/openstack-requirements /src/barbican-tempest-plugin/ && \
+ git init /src/heat-tempest-plugin && \
+ (cd /src/heat-tempest-plugin && \
+ git fetch --tags https://opendev.org/openstack/heat-tempest-plugin.git $HEAT_TEMPEST_TAG && \
+ git checkout FETCH_HEAD) && \
pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
/src/patrole /src/barbican-tempest-plugin /src/neutron-tempest-plugin \
- networking-bgpvpn networking-sfc && \
+ networking-bgpvpn networking-sfc /src/cinder-tempest-plugin /src/keystone-tempest-plugin \
+ /src/heat-tempest-plugin && \
rm -r upper-constraints.txt upper-constraints.opnfv.txt \
- /src/patrole /src/barbican-tempest-plugin /src/neutron-tempest-plugin && \
+ /src/patrole /src/barbican-tempest-plugin /src/neutron-tempest-plugin \
+ /src/cinder-tempest-plugin /src/keystone-tempest-plugin \
+ /src/heat-tempest-plugin && \
mkdir -p /home/opnfv/functest/data/refstack && \
mkdir -p /etc/neutron /etc/glance && \
wget -q -O /etc/neutron/policy.json https://opendev.org/openstack/neutron/raw/branch/$OPENSTACK_TAG/etc/policy.json && \
diff --git a/docker/smoke/testcases.yaml b/docker/smoke/testcases.yaml
index 4fa5aaeae..f7707d66d 100644
--- a/docker/smoke/testcases.yaml
+++ b/docker/smoke/testcases.yaml
@@ -2,12 +2,11 @@
tiers:
-
name: smoke
- order: 1
description: >-
Set of basic Functional tests to validate the OPNFV scenarios.
testcases:
-
- case_name: neutron-tempest-plugin-api
+ case_name: tempest_neutron
project_name: functest
criteria: 100
blocking: false
@@ -24,6 +23,52 @@ tiers:
- '--concurrency=4'
-
+ case_name: tempest_cinder
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Cinder project.
+ run:
+ name: tempest_common
+ args:
+ mode:
+ '(?!.*test_incremental_backup)(^cinder_tempest_plugin.)'
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_keystone
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Keystone project.
+ run:
+ name: tempest_common
+ args:
+ mode: 'keystone_tempest_plugin.'
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_heat
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Heat project.
+ run:
+ name: tempest_heat
+ args:
+ mode: '^heat_tempest_plugin.tests'
+ option:
+ - '--concurrency=1'
+
+ -
case_name: rally_sanity
project_name: functest
criteria: 100
@@ -43,6 +88,7 @@ tiers:
criteria: 100
blocking: false
deny_skipping: true
+ tests_count: 219
description: >-
This test case runs a sub group of tests of the OpenStack
Defcore testcases.
@@ -80,13 +126,30 @@ tiers:
the OpenStack deployment.
https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
run:
- name: tempest_scenario
+ name: tempest_common
args:
mode: '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)'
option:
- '--concurrency=1'
-
+ case_name: tempest_slow
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
+ run:
+ name: tempest_common
+ args:
+ mode: '(?=.*\[.*\bslow\b.*\])(^tempest\.)'
+ option:
+ - '--concurrency=1'
+
+ -
case_name: patrole
project_name: functest
criteria: 100
@@ -107,24 +170,6 @@ tiers:
- 'test_list_metadef_namespaces'
-
- case_name: neutron_trunk
- project_name: functest
- criteria: 100
- blocking: false
- description: >-
- This test case runs the neutron trunk subtest of the
- OpenStack Tempest suite. The list of test cases is
- generated by Tempest having as input the relevant
- testcase list file.
- run:
- name: tempest_common
- args:
- mode: 'neutron_tempest_plugin.(api|scenario).test_trunk'
- neutron_extensions:
- - trunk
- - trunk-details
-
- -
case_name: networking-bgpvpn
project_name: functest
criteria: 100
@@ -140,7 +185,7 @@ tiers:
neutron_extensions:
- bgpvpn
option:
- - '--concurrency=4'
+ - '--concurrency=3'
-
case_name: networking-sfc
@@ -162,7 +207,7 @@ tiers:
- '--concurrency=0'
-
- case_name: barbican
+ case_name: tempest_barbican
project_name: functest
criteria: 100
blocking: false
@@ -176,3 +221,5 @@ tiers:
'^barbican_tempest_plugin.((?!test_signed_image_upload_boot_failure).)*$'
services:
- barbican
+ option:
+ - '--concurrency=4'
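
Editor's note (not part of the change above): the mode values added to testcases.yaml are regular expressions handed to the Tempest test filter. A minimal sketch for sanity-checking such a pattern outside the container is to apply it with a plain re.search, which mirrors how the selection regex is typically applied; the test ids below are hypothetical examples, e.g.::

    import re

    # Pattern copied from the tempest_cinder entry above: run everything under
    # cinder_tempest_plugin except tests whose id contains test_incremental_backup.
    MODE = r'(?!.*test_incremental_backup)(^cinder_tempest_plugin.)'

    # Hypothetical test ids, only used to illustrate how the filter behaves.
    candidates = [
        'cinder_tempest_plugin.api.volume.test_volume_unicode.CinderUnicodeTest.test_create_delete_unicode_volume',
        'cinder_tempest_plugin.api.volume.test_backup.VolumesBackupsTest.test_incremental_backup',
        'tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete',
    ]

    for name in candidates:
        # Search semantics: the first id is selected, the other two are skipped.
        print('selected' if re.search(MODE, name) else 'skipped ', name)
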
diff --git a/docker/tempest/Accept-custom-registered-endpoints.patch b/docker/tempest/Accept-custom-registered-endpoints.patch
new file mode 100644
index 000000000..eba6ff436
--- /dev/null
+++ b/docker/tempest/Accept-custom-registered-endpoints.patch
@@ -0,0 +1,99 @@
+From 1d500e79156ada6bc6fdb628ed1da0efd4121f6a Mon Sep 17 00:00:00 2001
+From: Martin Kopec <mkopec@redhat.com>
+Date: Thu, 31 Oct 2019 13:56:42 +0000
+Subject: [PATCH 13/46] Accept custom registered endpoints
+
+The review drops usage of the skip_path() filter in the related tests
+and uses raw_request() instead.
+
+Normally a swift url is organised as host:port/info and
+host:port/v1/AUTH_<tenant-id>, see
+https://docs.openstack.org/api-ref/object-store/
+But RadosGW API is organised as host:port/swift/info and
+host:port/swift/v1/AUTH_<tenant-id>, see
+https://docs.ceph.com/docs/master/radosgw/config-ref/#swift-settings
+
+Close-bug: 1799981
+Change-Id: I6a932639a05defe0f04c600afcc35a19662937af
+---
+ tempest/api/object_storage/test_crossdomain.py | 7 +++----
+ tempest/api/object_storage/test_healthcheck.py | 7 +++----
+ tempest/lib/services/object_storage/capabilities_client.py | 5 +++--
+ .../services/object_storage/test_capabilities_client.py | 2 +-
+ 4 files changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
+index f61d9f891..1567e0640 100644
+--- a/tempest/api/object_storage/test_crossdomain.py
++++ b/tempest/api/object_storage/test_crossdomain.py
+@@ -34,13 +34,12 @@ class CrossdomainTest(base.BaseObjectTest):
+ def setUp(self):
+ super(CrossdomainTest, self).setUp()
+
+- # Turning http://.../v1/foobar into http://.../
+- self.account_client.skip_path()
+-
+ @decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
+ @utils.requires_ext(extension='crossdomain', service='object')
+ def test_get_crossdomain_policy(self):
+- resp, body = self.account_client.get("crossdomain.xml", {})
++ url = self.account_client._get_base_version_url() + "crossdomain.xml"
++ resp, body = self.account_client.raw_request(url, "GET")
++ self.account_client._error_checker(resp, body)
+ body = body.decode()
+
+ self.assertTrue(body.startswith(self.xml_start) and
+diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
+index a186f9ee6..8e9e4061d 100644
+--- a/tempest/api/object_storage/test_healthcheck.py
++++ b/tempest/api/object_storage/test_healthcheck.py
+@@ -22,13 +22,12 @@ class HealthcheckTest(base.BaseObjectTest):
+
+ def setUp(self):
+ super(HealthcheckTest, self).setUp()
+- # Turning http://.../v1/foobar into http://.../
+- self.account_client.skip_path()
+
+ @decorators.idempotent_id('db5723b1-f25c-49a9-bfeb-7b5640caf337')
+ def test_get_healthcheck(self):
+-
+- resp, _ = self.account_client.get("healthcheck", {})
++ url = self.account_client._get_base_version_url() + "healthcheck"
++ resp, body = self.account_client.raw_request(url, "GET")
++ self.account_client._error_checker(resp, body)
+
+ # The target of the request is not any Swift resource. Therefore, the
+ # existence of response header is checked without a custom matcher.
+diff --git a/tempest/lib/services/object_storage/capabilities_client.py b/tempest/lib/services/object_storage/capabilities_client.py
+index d31bbc299..f08bd9aea 100644
+--- a/tempest/lib/services/object_storage/capabilities_client.py
++++ b/tempest/lib/services/object_storage/capabilities_client.py
+@@ -21,9 +21,10 @@ from tempest.lib.common import rest_client
+ class CapabilitiesClient(rest_client.RestClient):
+
+ def list_capabilities(self):
+- self.skip_path()
+ try:
+- resp, body = self.get('info')
++ url = self._get_base_version_url() + 'info'
++ resp, body = self.raw_request(url, 'GET')
++ self._error_checker(resp, body)
+ finally:
+ self.reset_path()
+ body = json.loads(body)
+diff --git a/tempest/tests/lib/services/object_storage/test_capabilities_client.py b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
+index b7f972a85..9df7c7c18 100644
+--- a/tempest/tests/lib/services/object_storage/test_capabilities_client.py
++++ b/tempest/tests/lib/services/object_storage/test_capabilities_client.py
+@@ -43,7 +43,7 @@ class TestCapabilitiesClient(base.BaseServiceTest):
+ }
+ self.check_service_client_function(
+ self.client.list_capabilities,
+- 'tempest.lib.common.rest_client.RestClient.get',
++ 'tempest.lib.common.rest_client.RestClient.raw_request',
+ resp,
+ bytes_body)
+
+--
+2.26.0.rc2
+
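Editor's note (not part of the patch above): the patch builds request URLs from the client's registered base version URL instead of stripping the path, so the /swift prefix used by RadosGW survives. A minimal sketch of that idea, with made-up endpoints and a helper written in the spirit of _get_base_version_url(), e.g.::

    import re
    from urllib.parse import urlsplit, urlunsplit

    def base_version_url(endpoint):
        # Drop everything from the /vN... component onward (sketch only,
        # loosely modelled on the tempest helper used by the patched tests).
        parts = urlsplit(endpoint)
        path = re.split(r'(^|/)+v\d+(\.\d+)?', parts.path)[0]
        return urlunsplit((parts.scheme, parts.netloc, path + '/', '', ''))

    # Hypothetical catalog entries for the two layouts named in the commit message.
    swift = 'http://swift.example.com:8080/v1/AUTH_tenant'      # vanilla Swift
    rgw = 'http://rgw.example.com:8080/swift/v1/AUTH_tenant'    # RadosGW

    print(base_version_url(swift) + 'info')  # http://swift.example.com:8080/info
    print(base_version_url(rgw) + 'info')    # http://rgw.example.com:8080/swift/info
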
diff --git a/docker/tempest/Create-new-server-in-test_create_backup.patch b/docker/tempest/Create-new-server-in-test_create_backup.patch
new file mode 100644
index 000000000..1b86b0fc5
--- /dev/null
+++ b/docker/tempest/Create-new-server-in-test_create_backup.patch
@@ -0,0 +1,84 @@
+From 03eb38ce54aeec4bc4c1cb3475c6fb84661f8993 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?C=C3=A9dric=20Ollivier?= <cedric.ollivier@orange.com>
+Date: Tue, 21 Jul 2020 13:28:50 +0200
+Subject: [PATCH] Create new server in test_create_backup
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+test_reboot_server_hard sometimes fails in all gates [1].
+This hack could highlight whether there are side effects between
+test_create_backup and test_reboot_server_hard.
+
+[1] http://artifacts.opnfv.org/functest/E5AZMH89OOK6/functest-opnfv-functest-smoke-cntt-hunter-tempest_full_cntt-run-142/tempest_full_cntt/tempest-report.html
+
+Change-Id: I203562f686b004094e5e18858004b7a2d26567a6
+Signed-off-by: CĂ©dric Ollivier <cedric.ollivier@orange.com>
+---
+ .../api/compute/servers/test_server_actions.py | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
+index d477be0eb..c369311d3 100644
+--- a/tempest/api/compute/servers/test_server_actions.py
++++ b/tempest/api/compute/servers/test_server_actions.py
+@@ -443,6 +443,7 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
+ # Check if glance v1 is available to determine which client to use. We
+ # prefer glance v1 for the compute API tests since the compute image
+ # API proxy was written for glance v1.
++ newserver = self.create_test_server(wait_until='ACTIVE')
+ if CONF.image_feature_enabled.api_v1:
+ glance_client = self.os_primary.image_client
+ elif CONF.image_feature_enabled.api_v2:
+@@ -453,7 +454,7 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
+ '[image-feature-enabled].')
+
+ backup1 = data_utils.rand_name('backup-1')
+- resp = self.client.create_backup(self.server_id,
++ resp = self.client.create_backup(newserver['id'],
+ backup_type='daily',
+ rotation=2,
+ name=backup1)
+@@ -481,8 +482,8 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
+ image1_id, 'active')
+
+ backup2 = data_utils.rand_name('backup-2')
+- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+- resp = self.client.create_backup(self.server_id,
++ waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
++ resp = self.client.create_backup(newserver['id'],
+ backup_type='daily',
+ rotation=2,
+ name=backup2)
+@@ -499,7 +500,7 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
+ properties = {
+ 'image_type': 'backup',
+ 'backup_type': "daily",
+- 'instance_uuid': self.server_id,
++ 'instance_uuid': newserver['id'],
+ }
+ params = {
+ 'status': 'active',
+@@ -524,8 +525,8 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
+ # create the third one, due to the rotation is 2,
+ # the first one will be deleted
+ backup3 = data_utils.rand_name('backup-3')
+- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+- resp = self.client.create_backup(self.server_id,
++ waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
++ resp = self.client.create_backup(newserver['id'],
+ backup_type='daily',
+ rotation=2,
+ name=backup3)
+@@ -536,7 +537,7 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
+ image3_id = data_utils.parse_image_id(resp.response['location'])
+ self.addCleanup(glance_client.delete_image, image3_id)
+ # the first back up should be deleted
+- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
++ waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
+ glance_client.wait_for_resource_deletion(image1_id)
+ oldest_backup_exist = False
+ if CONF.image_feature_enabled.api_v1:
+--
+2.27.0
+
diff --git a/docker/tempest/Dockerfile b/docker/tempest/Dockerfile
index ffb9bd7e8..2298c4d0b 100644
--- a/docker/tempest/Dockerfile
+++ b/docker/tempest/Dockerfile
@@ -3,24 +3,34 @@ FROM opnfv/functest-core:hunter
ARG BRANCH=stable/hunter
ARG OPENSTACK_TAG=stable/rocky
ARG TEMPEST_TAG=21.0.0
-ARG RALLY_TAG=1.3.0
-ARG RALLY_OPENSTACK_TAG=1.3.0
-ARG UJSON_TAG=d25e024f481c5571d15f3c0c406a498ca0467cfd
+ARG RALLY_TAG=1.5.1
+ARG RALLY_OPENSTACK_TAG=1.5.0
+COPY Accept-custom-registered-endpoints.patch /tmp/Accept-custom-registered-endpoints.patch
+COPY Fixes-race-condition-in-test_add_remove_fixed_ip.patch /tmp/Fixes-race-condition-in-test_add_remove_fixed_ip.patch
+COPY object-storage-fix-and-cleanup-header-checks.patch /tmp/object-storage-fix-and-cleanup-header-checks.patch
+COPY Create-new-server-in-test_create_backup.patch /tmp/Create-new-server-in-test_create_backup.patch
+COPY Switch-to-threading.Thread-for-Rally-tasks.patch /tmp/Switch-to-threading.Thread-for-Rally-tasks.patch
RUN apk --no-cache add --virtual .build-deps --update \
python-dev build-base linux-headers libffi-dev \
openssl-dev libjpeg-turbo-dev && \
wget -q -O- https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK_TAG/upper-constraints.txt > upper-constraints.txt && \
sed -i -E s/^tempest==+.*$/-e\ git+https:\\/\\/opendev.org\\/openstack\\/tempest@$TEMPEST_TAG#egg=tempest/ upper-constraints.txt && \
- sed -i -E s/^ujson==+.*$/-e\ git+https:\\/\\/github.com\\/esnme\\/ultrajson@$UJSON_TAG#egg=ujson/ upper-constraints.txt && \
+ sed -i -E /^ujson==+.*$/d upper-constraints.txt && \
+ sed -i -E /^kubernetes==+.*$/d upper-constraints.txt && \
case $(uname -m) in aarch*|arm*) sed -i -E /^PyNaCl=/d upper-constraints.txt ;; esac && \
wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH > upper-constraints.opnfv.txt && \
sed -i -E /#egg=functest/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=rally/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=xrally-kubernetes/d upper-constraints.opnfv.txt && \
git init /src/rally && \
(cd /src/rally && \
git fetch --tags https://opendev.org/openstack/rally.git $RALLY_TAG && \
git checkout FETCH_HEAD) && \
update-requirements -s --source /src/openstack-requirements /src/rally/ && \
+ (cd /src/rally && patch -p1 < /tmp/Switch-to-threading.Thread-for-Rally-tasks.patch) && \
+ pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
+ /src/rally && \
git init /src/rally-openstack && \
(cd /src/rally-openstack && \
git fetch --tags https://opendev.org/openstack/rally-openstack.git $RALLY_OPENSTACK_TAG && \
@@ -28,10 +38,22 @@ RUN apk --no-cache add --virtual .build-deps --update \
update-requirements -s --source /src/openstack-requirements /src/rally-openstack && \
pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
tempest /src/rally-openstack && \
- pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
- /src/rally && \
- rm -r upper-constraints.txt upper-constraints.opnfv.txt /src/rally /src/rally-openstack && \
+ rm -r upper-constraints.txt upper-constraints.opnfv.txt /src/rally /src/rally-openstack \
+ /tmp/Switch-to-threading.Thread-for-Rally-tasks.patch && \
mkdir -p /etc/rally && \
printf "[database]\nconnection = 'sqlite:////var/lib/rally/database/rally.sqlite'" > /etc/rally/rally.conf && \
mkdir -p /var/lib/rally/database && rally db create && \
+ (cd /src/tempest && \
+ git config --global user.email "opnfv-tech-discuss@lists.opnfv.org" && \
+ git config --global user.name "Functest" && \
+ patch -p1 < /tmp/Accept-custom-registered-endpoints.patch && \
+ patch -p1 < /tmp/object-storage-fix-and-cleanup-header-checks.patch && \
+ patch -p1 < /tmp/Fixes-race-condition-in-test_add_remove_fixed_ip.patch && \
+ patch -p1 < /tmp/Create-new-server-in-test_create_backup.patch && \
+ git commit -a -m "Backport critical bugfixes" && \
+ rm ~/.gitconfig) && \
+ rm /tmp/Accept-custom-registered-endpoints.patch \
+ /tmp/object-storage-fix-and-cleanup-header-checks.patch \
+ /tmp/Fixes-race-condition-in-test_add_remove_fixed_ip.patch \
+ /tmp/Create-new-server-in-test_create_backup.patch && \
apk del .build-deps
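
Editor's note (not part of the Dockerfile above): the sed calls rewrite upper-constraints.txt before it is passed to pip with -c, pinning tempest to a git tag and dropping pins that must not constrain the install. A minimal Python sketch of that rewriting, for illustration only (the tag value is the build argument shown above)::

    import re

    TEMPEST_TAG = '21.0.0'

    def rewrite_constraints(text):
        out = []
        for line in text.splitlines():
            if re.match(r'^tempest==', line):
                # Same intent as the sed s/^tempest==.../ substitution above.
                out.append('-e git+https://opendev.org/openstack/tempest'
                           '@%s#egg=tempest' % TEMPEST_TAG)
            elif re.match(r'^(ujson|kubernetes)==', line):
                continue  # dropped, like the `sed -i -E /.../d` lines
            else:
                out.append(line)
        return '\n'.join(out) + '\n'

    sample = 'tempest==19.0.0\nujson==1.35\nkubernetes==8.0.0\nrequests==2.21.0\n'
    print(rewrite_constraints(sample))
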
diff --git a/docker/tempest/Fixes-race-condition-in-test_add_remove_fixed_ip.patch b/docker/tempest/Fixes-race-condition-in-test_add_remove_fixed_ip.patch
new file mode 100644
index 000000000..a9f3c7e6d
--- /dev/null
+++ b/docker/tempest/Fixes-race-condition-in-test_add_remove_fixed_ip.patch
@@ -0,0 +1,165 @@
+From 61a3c8efa4a5e41dc6b5fd2d7a28a25555ebb54b Mon Sep 17 00:00:00 2001
+From: David SedlĂ¡k <dsedlak@redhat.com>
+Date: Wed, 30 Oct 2019 15:38:21 +0100
+Subject: [PATCH] Fixes race condition in test_add_remove_fixed_ip
+
+Currently a race condition can occur in
+tempest.api.compute.servers.test_attach_interfaces.
+AttachInterfacesUnderV243Test.test_add_remove_fixed_ip
+when the floating IP added during resource preparation doesn't appear in
+the list of original IPs created at the beginning of the test,
+which then confuses the test:
+the floating IP is later recognized as the fixed IP added in the test.
+More details, including a log:
+https://bugzilla.redhat.com/show_bug.cgi?id=1752416
+
+This change ensures the floating IP possibly added during server
+creation is always present in the set of original IPs and also
+in every comparison of IPs.
+
+Closes-Bug: #1866179
+
+Change-Id: Ic3a3e0708714b6d6c9c226e641e1c520e5ebde9d
+Signed-off-by: David SedlĂ¡k <dsedlak@redhat.com>
+---
+
+diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
+index df8da07..c1af6c7 100644
+--- a/tempest/api/compute/servers/test_attach_interfaces.py
++++ b/tempest/api/compute/servers/test_attach_interfaces.py
+@@ -86,12 +86,16 @@
+ # apparently not enough? Add cleanup here.
+ self.addCleanup(self.delete_server, server['id'])
+ self._wait_for_validation(server, validation_resources)
++ try:
++ fip = set([validation_resources['floating_ip']['ip']])
++ except KeyError:
++ fip = ()
+ ifs = (self.interfaces_client.list_interfaces(server['id'])
+ ['interfaceAttachments'])
+ body = waiters.wait_for_interface_status(
+ self.interfaces_client, server['id'], ifs[0]['port_id'], 'ACTIVE')
+ ifs[0]['port_state'] = body['port_state']
+- return server, ifs
++ return server, ifs, fip
+
+
+ class AttachInterfacesTestJSON(AttachInterfacesTestBase):
+@@ -226,7 +230,7 @@
+ @decorators.idempotent_id('73fe8f02-590d-4bf1-b184-e9ca81065051')
+ @utils.services('network')
+ def test_create_list_show_delete_interfaces_by_network_port(self):
+- server, ifs = self._create_server_get_interfaces()
++ server, ifs, _ = self._create_server_get_interfaces()
+ interface_count = len(ifs)
+ self.assertGreater(interface_count, 0)
+
+@@ -268,7 +272,7 @@
+ raise self.skipException("Only owner network supports "
+ "creating interface by fixed ip.")
+
+- server, ifs = self._create_server_get_interfaces()
++ server, ifs, _ = self._create_server_get_interfaces()
+ interface_count = len(ifs)
+ self.assertGreater(interface_count, 0)
+
+@@ -354,9 +358,8 @@
+ not CONF.network.shared_physical_network):
+ raise self.skipException("Only owner network supports "
+ "creating interface by fixed ip.")
+-
+ # Add and Remove the fixed IP to server.
+- server, ifs = self._create_server_get_interfaces()
++ server, ifs, fip = self._create_server_get_interfaces()
+ original_interface_count = len(ifs) # This is the number of ports.
+ self.assertGreater(original_interface_count, 0)
+ # Get the starting list of IPs on the server.
+@@ -369,6 +372,9 @@
+ self.assertEqual(1, len(addresses), addresses) # number of networks
+ # Keep track of the original addresses so we can know which IP is new.
+ original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
++ # Make sure the floating IP possibly assigned during
++ # server creation is always present in the set of original ips.
++ original_ips = set(original_ips).union(fip)
+ original_ip_count = len(original_ips)
+ self.assertGreater(original_ip_count, 0, addresses) # at least 1
+ network_id = ifs[0]['net_id']
+@@ -376,40 +382,22 @@
+ # fixed IP on the same network (and same port since we only have one
+ # port).
+ self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
+- # Wait for the ips count to increase by one.
+
+- def _get_server_floating_ips():
+- _floating_ips = []
+- _server = self.os_primary.servers_client.show_server(
+- server['id'])['server']
+- for _ip_set in _server['addresses']:
+- for _ip in _server['addresses'][_ip_set]:
+- if _ip['OS-EXT-IPS:type'] == 'floating':
+- _floating_ips.append(_ip['addr'])
+- return _floating_ips
+-
+- def _wait_for_ip_increase():
++ def _wait_for_ip_change(expected_count):
+ _addresses = self.os_primary.servers_client.list_addresses(
+ server['id'])['addresses']
+- _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
+- LOG.debug("Wait for IP increase. All IPs still associated to "
++ _ips = set([addr['addr'] for addr in list(_addresses.values())[0]])
++ # Make sure possible floating ip is always present in the set.
++ _ips = _ips.union(fip)
++ LOG.debug("Wait for change of IPs. All IPs still associated to "
+ "the server %(id)s: %(ips)s",
+ {'id': server['id'], 'ips': _ips})
+- if len(_ips) == original_ip_count + 1:
+- return True
+- elif len(_ips) == original_ip_count:
+- return False
+- # If not, lets remove any floating IP from the list and check again
+- _fips = _get_server_floating_ips()
+- _ips = [_ip for _ip in _ips if _ip not in _fips]
+- LOG.debug("Wait for IP increase. Fixed IPs still associated to "
+- "the server %(id)s: %(ips)s",
+- {'id': server['id'], 'ips': _ips})
+- return len(_ips) == original_ip_count + 1
++ return len(_ips) == expected_count
+
++ # Wait for the ips count to increase by one.
+ if not test_utils.call_until_true(
+- _wait_for_ip_increase, CONF.compute.build_timeout,
+- CONF.compute.build_interval):
++ _wait_for_ip_change, CONF.compute.build_timeout,
++ CONF.compute.build_interval, original_ip_count + 1):
+ raise lib_exc.TimeoutException(
+ 'Timed out while waiting for IP count to increase.')
+
+@@ -428,26 +416,8 @@
+ break
+ self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
+ # Wait for the interface count to decrease by one.
+-
+- def _wait_for_ip_decrease():
+- _addresses = self.os_primary.servers_client.list_addresses(
+- server['id'])['addresses']
+- _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
+- LOG.debug("Wait for IP decrease. All IPs still associated to "
+- "the server %(id)s: %(ips)s",
+- {'id': server['id'], 'ips': _ips})
+- if len(_ips) == original_ip_count:
+- return True
+- # If not, lets remove any floating IP from the list and check again
+- _fips = _get_server_floating_ips()
+- _ips = [_ip for _ip in _ips if _ip not in _fips]
+- LOG.debug("Wait for IP decrease. Fixed IPs still associated to "
+- "the server %(id)s: %(ips)s",
+- {'id': server['id'], 'ips': _ips})
+- return len(_ips) == original_ip_count
+-
+ if not test_utils.call_until_true(
+- _wait_for_ip_decrease, CONF.compute.build_timeout,
+- CONF.compute.build_interval):
++ _wait_for_ip_change, CONF.compute.build_timeout,
++ CONF.compute.build_interval, original_ip_count):
+ raise lib_exc.TimeoutException(
+ 'Timed out while waiting for IP count to decrease.')
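Editor's note (not part of the patch above): the core idea of the fix is to fold the (possibly present) floating IP into every address set before counting, so the "count increased/decreased by one" check cannot be confused by when the floating IP shows up. A tiny sketch with made-up addresses::

    def ip_count(addresses, fip):
        # Always include the floating IP, as the patched test does.
        return len(set(addresses) | set(fip))

    fip = {'172.30.0.15'}                    # floating IP from validation resources
    before = ['10.0.0.5']                    # floating IP not yet listed on the server
    after = ['10.0.0.5', '10.0.0.9', '172.30.0.15']  # new fixed IP, floating IP now listed

    original = ip_count(before, fip)         # 2: floating IP counted up front
    assert ip_count(after, fip) == original + 1  # exactly one fixed IP was added
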
diff --git a/docker/tempest/Switch-to-threading.Thread-for-Rally-tasks.patch b/docker/tempest/Switch-to-threading.Thread-for-Rally-tasks.patch
new file mode 100644
index 000000000..7c146c9ed
--- /dev/null
+++ b/docker/tempest/Switch-to-threading.Thread-for-Rally-tasks.patch
@@ -0,0 +1,50 @@
+From 7223c6c766d2cbd47c54048426c904a27b52e069 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?C=C3=A9dric=20Ollivier?= <cedric.ollivier@orange.com>
+Date: Wed, 3 Jun 2020 15:23:59 +0200
+Subject: [PATCH] Switch to threading.Thread() for Rally tasks
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+multiprocessing.Process() often fails due to thread crashes [1].
+It looks similar to the issue described in the gsutil release notes [2].
+
+[1] https://build.opnfv.org/ci/job/functest-opnfv-functest-benchmarking-cntt-latest-rally_full_cntt-run/35/console
+[2] https://github.com/GoogleCloudPlatform/gsutil/issues/548
+[3] https://github.com/GoogleCloudPlatform/gsutil/blob/master/CHANGES.md
+
+Change-Id: I582933832e23d188c7fa5999e713dd5d7e82d2da
+Signed-off-by: CĂ©dric Ollivier <cedric.ollivier@orange.com>
+(cherry picked from commit 9b07423c246e7e4ab9fa25851d79ce6986c10c2e)
+---
+ rally/task/runner.py | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/rally/task/runner.py b/rally/task/runner.py
+index 55372e509..0f0245588 100644
+--- a/rally/task/runner.py
++++ b/rally/task/runner.py
+@@ -17,6 +17,7 @@ import abc
+ import collections
+ import copy
+ import multiprocessing
++import threading
+ import time
+
+ import six
+@@ -188,9 +189,9 @@ class ScenarioRunner(plugin.Plugin, validation.ValidatablePluginMixin):
+ for i in range(processes_to_start):
+ kwrgs = {"processes_to_start": processes_to_start,
+ "processes_counter": i}
+- process = multiprocessing.Process(target=worker_process,
+- args=next(worker_args_gen),
+- kwargs={"info": kwrgs})
++ process = threading.Thread(target=worker_process,
++ args=next(worker_args_gen),
++ kwargs={"info": kwrgs})
+ process.start()
+ process_pool.append(process)
+
+--
+2.26.2
+
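Editor's note (not part of the patch above): the swap is a one-liner because threading.Thread and multiprocessing.Process expose the same start()/join() surface used by the runner. A minimal, self-contained sketch (worker is a stand-in for Rally's worker_process)::

    import multiprocessing
    import threading

    def worker(info):
        # Stand-in for Rally's worker_process; only proves the call signature works.
        print('running with', info)

    if __name__ == '__main__':
        for cls in (multiprocessing.Process, threading.Thread):
            task = cls(target=worker, kwargs={'info': {'processes_counter': 0}})
            task.start()
            task.join()
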
diff --git a/docker/tempest/object-storage-fix-and-cleanup-header-checks.patch b/docker/tempest/object-storage-fix-and-cleanup-header-checks.patch
new file mode 100644
index 000000000..629a98174
--- /dev/null
+++ b/docker/tempest/object-storage-fix-and-cleanup-header-checks.patch
@@ -0,0 +1,171 @@
+From 42e111c7d8f1cdb5d51a1c4f2ce5c64c3d3471f1 Mon Sep 17 00:00:00 2001
+From: Thomas Morin <thomas.morin@orange.com>
+Date: Wed, 17 Jun 2020 18:08:49 +0200
+Subject: [PATCH] object storage: fix and cleanup header checks
+
+As explained in [1] it is not legitimate to require a Transfer-Encoding
+header in Swift responses. That prevents running some tests
+successfully in the case where Swift is behind a proxy/load-balancer
+that does not use any Transfer-Encoding in its responses.
+
+This change hence removes the checks for the presence of a
+"Transfer-Encoding" header and replaces them with the use
+of the existing check methods, after modifying the
+custom_matcher checks on which these methods rely to accept
+either a Content-Length or a Transfer-Encoding header.
+
+Some adaptation was also required to avoid trying to process 'etag'
+for DELETE requests.
+
+A side-effect of this change is a code simplification and
+cleanup since the specific header checks in the corresponding
+tests are replaced by the generic check methods.
+
+[1] https://bugs.launchpad.net/tempest/+bug/1819851/comments/3
+
+Closes-Bug: 1819851
+Change-Id: Iaccea41640a53b564f72cee73981e2e61d3e80ae
+---
+ .../api/object_storage/test_account_bulk.py | 37 ++-----------------
+ tempest/api/object_storage/test_object_slo.py | 13 +------
+ tempest/common/custom_matchers.py | 16 +++++++-
+ 3 files changed, 19 insertions(+), 47 deletions(-)
+
+diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
+index 6599e432f..80f790f51 100644
+--- a/tempest/api/object_storage/test_account_bulk.py
++++ b/tempest/api/object_storage/test_account_bulk.py
+@@ -16,7 +16,6 @@ import tarfile
+ import tempfile
+
+ from tempest.api.object_storage import base
+-from tempest.common import custom_matchers
+ from tempest.common import utils
+ from tempest.lib import decorators
+
+@@ -76,17 +75,7 @@ class BulkTest(base.BaseObjectTest):
+ resp = self._upload_archive(filepath)
+ self.containers.append(container_name)
+
+- # When uploading an archived file with the bulk operation, the response
+- # does not contain 'content-length' header. This is the special case,
+- # therefore the existence of response headers is checked without
+- # custom matcher.
+- self.assertIn('transfer-encoding', resp.response)
+- self.assertIn('content-type', resp.response)
+- self.assertIn('x-trans-id', resp.response)
+- self.assertIn('date', resp.response)
+-
+- # Check only the format of common headers with custom matcher
+- self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
++ self.assertHeaders(resp.response, 'Account', 'PUT')
+
+ param = {'format': 'json'}
+ resp, body = self.account_client.list_account_containers(param)
+@@ -113,17 +102,7 @@ class BulkTest(base.BaseObjectTest):
+ data = '%s/%s\n%s' % (container_name, object_name, container_name)
+ resp = self.bulk_client.delete_bulk_data(data=data)
+
+- # When deleting multiple files using the bulk operation, the response
+- # does not contain 'content-length' header. This is the special case,
+- # therefore the existence of response headers is checked without
+- # custom matcher.
+- self.assertIn('transfer-encoding', resp.response)
+- self.assertIn('content-type', resp.response)
+- self.assertIn('x-trans-id', resp.response)
+- self.assertIn('date', resp.response)
+-
+- # Check only the format of common headers with custom matcher
+- self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
++ self.assertHeaders(resp.response, 'Account', 'DELETE')
+
+ # Check if uploaded contents are completely deleted
+ self._check_contents_deleted(container_name)
+@@ -139,17 +118,7 @@ class BulkTest(base.BaseObjectTest):
+
+ resp = self.bulk_client.delete_bulk_data_with_post(data=data)
+
+- # When deleting multiple files using the bulk operation, the response
+- # does not contain 'content-length' header. This is the special case,
+- # therefore the existence of response headers is checked without
+- # custom matcher.
+- self.assertIn('transfer-encoding', resp.response)
+- self.assertIn('content-type', resp.response)
+- self.assertIn('x-trans-id', resp.response)
+- self.assertIn('date', resp.response)
+-
+- # Check only the format of common headers with custom matcher
+- self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
++ self.assertHeaders(resp.response, 'Account', 'POST')
+
+ # Check if uploaded contents are completely deleted
+ self._check_contents_deleted(container_name)
+diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
+index c66776e4e..8bb2e6e4b 100644
+--- a/tempest/api/object_storage/test_object_slo.py
++++ b/tempest/api/object_storage/test_object_slo.py
+@@ -17,7 +17,6 @@ import hashlib
+ from oslo_serialization import jsonutils as json
+
+ from tempest.api.object_storage import base
+-from tempest.common import custom_matchers
+ from tempest.common import utils
+ from tempest.lib.common.utils import data_utils
+ from tempest.lib.common.utils import test_utils
+@@ -160,17 +159,7 @@ class ObjectSloTest(base.BaseObjectTest):
+ object_name,
+ params=params_del)
+
+- # When deleting SLO using multipart manifest, the response contains
+- # not 'content-length' but 'transfer-encoding' header. This is the
+- # special case, therefore the existence of response headers is checked
+- # outside of custom matcher.
+- self.assertIn('transfer-encoding', resp)
+- self.assertIn('content-type', resp)
+- self.assertIn('x-trans-id', resp)
+- self.assertIn('date', resp)
+-
+- # Check only the format of common headers with custom matcher
+- self.assertThat(resp, custom_matchers.AreAllWellFormatted())
++ self.assertHeaders(resp, 'Object', 'DELETE')
+
+ resp, body = self.container_client.list_container_objects(
+ self.container_name)
+diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
+index c702d8896..f1adeab64 100644
+--- a/tempest/common/custom_matchers.py
++++ b/tempest/common/custom_matchers.py
+@@ -62,8 +62,9 @@ class ExistsAllResponseHeaders(object):
+ # [1] https://bugs.launchpad.net/swift/+bug/1537811
+ # [2] http://tracker.ceph.com/issues/13582
+ if ('content-length' not in actual and
++ 'transfer-encoding' not in actual and
+ self._content_length_required(actual)):
+- return NonExistentHeader('content-length')
++ return NonExistentHeaders(['content-length', 'transfer-encoding'])
+ if 'content-type' not in actual:
+ return NonExistentHeader('content-type')
+ if 'x-trans-id' not in actual:
+@@ -192,6 +193,19 @@ class NonExistentHeader(object):
+ return {}
+
+
++class NonExistentHeaders(object):
++ """Informs an error message in the case of missing certain headers"""
++
++ def __init__(self, headers):
++ self.headers = headers
++
++ def describe(self):
++ return "none of these headers exist: %s" % self.headers
++
++ def get_details(self):
++ return {}
++
++
+ class InvalidHeaderValue(object):
+ """Informs an error message when a header contains a bad value"""
+
+--
+2.27.0
+
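Editor's note (not part of the patch above): the relaxed check accepts a response that carries either a Content-Length or a Transfer-Encoding header, which is what proxied or RadosGW-backed Swift deployments may return. A minimal sketch of that logic, independent of the tempest matcher classes::

    def missing_length_headers(headers):
        # Case-insensitive lookup over the response header names.
        names = {name.lower() for name in headers}
        if 'content-length' in names or 'transfer-encoding' in names:
            return None
        # Mirrors the spirit of the NonExistentHeaders error added by the patch.
        return ['content-length', 'transfer-encoding']

    print(missing_length_headers({'Content-Length': '12', 'Content-Type': 'text/plain'}))  # None
    print(missing_length_headers({'Transfer-Encoding': 'chunked'}))                        # None
    print(missing_length_headers({'Content-Type': 'text/plain'}))  # both names reported missing
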
diff --git a/docker/vnf/Dockerfile b/docker/vnf/Dockerfile
index 0fb83ff6e..b28486814 100644
--- a/docker/vnf/Dockerfile
+++ b/docker/vnf/Dockerfile
@@ -9,26 +9,23 @@ ARG CLOUDIFY_VIMS_TAG=gambia
ARG HEAT_VIMS_TAG=release-130
ARG VROUTER_TAG=fraser
ARG VROUTER_BP_TAG=9b76d46a388d32d4985797620e67c2ed3315b3e4
-ARG JUJU_TAG=tags/juju-2.3.9
-ARG JUJU_WAIT_TAG=2.6.4
ARG ABOT_CHARM=opnfv-fraser
-ARG GODEPS_TAG=404a7e748cd352bb0d7449dedc645546eebbfc6e
-
-ENV GOPATH /src/epc-requirements/go
-ENV GOBIN /src/epc-requirements/go/bin
-ENV PATH $GOBIN:$PATH
COPY clearwater-heat-singlenet-deps.patch /tmp/clearwater-heat-singlenet-deps.patch
RUN apk --no-cache add --update \
ruby ruby-bundler ruby-irb ruby-rdoc \
- procps libxslt libxml2 zlib libffi python3 go musl-dev && \
+ procps libxslt libxml2 zlib libffi python3 musl-dev && \
apk --no-cache add --virtual .build-deps --update \
- ruby-dev g++ make libxslt-dev libxml2-dev zlib-dev libffi-dev g++ make && \
+ ruby-dev g++ make libxslt-dev libxml2-dev zlib-dev libffi-dev && \
wget -q -O- https://opendev.org/openstack/requirements/raw/branch/$OPENSTACK_TAG/upper-constraints.txt > upper-constraints.txt && \
- sed -i -E s/^tempest==+.*$/-e\ git+https:\\/\\/opendev.org\\/openstack\\/tempest@$TEMPEST_TAG#egg=tempest/ upper-constraints.txt && \
+ sed -i -E /#egg=tempest/d upper-constraints.txt && \
+ sed -i -E /^ujson==+.*$/d upper-constraints.txt && \
+ sed -i -E /^kubernetes==+.*$/d upper-constraints.txt && \
case $(uname -m) in aarch*|arm*) sed -i -E /^PyNaCl=/d upper-constraints.txt ;; esac && \
wget -q -O- https://git.opnfv.org/functest/plain/upper-constraints.txt?h=$BRANCH > upper-constraints.opnfv.txt && \
sed -i -E /#egg=functest/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=rally/d upper-constraints.opnfv.txt && \
+ sed -i -E /#egg=xrally-kubernetes/d upper-constraints.opnfv.txt && \
git clone --depth 1 -b $VIMS_TEST_TAG https://github.com/Metaswitch/clearwater-live-test /src/vims-test && \
sed -i s/unf_ext\ \(.*\)/unf_ext\ \(0.0.7.4\)/g /src/vims-test/Gemfile.lock && \
git init /src/vims-test/quaff && \
@@ -57,22 +54,10 @@ RUN apk --no-cache add --update \
(cd /home/opnfv/functest/data/router/opnfv-vnf-data && \
git fetch --tags https://github.com/oolorg/opnfv-vnf-data.git $VROUTER_TAG && \
git checkout FETCH_HEAD) && \
- case "$(uname -m)" in \
- "armv7l" | "aarch64") ;; \
- *) \
- git init /src/epc-requirements/abot_charm && \
- (cd /src/epc-requirements/abot_charm && \
- git fetch --tags https://github.com/RebacaInc/abot_charm.git $ABOT_CHARM && \
- git checkout FETCH_HEAD) && \
- python3 -m pip install --no-cache-dir --src /src -cupper-constraints.txt -cupper-constraints.opnfv.txt \
- juju-wait==$JUJU_WAIT_TAG && \
- go get -d github.com/rogpeppe/godeps && \
- (cd $GOPATH/src/github.com/rogpeppe/godeps && git checkout $GODEPS_TAG && go install -v github.com/rogpeppe/godeps) && \
- go get -d -v github.com/juju/juju/... || true && \
- (cd $GOPATH/src/github.com/juju/juju && git checkout $JUJU_TAG && godeps -u dependencies.tsv) && \
- go install -v github.com/juju/juju/... && \
- rm -r $GOPATH/src/ $GOPATH/pkg /src/epc-requirements/abot_charm/.git /root/.cache/go-build;; \
- esac && \
+ git init /src/epc-requirements/abot_charm && \
+ (cd /src/epc-requirements/abot_charm && \
+ git fetch --tags https://github.com/collivier/abot_charm.git $ABOT_CHARM && \
+ git checkout FETCH_HEAD) && \
(cd /src/vims-test && bundle config build.nokogiri --use-system-libraries && bundle install --system && bundle update rest-client) && \
rm -r upper-constraints.txt upper-constraints.opnfv.txt /src/vims-test/.git /src/cloudify_vims/.git /src/heat_vims/.git /src/vims-test/quaff/.git \
/src/vims-test/build-infra/.git /src/opnfv-vnf-vyos-blueprint/.git \
diff --git a/docker/vnf/clearwater-heat-singlenet-deps.patch b/docker/vnf/clearwater-heat-singlenet-deps.patch
index a1bc3db7b..0e075f9f8 100644
--- a/docker/vnf/clearwater-heat-singlenet-deps.patch
+++ b/docker/vnf/clearwater-heat-singlenet-deps.patch
@@ -1,5 +1,5 @@
diff --git a/bono.yaml b/bono.yaml
-index f0189cd..7566338 100644
+index f0189cd..cc03838 100644
--- a/bono.yaml
+++ b/bono.yaml
@@ -23,26 +23,6 @@ parameters:
@@ -101,7 +101,7 @@ index f0189cd..7566338 100644
template: |
#!/bin/bash
-@@ -159,33 +124,6 @@ resources:
+@@ -159,36 +124,8 @@ resources:
exec > >(tee -a /var/log/clearwater-heat-bono.log) 2>&1
set -x
@@ -133,9 +133,13 @@ index f0189cd..7566338 100644
- /etc/init.d/signaling_namespace
-
# Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
-@@ -196,11 +134,8 @@ resources:
+- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
+- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
++ echo 'deb http://artifacts.opnfv.org/functest/clearwater/debian ./' > /etc/apt/sources.list.d/clearwater.list
+ apt-get update
+
+ # Configure /etc/clearwater/local_config.
+@@ -196,11 +133,8 @@ resources:
etcd_ip=__etcd_ip__
[ -n "$etcd_ip" ] || etcd_ip=__private_mgmt_ip__
cat > /etc/clearwater/local_config << EOF
@@ -149,7 +153,7 @@ index f0189cd..7566338 100644
public_hostname=__index__.bono.__zone__
etcd_cluster=$etcd_ip
EOF
-@@ -223,8 +158,8 @@ resources:
+@@ -223,8 +157,8 @@ resources:
while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
server __dns_mgmt_ip__
update add bono-__index__.__zone__. 30 $(ip2rr __public_mgmt_ip__)
@@ -160,7 +164,7 @@ index f0189cd..7566338 100644
update add __zone__. 30 NAPTR 0 0 "s" "SIP+D2T" "" _sip._tcp.__zone__.
update add __zone__. 30 NAPTR 0 0 "s" "SIP+D2U" "" _sip._udp.__zone__.
update add _sip._tcp.__zone__. 30 SRV 0 0 5060 __index__.bono.__zone__.
-@@ -241,10 +176,19 @@ resources:
+@@ -241,10 +175,19 @@ resources:
# Use the DNS server.
echo 'nameserver __dns_mgmt_ip__' > /etc/dnsmasq.resolv.conf
echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
@@ -182,7 +186,7 @@ index f0189cd..7566338 100644
outputs:
public_mgmt_ip:
description: IP address in public management network
-@@ -252,9 +196,3 @@ outputs:
+@@ -252,9 +195,3 @@ outputs:
private_mgmt_ip:
description: IP address in private management network
value: { get_attr: [ mgmt_port, fixed_ips, 0, ip_address ] }
@@ -428,7 +432,7 @@ index a155c60..dd90cb7 100644
etcd_ip: { get_attr: [ ellis, private_mgmt_ip ] }
index: __index__
diff --git a/dime.yaml b/dime.yaml
-index 642f19d..6ae2676 100644
+index 642f19d..d2b8b92 100644
--- a/dime.yaml
+++ b/dime.yaml
@@ -23,26 +23,6 @@ parameters:
@@ -535,7 +539,7 @@ index 642f19d..6ae2676 100644
template: |
#!/bin/bash
-@@ -156,33 +124,6 @@ resources:
+@@ -156,36 +124,8 @@ resources:
exec > >(tee -a /var/log/clearwater-heat-dime.log) 2>&1
set -x
@@ -567,9 +571,13 @@ index 642f19d..6ae2676 100644
- /etc/init.d/signaling_namespace
-
# Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
-@@ -193,11 +134,8 @@ resources:
+- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
+- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
++ echo 'deb http://artifacts.opnfv.org/functest/clearwater/debian ./' > /etc/apt/sources.list.d/clearwater.list
+ apt-get update
+
+ # Configure /etc/clearwater/local_config.
+@@ -193,11 +133,8 @@ resources:
etcd_ip=__etcd_ip__
[ -n "$etcd_ip" ] || etcd_ip=__private_mgmt_ip__
cat > /etc/clearwater/local_config << EOF
@@ -583,7 +591,7 @@ index 642f19d..6ae2676 100644
public_hostname=dime-__index__.__zone__
etcd_cluster=$etcd_ip
EOF
-@@ -220,9 +158,9 @@ resources:
+@@ -220,9 +157,9 @@ resources:
while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
server __dns_mgmt_ip__
update add dime-__index__.__zone__. 30 $(ip2rr __public_mgmt_ip__)
@@ -596,7 +604,7 @@ index 642f19d..6ae2676 100644
send
EOF
} && [ $retries -lt 10 ]
-@@ -235,17 +173,22 @@ resources:
+@@ -235,17 +172,22 @@ resources:
# Use the DNS server.
echo 'nameserver __dns_mgmt_ip__' > /etc/dnsmasq.resolv.conf
echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
@@ -769,7 +777,7 @@ index 825ede1..f98ffa3 100644
description: DNS zone
value: { get_param: zone }
diff --git a/ellis.yaml b/ellis.yaml
-index 963352d..ff59895 100644
+index 963352d..2bd0730 100644
--- a/ellis.yaml
+++ b/ellis.yaml
@@ -44,9 +44,6 @@ parameters:
@@ -815,7 +823,17 @@ index 963352d..ff59895 100644
template: |
#!/bin/bash
-@@ -176,7 +183,7 @@ resources:
+@@ -134,8 +141,7 @@ resources:
+ set -x
+
+ # Configure the APT software source.
+- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
+- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
++ echo 'deb http://artifacts.opnfv.org/functest/clearwater/debian ./' > /etc/apt/sources.list.d/clearwater.list
+ apt-get update
+
+ # Configure /etc/clearwater/local_config. Add xdms_hostname here to use Homer's management
+@@ -176,7 +182,7 @@ resources:
chronos_hostname=vellum.__zone__
ralf_session_store=vellum.__zone__
@@ -824,7 +842,7 @@ index 963352d..ff59895 100644
# Email server configuration
smtp_smarthost=localhost
-@@ -189,6 +196,8 @@ resources:
+@@ -189,6 +195,8 @@ resources:
turn_workaround=secret
ellis_api_key=secret
ellis_cookie_key=secret
@@ -833,7 +851,7 @@ index 963352d..ff59895 100644
EOF
/usr/share/clearwater/clearwater-config-manager/scripts/cw-config upload shared_config --autoconfirm --dir /tmp
-@@ -197,7 +206,6 @@ resources:
+@@ -197,7 +205,6 @@ resources:
# local_settings.py runs to pick up the configuration changes.
service clearwater-infrastructure restart
service ellis stop
@@ -841,7 +859,7 @@ index 963352d..ff59895 100644
# Function to give DNS record type and IP address for specified IP address
ip2rr() {
-@@ -228,6 +236,18 @@ resources:
+@@ -228,6 +235,18 @@ resources:
echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
service dnsmasq force-reload
@@ -861,7 +879,7 @@ index 963352d..ff59895 100644
public_mgmt_ip:
description: IP address in public management network
diff --git a/homer.yaml b/homer.yaml
-index 4337984..9a93cfb 100644
+index 4337984..d23adb5 100644
--- a/homer.yaml
+++ b/homer.yaml
@@ -23,26 +23,6 @@ parameters:
@@ -974,7 +992,7 @@ index 4337984..9a93cfb 100644
template: |
#!/bin/bash
-@@ -163,33 +124,6 @@ resources:
+@@ -163,36 +124,8 @@ resources:
exec > >(tee -a /var/log/clearwater-heat-homer.log) 2>&1
set -x
@@ -1006,9 +1024,13 @@ index 4337984..9a93cfb 100644
- /etc/init.d/signaling_namespace
-
# Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
-@@ -200,11 +134,8 @@ resources:
+- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
+- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
++ echo 'deb http://artifacts.opnfv.org/functest/clearwater/debian ./' > /etc/apt/sources.list.d/clearwater.list
+ apt-get update
+
+ # Configure /etc/clearwater/local_config.
+@@ -200,11 +133,8 @@ resources:
etcd_ip=__etcd_ip__
[ -n "$etcd_ip" ] || etcd_ip=__private_mgmt_ip__
cat > /etc/clearwater/local_config << EOF
@@ -1022,7 +1044,7 @@ index 4337984..9a93cfb 100644
public_hostname=homer-__index__.__zone__
etcd_cluster=$etcd_ip
EOF
-@@ -227,7 +158,7 @@ resources:
+@@ -227,7 +157,7 @@ resources:
while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
server __dns_mgmt_ip__
update add homer-__index__.__zone__. 30 $(ip2rr __public_mgmt_ip__)
@@ -1031,11 +1053,12 @@ index 4337984..9a93cfb 100644
send
EOF
} && [ $retries -lt 10 ]
-@@ -238,13 +169,21 @@ resources:
+@@ -237,14 +167,22 @@ resources:
+ sleep 5
done
- # Use the DNS server.
- # Use the DNS server.
+ # Use the DNS server.
echo 'nameserver __dns_mgmt_ip__' > /etc/dnsmasq.resolv.conf
echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
- mkdir -p /etc/netns/signaling
@@ -1056,7 +1079,7 @@ index 4337984..9a93cfb 100644
outputs:
public_mgmt_ip:
description: IP address in public management network
-@@ -252,9 +191,3 @@ outputs:
+@@ -252,9 +190,3 @@ outputs:
private_mgmt_ip:
description: IP address in private management network
value: { get_attr: [ mgmt_port, fixed_ips, 0, ip_address ] }
@@ -1416,7 +1439,7 @@ index 5921d32..c73fe2b 100644
- value: { get_resource: vellum_sig_inbound }
+ value: { get_resource: base_mgmt }
diff --git a/sprout.yaml b/sprout.yaml
-index 9c533b7..2649e11 100644
+index 9c533b7..b51750b 100644
--- a/sprout.yaml
+++ b/sprout.yaml
@@ -23,26 +23,6 @@ parameters:
@@ -1516,7 +1539,7 @@ index 9c533b7..2649e11 100644
template: |
#!/bin/bash
-@@ -156,33 +124,6 @@ resources:
+@@ -156,36 +124,8 @@ resources:
exec > >(tee -a /var/log/clearwater-heat-sprout.log) 2>&1
set -x
@@ -1548,9 +1571,13 @@ index 9c533b7..2649e11 100644
- /etc/init.d/signaling_namespace
-
# Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
-@@ -193,11 +134,8 @@ resources:
+- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
+- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
++ echo 'deb http://artifacts.opnfv.org/functest/clearwater/debian ./' > /etc/apt/sources.list.d/clearwater.list
+ apt-get update
+
+ # Configure /etc/clearwater/local_config.
+@@ -193,11 +133,8 @@ resources:
etcd_ip=__etcd_ip__
[ -n "$etcd_ip" ] || etcd_ip=__private_mgmt_ip__
cat > /etc/clearwater/local_config << EOF
@@ -1564,7 +1591,7 @@ index 9c533b7..2649e11 100644
public_hostname=__index__.sprout.__zone__
etcd_cluster=$etcd_ip
EOF
-@@ -220,10 +158,10 @@ resources:
+@@ -220,10 +157,10 @@ resources:
while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
server __dns_mgmt_ip__
update add sprout-__index__.__zone__. 30 $(ip2rr __public_mgmt_ip__)
@@ -1579,7 +1606,7 @@ index 9c533b7..2649e11 100644
update add sprout.__zone__. 30 NAPTR 0 0 "s" "SIP+D2T" "" _sip._tcp.sprout.__zone__.
update add _sip._tcp.sprout.__zone__. 30 SRV 0 0 5054 __index__.sprout.__zone__.
update add icscf.sprout.__zone__. 30 NAPTR 0 0 "s" "SIP+D2T" "" _sip._tcp.icscf.sprout.__zone__.
-@@ -242,17 +180,23 @@ resources:
+@@ -242,17 +179,23 @@ resources:
# Use the DNS server.
echo 'nameserver __dns_mgmt_ip__' > /etc/dnsmasq.resolv.conf
echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
@@ -1610,7 +1637,7 @@ index 9c533b7..2649e11 100644
- description: IP address in private signaling network
- value: { get_attr: [ sig_port, fixed_ips, 0, ip_address ] }
diff --git a/vellum.yaml b/vellum.yaml
-index aab71f9..375b3d8 100644
+index aab71f9..de15fcf 100644
--- a/vellum.yaml
+++ b/vellum.yaml
@@ -23,26 +23,6 @@ parameters:
@@ -1694,7 +1721,7 @@ index aab71f9..375b3d8 100644
user_data_format: RAW
user_data:
str_replace:
-@@ -141,47 +112,19 @@ resources:
+@@ -141,51 +112,22 @@ resources:
__zone__: { get_param: zone }
__public_mgmt_ip__: { get_attr: [ mgmt_floating_ip, floating_ip_address ] }
__private_mgmt_ip__: { get_attr: [ mgmt_port, fixed_ips, 0, ip_address ] }
@@ -1744,8 +1771,13 @@ index aab71f9..375b3d8 100644
+ sysctl -w net.ipv6.conf.lo.disable_ipv6=0
# Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
-@@ -193,11 +136,8 @@ resources:
+- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
+- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
++ echo 'deb http://artifacts.opnfv.org/functest/clearwater/debian ./' > /etc/apt/sources.list.d/clearwater.list
+ apt-get update
+
+ # Configure /etc/clearwater/local_config.
+@@ -193,11 +135,8 @@ resources:
etcd_ip=__etcd_ip__
[ -n "$etcd_ip" ] || etcd_ip=__private_mgmt_ip__
cat > /etc/clearwater/local_config << EOF
@@ -1759,7 +1791,7 @@ index aab71f9..375b3d8 100644
public_hostname=__index__.vellum.__zone__
etcd_cluster=$etcd_ip
EOF
-@@ -206,7 +146,7 @@ resources:
+@@ -206,7 +145,7 @@ resources:
mkdir -p /etc/chronos
cat > /etc/chronos/chronos.conf << EOF
[http]
@@ -1768,7 +1800,7 @@ index aab71f9..375b3d8 100644
bind-port = 7253
threads = 50
-@@ -218,7 +158,7 @@ resources:
+@@ -218,7 +157,7 @@ resources:
enabled = true
[dns]
@@ -1777,7 +1809,7 @@ index aab71f9..375b3d8 100644
EOF
# Now install the software.
-@@ -239,7 +179,7 @@ resources:
+@@ -239,7 +178,7 @@ resources:
while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
server __dns_mgmt_ip__
update add vellum-__index__.__zone__. 30 $(ip2rr __public_mgmt_ip__)
@@ -1786,7 +1818,7 @@ index aab71f9..375b3d8 100644
send
EOF
} && [ $retries -lt 10 ]
-@@ -252,10 +192,19 @@ resources:
+@@ -252,10 +191,19 @@ resources:
# Use the DNS server.
echo 'nameserver __dns_mgmt_ip__' > /etc/dnsmasq.resolv.conf
echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
@@ -1808,7 +1840,7 @@ index aab71f9..375b3d8 100644
outputs:
public_mgmt_ip:
description: IP address in public management network
-@@ -263,6 +212,3 @@ outputs:
+@@ -263,6 +211,3 @@ outputs:
private_mgmt_ip:
description: IP address in private management network
value: { get_attr: [ mgmt_port, fixed_ips, 0, ip_address ] }
diff --git a/docker/vnf/testcases.yaml b/docker/vnf/testcases.yaml
index 7d5548b38..6b483af6a 100644
--- a/docker/vnf/testcases.yaml
+++ b/docker/vnf/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: vnf
- order: 4
description: >-
Collection of VNF test cases.
testcases:
@@ -24,6 +23,8 @@ tiers:
This test case deploys an OpenSource vIMS solution from
Clearwater using the Cloudify orchestrator. It also runs
some signaling traffic.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: cloudify_ims
@@ -36,6 +37,8 @@ tiers:
This test case deploys an OpenSource vIMS solution from
Clearwater using the OpenStack Heat orchestrator.
It also runs some signaling traffic.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: heat_ims
@@ -46,6 +49,8 @@ tiers:
blocking: false
description: >-
This test case is vRouter testing.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: vyos_vrouter
@@ -57,5 +62,7 @@ tiers:
description: >-
vEPC validation with Juju as VNF manager and ABoT as test
executor.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: juju_epc
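Editor's note (not part of the change above): the NO_TENANT_NETWORK dependency added to each VNF test case is a negative-lookahead regex, so the cases run unless the variable is literally "true" or "True". Assuming the dependency value is checked with a plain regex search against the environment variable, a quick sanity check looks like this (values are examples only)::

    import re

    PATTERN = r'^(?![tT]rue$)'

    for value in ['', 'false', 'False', 'true', 'True', '1']:
        runs = bool(re.search(PATTERN, value))
        print(repr(value), '->', 'run' if runs else 'skip')
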
diff --git a/docs/conf.py b/docs/conf.py
index 2be6a5ab7..7a8d1858d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -175,3 +175,5 @@ texinfo_documents = [
'One line description of project.',
'Miscellaneous'),
]
+
+spelling_word_list_filename = 'spelling_wordlist.txt'
diff --git a/docs/release/release-notes/functest-release.rst b/docs/release/release-notes/functest-release.rst
index 1623a69e4..d9dd52594 100644
--- a/docs/release/release-notes/functest-release.rst
+++ b/docs/release/release-notes/functest-release.rst
@@ -34,14 +34,17 @@ The internal test cases are:
* cinder_test
* odl
* tempest_smoke
- * neutron-tempest-plugin-api
+ * tempest_horizon
+ * tempest_neutron
+ * tempest_cinder
+ * tempest_keystone
+ * tempest_heat
* rally_sanity
* refstack_defcore
* patrole
- * neutron_trunk
* networking-bgpvpn
* networking-sfc
- * barbican
+ * tempest_barbican
* tempest_full
* tempest_scenario
* rally_full
@@ -64,8 +67,17 @@ Kubernetes
The internal test cases are:
+ * k8s_quick
* k8s_smoke
* k8s_conformance
+ * xrally_kubernetes
+ * kube_hunter
+ * kube_bench_master
+ * kube_bench_node
+ * xrally_kubernetes_full
+ * k8s_vims
+ * helm_vims
+ * cnf_conformance
The OPNFV projects integrated into Functest framework for automation are:
@@ -94,19 +106,23 @@ Software
* https://hub.docker.com/r/opnfv/functest-benchmarking
* https://hub.docker.com/r/opnfv/functest-features
* https://hub.docker.com/r/opnfv/functest-vnf
+ * https://hub.docker.com/r/opnfv/functest-smoke-cntt
+ * https://hub.docker.com/r/opnfv/functest-benchmarking-cntt
Functest Docker images (Kubernetes):
* https://hub.docker.com/r/opnfv/functest-kubernetes-healthcheck
* https://hub.docker.com/r/opnfv/functest-kubernetes-smoke
- * https://hub.docker.com/r/opnfv/functest-kubernetes-features
+ * https://hub.docker.com/r/opnfv/functest-kubernetes-security
+ * https://hub.docker.com/r/opnfv/functest-kubernetes-benchmarking
+ * https://hub.docker.com/r/opnfv/functest-kubernetes-cnf
Docker tag for hunter: hunter
Documents
---------
- * Functests Guides: https://functest.readthedocs.io/en/stable-hunter/
+ * Functest Guides: https://functest.readthedocs.io/en/stable-hunter/
* API Docs: https://functest-api.readthedocs.io/en/stable-hunter/
Version change
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index c19d55610..25c2cbdfb 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -10,5 +10,3 @@ Functest Release Notes
:maxdepth: 4
functest-release.rst
-
-Build date: |today|
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt
new file mode 100644
index 000000000..1fb7a6a8c
--- /dev/null
+++ b/docs/spelling_wordlist.txt
@@ -0,0 +1,164 @@
+ABot
+admin
+adminURL
+api
+auth
+backend
+barbican
+benchmarking
+Benchmarking
+bgpvpn
+cacerts
+chan
+ci
+cntt
+Clearwater
+clearwaterims
+cloudify
+Cloudify
+cnf
+cntt
+conf
+config
+dashboarding
+defcore
+Defcore
+Deliverables
+dhcp
+dockerhub
+enduser
+env
+EnvironmentVariable
+epc
+fds
+frontend
+functest
+Functest
+functionalities
+guestbook
+Guestbook
+healthcheck
+Healthcheck
+http
+https
+hunter
+Hunter
+ims
+ini
+init
+interop
+Interop
+interoperability
+intra
+iptables
+Istio
+jenkins
+Joid
+juju
+Juju
+jumphost
+Jumphost
+kube
+kubernetes
+Kubernetes
+kvm
+linux
+login
+metadata
+Metadata
+middleware
+multisite
+netready
+nfv
+nofeature
+noha
+nosdn
+octavia
+odl
+onboarding
+ons
+Opendaylight
+openims
+openstack
+Openstack
+opnfv
+orchestrator
+Orchestrator
+organised
+os
+ovs
+patrole
+Patrole
+plugin
+pre
+proxified
+Reachability
+reconnection
+redis
+refactored
+refactoring
+refstack
+Refstack
+repo
+resolv
+restapi
+Restconf
+runnable
+screenshot
+sdn
+sfc
+signalling
+singlevm
+src
+stor
+su
+subfolder
+subnet
+Subnet
+subnets
+sudo
+systemctl
+Telco
+tenantnetwork
+testcase
+testcases
+TLS
+toolchain
+txt
+un
+url
+userdata
+userid
+username
+usr
+util
+utils
+UUID
+vdb
+vepc
+vEPC
+verifier
+versioned
+vgpu
+vIMS
+vims
+Virtualised
+Virtualized
+vm
+VM
+vmready
+vmtp
+vnf
+Vnf
+vping
+vPing
+vRNC
+vrouter
+vRouter
+vyos
+wconsole
+wiki
+Workflow
+xrally
+xtesting
+yaml
diff --git a/docs/testing/developer/devguide/index.rst b/docs/testing/developer/devguide/index.rst
index 4091a39f9..dc6d32145 100644
--- a/docs/testing/developer/devguide/index.rst
+++ b/docs/testing/developer/devguide/index.rst
@@ -263,7 +263,7 @@ _`[3]`: https://github.com/opnfv/releng-testresults/tree/master/reporting
_`[4]`: https://wiki.opnfv.org/display/functest/2017+Beijing?preview=%2F11699623%2F11700523%2FTestAPI+-+test+results+collection+service.pptx
-_`[5]`: https://opnfv.biterg.io/login?nextUrl=%2Fgoto%2F283dba93ca18e95964f852c63af1d1ba
+_`[5]`: https://lfanalytics.io/projects/lfn%2Fopnfv/dashboard
_`[6]`: https://wiki.opnfv.org/pages/viewpage.action?pageId=7768932
diff --git a/docs/testing/user/configguide/ci.rst b/docs/testing/user/configguide/ci.rst
deleted file mode 100644
index aee51a835..000000000
--- a/docs/testing/user/configguide/ci.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-4.0
-
-Integration in CI
-=================
-In CI we use the Docker images and execute the appropriate commands within the
-container from Jenkins.
-
-4 steps have been defined::
- * functest-cleanup: clean existing functest dockers on the jumphost
- * functest-daily: run dockers opnfv/functest-* (healthcheck, smoke, features,
- vnf)
- * functest-store-results: push logs to artifacts
-
-See `[1]`_ for details.
-
-.. _`[1]`: https://github.com/opnfv/releng/blob/master/jjb/functest/functest-daily-jobs.yaml
diff --git a/docs/testing/user/configguide/configguide.rst b/docs/testing/user/configguide/configguide.rst
index 13a8b591a..e65f358f7 100644
--- a/docs/testing/user/configguide/configguide.rst
+++ b/docs/testing/user/configguide/configguide.rst
@@ -13,10 +13,11 @@ Functest Dockers for OpenStack deployment
-----------------------------------------
Docker images are available on the dockerhub:
- * opnfv/functest-core
* opnfv/functest-healthcheck
* opnfv/functest-smoke
+ * opnfv/functest-smoke-cntt
* opnfv/functest-benchmarking
+ * opnfv/functest-benchmarking-cntt
* opnfv/functest-features
* opnfv/functest-vnf
@@ -28,8 +29,12 @@ cat env::
DEPLOY_SCENARIO=XXX # if not os-nosdn-nofeature-noha scenario
NAMESERVER=XXX # if not 8.8.8.8
- EXTERNAL_NETWORK=XXX # if not first network with router:external=True
- NEW_USER_ROLE=XXX # if not member
+ EXTERNAL_NETWORK=XXX # if not first network with router:external=True
+ DASHBOARD_URL=XXX # else tempest_horizon will be skipped
+ NEW_USER_ROLE=XXX # if not member
+ SDN_CONTROLLER_IP=XXX # if odl scenario
+ VOLUME_DEVICE_NAME=XXX # if not vdb
+ FLAVOR_EXTRA_SPECS=hw:mem_page_size:large # if fdio scenarios
See section on environment variables for details.
@@ -42,6 +47,7 @@ cat env_file::
export OS_PROJECT_NAME=XXX
export OS_PASSWORD=XXX
export OS_IDENTITY_API_VERSION=3
+ export OS_REGION_NAME=XXX
See section on OpenStack credentials for details.
@@ -52,6 +58,7 @@ Create a directory for the different images (attached as a Docker volume)::
images/cirros-0.4.0-aarch64-disk.img
images/cirros-0.4.0-x86_64-disk.img
images/cloudify-docker-manager-community-19.01.24.tar
+ images/Fedora-Cloud-Base-30-1.2.x86_64.qcow2
images/shaker-image-arm64.qcow2
images/shaker-image.qcow2
images/ubuntu-14.04-server-cloudimg-amd64-disk1.img
@@ -74,17 +81,18 @@ Results shall be displayed as follows::
+--------------------------+------------------+---------------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+--------------------------+------------------+---------------------+------------------+----------------+
- | connection_check | functest | healthcheck | 00:01 | PASS |
+ | connection_check | functest | healthcheck | 00:03 | PASS |
| tenantnetwork1 | functest | healthcheck | 00:08 | PASS |
- | tenantnetwork2 | functest | healthcheck | 00:09 | PASS |
- | vmready1 | functest | healthcheck | 00:13 | PASS |
- | vmready2 | functest | healthcheck | 00:12 | PASS |
- | singlevm1 | functest | healthcheck | 00:48 | PASS |
- | singlevm2 | functest | healthcheck | 00:35 | PASS |
- | vping_ssh | functest | healthcheck | 00:53 | PASS |
- | vping_userdata | functest | healthcheck | 00:51 | PASS |
- | cinder_test | functest | healthcheck | 01:42 | PASS |
- | tempest_smoke | functest | healthcheck | 04:55 | PASS |
+ | tenantnetwork2 | functest | healthcheck | 00:16 | PASS |
+ | vmready1 | functest | healthcheck | 00:09 | PASS |
+ | vmready2 | functest | healthcheck | 00:10 | PASS |
+ | singlevm1 | functest | healthcheck | 00:51 | PASS |
+ | singlevm2 | functest | healthcheck | 00:41 | PASS |
+ | vping_ssh | functest | healthcheck | 00:56 | PASS |
+ | vping_userdata | functest | healthcheck | 00:42 | PASS |
+ | cinder_test | functest | healthcheck | 02:19 | PASS |
+ | tempest_smoke | functest | healthcheck | 07:02 | PASS |
+ | tempest_horizon | functest | healthcheck | 00:52 | PASS |
| odl | functest | healthcheck | 00:00 | SKIP |
+--------------------------+------------------+---------------------+------------------+----------------+
@@ -102,24 +110,52 @@ Run smoke suite::
Results shall be displayed as follows::
- +------------------------------------+------------------+---------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +------------------------------------+------------------+---------------+------------------+----------------+
- | neutron-tempest-plugin-api | functest | smoke | 07:29 | PASS |
- | rally_sanity | functest | smoke | 13:07 | PASS |
- | refstack_defcore | functest | smoke | 06:32 | PASS |
- | tempest_full | functest | smoke | 33:01 | PASS |
- | tempest_scenario | functest | smoke | 11:31 | PASS |
- | patrole | functest | smoke | 02:14 | PASS |
- | neutron_trunk | functest | smoke | 00:00 | SKIP |
- | networking-bgpvpn | functest | smoke | 01:09 | PASS |
- | networking-sfc | functest | smoke | 00:00 | SKIP |
- | barbican | functest | smoke | 02:12 | PASS |
- +------------------------------------+------------------+---------------+------------------+----------------+
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | tempest_neutron | functest | smoke | 16:49 | PASS |
+ | tempest_cinder | functest | smoke | 01:39 | PASS |
+ | tempest_keystone | functest | smoke | 00:57 | PASS |
+ | tempest_heat | functest | smoke | 24:33 | PASS |
+ | rally_sanity | functest | smoke | 18:41 | PASS |
+ | refstack_defcore | functest | smoke | 10:38 | PASS |
+ | tempest_full | functest | smoke | 55:19 | PASS |
+ | tempest_scenario | functest | smoke | 11:06 | PASS |
+ | tempest_slow | functest | smoke | 61:39 | PASS |
+ | patrole | functest | smoke | 02:46 | PASS |
+ | networking-bgpvpn | functest | smoke | 00:00 | SKIP |
+ | networking-sfc | functest | smoke | 00:00 | SKIP |
+ | tempest_barbican | functest | smoke | 02:30 | PASS |
+ +---------------------------+------------------+---------------+------------------+----------------+
Note: if the scenario does not support some tests, they are indicated as SKIP.
See User guide for details.
+Testing smoke CNTT suite
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Run smoke-cntt suite::
+
+ sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-smoke-cntt:hunter
+
+Results shall be displayed as follows::
+
+ +-------------------------------+------------------+---------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------------+------------------+---------------+------------------+----------------+
+ | tempest_neutron_cntt | functest | smoke | 13:54 | PASS |
+ | tempest_cinder_cntt | functest | smoke | 01:46 | PASS |
+ | tempest_keystone_cntt | functest | smoke | 00:58 | PASS |
+ | tempest_heat_cntt | functest | smoke | 25:31 | PASS |
+ | rally_sanity_cntt | functest | smoke | 18:50 | PASS |
+ | tempest_full_cntt | functest | smoke | 44:32 | PASS |
+ | tempest_scenario_cntt | functest | smoke | 11:14 | PASS |
+ | tempest_slow_cntt | functest | smoke | 43:55 | PASS |
+ +-------------------------------+------------------+---------------+------------------+----------------+
+
Testing benchmarking suite
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -135,15 +171,36 @@ Results shall be displayed as follows::
+--------------------+------------------+----------------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+--------------------+------------------+----------------------+------------------+----------------+
- | rally_full | functest | benchmarking | 121:55 | PASS |
- | rally_jobs | functest | benchmarking | 24:21 | PASS |
- | vmtp | functest | benchmarking | 00:00 | SKIP |
- | shaker | functest | benchmarking | 00:00 | SKIP |
+ | rally_full | functest | benchmarking | 108:34 | PASS |
+ | rally_jobs | functest | benchmarking | 22:07 | PASS |
+ | vmtp | functest | benchmarking | 15:38 | PASS |
+ | shaker | functest | benchmarking | 25:12 | PASS |
+--------------------+------------------+----------------------+------------------+----------------+
Note: if the scenario does not support some tests, they are indicated as SKIP.
See User guide for details.
+Testing benchmarking CNTT suite
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Run benchmarking-cntt suite::
+
+ sudo docker run --env-file env \
+ -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ opnfv/functest-benchmarking-cntt:hunter
+
+Results shall be displayed as follows::
+
+ +-------------------------+------------------+----------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------+------------------+----------------------+------------------+----------------+
+ | rally_full_cntt | functest | benchmarking | 106:60 | PASS |
+ | rally_jobs_cntt | functest | benchmarking | 21:16 | PASS |
+ | vmtp | functest | benchmarking | 16:15 | PASS |
+ | shaker | functest | benchmarking | 25:09 | PASS |
+ +-------------------------+------------------+----------------------+------------------+----------------+
+
Testing features suite
^^^^^^^^^^^^^^^^^^^^^^
@@ -184,21 +241,22 @@ Results shall be displayed as follows::
+----------------------+------------------+--------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+----------------------+------------------+--------------+------------------+----------------+
- | cloudify | functest | vnf | 06:42 | PASS |
- | cloudify_ims | functest | vnf | 39:21 | PASS |
- | heat_ims | functest | vnf | 34:33 | PASS |
- | vyos_vrouter | functest | vnf | 20:20 | PASS |
- | juju_epc | functest | vnf | 56:55 | PASS |
+ | cloudify | functest | vnf | 04:35 | PASS |
+ | cloudify_ims | functest | vnf | 24:16 | PASS |
+ | heat_ims | functest | vnf | 30:36 | PASS |
+ | vyos_vrouter | functest | vnf | 15:37 | PASS |
+ | juju_epc | functest | vnf | 34:39 | PASS |
+----------------------+------------------+--------------+------------------+----------------+
Functest Dockers for Kubernetes deployment
------------------------------------------
Docker images are available on the dockerhub:
- * opnfv/functest-kubernetes-core
- * opnfv/functest-kubernetest-healthcheck
- * opnfv/functest-kubernetest-smoke
- * opnfv/functest-kubernetest-features
+ * opnfv/functest-kubernetes-healthcheck
+ * opnfv/functest-kubernetes-smoke
+ * opnfv/functest-kubernetes-security
+ * opnfv/functest-kubernetes-benchmarking
+ * opnfv/functest-kubernetes-cnf
Preparing your environment
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -224,7 +282,8 @@ Results shall be displayed as follows::
+-------------------+------------------+---------------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+-------------------+------------------+---------------------+------------------+----------------+
- | k8s_smoke | functest | healthcheck | 02:27 | PASS |
+ | k8s_quick | functest | healthcheck | 00:20 | PASS |
+ | k8s_smoke | functest | healthcheck | 00:45 | PASS |
+-------------------+------------------+---------------------+------------------+----------------+
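If the suite cannot reach the cluster at all, a first check is that the mounted config file is usable outside the container as well; assuming kubectl is installed on the Jumphost::

  kubectl --kubeconfig $(pwd)/config get nodes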
Testing smoke suite
@@ -238,34 +297,72 @@ Run smoke suite::
Results shall be displayed as follows::
- +-------------------------+------------------+---------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +-------------------------+------------------+---------------+------------------+----------------+
- | k8s_conformance | functest | smoke | 57:14 | PASS |
- +-------------------------+------------------+---------------+------------------+----------------+
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | k8s_conformance | functest | smoke | 100:50 | PASS |
+ | xrally_kubernetes | functest | smoke | 13:19 | PASS |
+ +---------------------------+------------------+---------------+------------------+----------------+
-Testing features suite
+Testing security suite
^^^^^^^^^^^^^^^^^^^^^^
-Run features suite::
+Run security suite::
+
+ sudo docker run -it --env-file env \
+ -v $(pwd)/config:/root/.kube/config \
+ opnfv/functest-kubernetes-security:hunter
+
+Results shall be displayed as follows::
+
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | kube_hunter | functest | security | 00:19 | PASS |
+ | kube_bench_master | functest | security | 00:01 | PASS |
+ | kube_bench_node | functest | security | 00:01 | PASS |
+ +---------------------------+------------------+------------------+------------------+----------------+
+
+Testing benchmarking suite
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Run benchmarking suite::
+
+ sudo docker run -it --env-file env \
+ -v $(pwd)/config:/root/.kube/config \
+ opnfv/functest-kubernetes-benchmarking:hunter
+
+Results shall be displayed as follows::
+
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | xrally_kubernetes_full | functest | benchmarking | 37:48 | PASS |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+
+Testing cnf suite
+^^^^^^^^^^^^^^^^^
+
+Run cnf suite::
sudo docker run -it --env-file env \
-v $(pwd)/config:/root/.kube/config \
- opnfv/functest-kubernetes-features:hunter
+ opnfv/functest-kubernetes-cnf:hunter
Results shall be displayed as follows::
- +----------------------+------------------+------------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +----------------------+------------------+------------------+------------------+----------------+
- | stor4nfv_k8s | stor4nfv | stor4nfv | 00:00 | SKIP |
- | clover_k8s | clover | clover | 00:00 | SKIP |
- +----------------------+------------------+------------------+------------------+----------------+
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | k8s_vims | functest | cnf | 09:09 | PASS |
+ | helm_vims | functest | cnf | 07:55 | PASS |
+ | cnf_conformance | functest | cnf | 02:18 | PASS |
+ +-------------------------+------------------+--------------+------------------+----------------+
Environment variables
=====================
-Several environement variables may be specified:
+Several environment variables may be specified:
* INSTALLER_IP=<Specific IP Address>
* DEPLOY_SCENARIO=<vim>-<controller>-<nfv_feature>-<ha_mode>
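Each variable can also be overridden directly on the docker command line rather than through the env file; for instance (the scenario value is only an example)::

  sudo docker run -e DEPLOY_SCENARIO=os-nosdn-nofeature-noha \
      --env-file env \
      -v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
      -v $(pwd)/images:/home/opnfv/functest/images \
      opnfv/functest-healthcheck:hunter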
diff --git a/docs/testing/user/configguide/index.rst b/docs/testing/user/configguide/index.rst
index e7967adb3..e5963435e 100644
--- a/docs/testing/user/configguide/index.rst
+++ b/docs/testing/user/configguide/index.rst
@@ -11,7 +11,6 @@ Functest Installation Guide
intro.rst
prerequisites.rst
configguide.rst
- ci.rst
References
==========
@@ -20,4 +19,4 @@ References
IRC support channel: #opnfv-functest
-.. _`[1]`: https://github.com/opnfv/releng/blob/master/jjb/functest/functest-daily-jobs.yaml
+.. _`[1]`: https://github.com/opnfv/releng/blob/master/jjb/functest/functest.yaml
diff --git a/docs/testing/user/configguide/prerequisites.rst b/docs/testing/user/configguide/prerequisites.rst
index c7b52dc54..dab4a9e6b 100644
--- a/docs/testing/user/configguide/prerequisites.rst
+++ b/docs/testing/user/configguide/prerequisites.rst
@@ -92,4 +92,4 @@ should thus be known. Ensure you can reach each node in the SUT, from the
Jumphost using the 'ping' command using the respective IP address on the
public/external network for each node in the SUT. The details of how to
determine the needed IP addresses for each node in the SUT may vary according
-to the used installer and are therefore ommitted here.
+to the used installer and are therefore omitted here.
diff --git a/docs/testing/user/userguide/reporting.rst b/docs/testing/user/userguide/reporting.rst
index 7c8c48ece..8fad55d33 100644
--- a/docs/testing/user/userguide/reporting.rst
+++ b/docs/testing/user/userguide/reporting.rst
@@ -48,7 +48,7 @@ and features) corresponding to this scenario.
All the testcases (X) listed in the table are runnable on os-odl_l2-nofeature
scenarios.
Please note that other test cases (e.g. sfc_odl, bgpvpn) need ODL configuration
-addons and, as a consequence, specific scenario.
+add-ons and, as a consequence, specific scenario.
They are not considered runnable on the generic odl_l2 scenario.
@@ -82,7 +82,7 @@ Therefore the scoring provides 3 types of indicators:
* the maturity: if the percentage (scoring/target scoring * 100) is high, it
means that all the tests are PASS
* the stability: as the number of iterations is included in the calculation,
- the pecentage can be high only if the scenario is run regularly (at least
+ the percentage can be high only if the scenario is run regularly (at least
more than 4 iterations over the last 10 days in CI)
In any case, the scoring is used to give feedback to the other projects and
diff --git a/docs/testing/user/userguide/test_details.rst b/docs/testing/user/userguide/test_details.rst
index 0315498ce..f28c6862b 100644
--- a/docs/testing/user/userguide/test_details.rst
+++ b/docs/testing/user/userguide/test_details.rst
@@ -454,6 +454,6 @@ The kubernetes testcases are distributed across various Tiers:
.. _`[11]`: https://robotframework.org/
.. _`[13]`: https://wiki.opnfv.org/display/PROJ/SNAPS-OO
.. _`[14]`: https://github.com/oolorg/opnfv-functest-vrouter
-.. _`[15]`: https://www.rebaca.com/what-we-do/abot-5g-network-simulator/
+.. _`[15]`: https://github.com/RebacaInc/abot_charm
.. _`[16]`: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md
.. _`[17]`: https://github.com/Metaswitch/clearwater-heat/blob/release-129/clearwater.yaml
diff --git a/docs/testing/user/userguide/test_overview.rst b/docs/testing/user/userguide/test_overview.rst
index e18e32748..8aa05fb21 100644
--- a/docs/testing/user/userguide/test_overview.rst
+++ b/docs/testing/user/userguide/test_overview.rst
@@ -18,7 +18,7 @@ The current list of test suites can be distributed over 5 main domains:
Functest test suites are also distributed in the OPNFV testing categories:
healthcheck, smoke, features, components, performance, VNF, Stress tests.
-All the Healthcheck and smoke tests of a given scenario must be succesful to
+All the Healthcheck and smoke tests of a given scenario must be successful to
validate the scenario for the release.
+-------------+---------------+------------+----------------------------------+
@@ -129,7 +129,7 @@ validate the scenario for the release.
| VNF | vnf | cloudify | Example of a real VNF deployment |
| | | \_ims | to show the NFV capabilities of |
| | | | the platform. The IP Multimedia |
-| | | | Subsytem is a typical Telco test |
+| | | | Subsystem is a typical Telco test|
| | | | case, referenced by ETSI. |
| | | | It provides a fully functional |
| | | | VoIP System |
@@ -224,7 +224,7 @@ and DEPLOY_SCENARIO) to automatically determine the valid test cases, for each
given environment.
A convenience Functest CLI utility is also available to simplify setting up the
-Functest evironment, management of the OpenStack environment (e.g. resource
+Functest environment, management of the OpenStack environment (e.g. resource
clean-up) and for executing tests.
The Functest CLI organises the testcases into logical Tiers, which contain in
turn one or more testcases. The CLI allows execution of a single specified
diff --git a/docs/testing/user/userguide/test_results.rst b/docs/testing/user/userguide/test_results.rst
index bb28989ae..dca104a64 100644
--- a/docs/testing/user/userguide/test_results.rst
+++ b/docs/testing/user/userguide/test_results.rst
@@ -21,68 +21,105 @@ end of each suite and can be described as follow.
Healthcheck suite::
- +----------------------------+------------------+---------------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +----------------------------+------------------+---------------------+------------------+----------------+
- | connection_check | functest | healthcheck | 00:07 | PASS |
- | api_check | functest | healthcheck | 07:46 | PASS |
- | snaps_health_check | functest | healthcheck | 00:36 | PASS |
- +----------------------------+------------------+---------------------+------------------+----------------+
+ +--------------------------+------------------+---------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +--------------------------+------------------+---------------------+------------------+----------------+
+ | connection_check | functest | healthcheck | 00:03 | PASS |
+ | tenantnetwork1 | functest | healthcheck | 00:08 | PASS |
+ | tenantnetwork2 | functest | healthcheck | 00:16 | PASS |
+ | vmready1 | functest | healthcheck | 00:09 | PASS |
+ | vmready2 | functest | healthcheck | 00:10 | PASS |
+ | singlevm1 | functest | healthcheck | 00:51 | PASS |
+ | singlevm2 | functest | healthcheck | 00:41 | PASS |
+ | vping_ssh | functest | healthcheck | 00:56 | PASS |
+ | vping_userdata | functest | healthcheck | 00:42 | PASS |
+ | cinder_test | functest | healthcheck | 02:19 | PASS |
+ | tempest_smoke | functest | healthcheck | 07:02 | PASS |
+ | tempest_horizon | functest | healthcheck | 00:52 | PASS |
+ | odl | functest | healthcheck | 00:00 | SKIP |
+ +--------------------------+------------------+---------------------+------------------+----------------+
Smoke suite::
- +------------------------------+------------------+---------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +------------------------------+------------------+---------------+------------------+----------------+
- | vping_ssh | functest | smoke | 00:57 | PASS |
- | vping_userdata | functest | smoke | 00:33 | PASS |
- | tempest_smoke_serial | functest | smoke | 13:22 | PASS |
- | rally_sanity | functest | smoke | 24:07 | PASS |
- | refstack_defcore | functest | smoke | 05:21 | PASS |
- | patrole | functest | smoke | 04:29 | PASS |
- | snaps_smoke | functest | smoke | 46:54 | PASS |
- | odl | functest | smoke | 00:00 | SKIP |
- | neutron_trunk | functest | smoke | 00:00 | SKIP |
- +------------------------------+------------------+---------------+------------------+----------------+
-Features suite::
-
- +-----------------------------+------------------------+------------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +-----------------------------+------------------------+------------------+------------------+----------------+
- | doctor-notification | doctor | features | 00:00 | SKIP |
- | bgpvpn | sdnvpn | features | 00:00 | SKIP |
- | functest-odl-sfc | sfc | features | 00:00 | SKIP |
- | barometercollectd | barometer | features | 00:00 | SKIP |
- | fds | fastdatastacks | features | 00:00 | SKIP |
- +-----------------------------+------------------------+------------------+------------------+----------------+
-
-Components suite::
-
- +-------------------------------+------------------+--------------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +-------------------------------+------------------+--------------------+------------------+----------------+
- | tempest_full_parallel | functest | components | 48:28 | PASS |
- | rally_full | functest | components | 126:02 | PASS |
- +-------------------------------+------------------+--------------------+------------------+----------------+
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | tempest_neutron | functest | smoke | 16:49 | PASS |
+ | tempest_cinder | functest | smoke | 01:39 | PASS |
+ | tempest_keystone | functest | smoke | 00:57 | PASS |
+ | tempest_heat | functest | smoke | 24:33 | PASS |
+ | rally_sanity | functest | smoke | 18:41 | PASS |
+ | refstack_defcore | functest | smoke | 10:38 | PASS |
+ | tempest_full | functest | smoke | 55:19 | PASS |
+ | tempest_scenario | functest | smoke | 11:06 | PASS |
+ | tempest_slow | functest | smoke | 61:39 | PASS |
+ | patrole | functest | smoke | 02:46 | PASS |
+ | networking-bgpvpn | functest | smoke | 00:00 | SKIP |
+ | networking-sfc | functest | smoke | 00:00 | SKIP |
+ | tempest_barbican | functest | smoke | 02:30 | PASS |
+ +---------------------------+------------------+---------------+------------------+----------------+
+
+Smoke CNTT suite::
+
+ +-------------------------------+------------------+---------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------------+------------------+---------------+------------------+----------------+
+ | tempest_neutron_cntt | functest | smoke | 13:54 | PASS |
+ | tempest_cinder_cntt | functest | smoke | 01:46 | PASS |
+ | tempest_keystone_cntt | functest | smoke | 00:58 | PASS |
+ | tempest_heat_cntt | functest | smoke | 25:31 | PASS |
+ | rally_sanity_cntt | functest | smoke | 18:50 | PASS |
+ | tempest_full_cntt | functest | smoke | 44:32 | PASS |
+ | tempest_scenario_cntt | functest | smoke | 11:14 | PASS |
+ | tempest_slow_cntt | functest | smoke | 43:55 | PASS |
+ +-------------------------------+------------------+---------------+------------------+----------------+
+
+Benchmarking suite::
+
+ +--------------------+------------------+----------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +--------------------+------------------+----------------------+------------------+----------------+
+ | rally_full | functest | benchmarking | 108:34 | PASS |
+ | rally_jobs | functest | benchmarking | 22:07 | PASS |
+ | vmtp | functest | benchmarking | 15:38 | PASS |
+ | shaker | functest | benchmarking | 25:12 | PASS |
+ +--------------------+------------------+----------------------+------------------+----------------+
+
+Benchmarking CNTT suite::
+
+ +-------------------------+------------------+----------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------+------------------+----------------------+------------------+----------------+
+ | rally_full_cntt | functest | benchmarking | 106:60 | PASS |
+ | rally_jobs_cntt | functest | benchmarking | 21:16 | PASS |
+ | vmtp | functest | benchmarking | 16:15 | PASS |
+ | shaker | functest | benchmarking | 25:09 | PASS |
+ +-------------------------+------------------+----------------------+------------------+----------------+
Vnf suite::
- +----------------------+------------------+--------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +----------------------+------------------+--------------+------------------+----------------+
- | cloudify_ims | functest | vnf | 28:15 | PASS |
- | vyos_vrouter | functest | vnf | 17:59 | PASS |
- | juju_epc | functest | vnf | 46:44 | PASS |
- +----------------------+------------------+--------------+------------------+----------------+
+ +----------------------+------------------+--------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +----------------------+------------------+--------------+------------------+----------------+
+ | cloudify | functest | vnf | 04:35 | PASS |
+ | cloudify_ims | functest | vnf | 24:16 | PASS |
+ | heat_ims | functest | vnf | 30:36 | PASS |
+ | vyos_vrouter | functest | vnf | 15:37 | PASS |
+ | juju_epc | functest | vnf | 34:39 | PASS |
+ +----------------------+------------------+--------------+------------------+----------------+
-Parser testcase::
+Features suite::
- +-----------------------+-----------------+------------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +-----------------------+-----------------+------------------+------------------+----------------+
- | parser-basics | parser | features | 00:00 | SKIP |
- +-----------------------+-----------------+------------------+------------------+----------------+
+ +-----------------------------+------------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-----------------------------+------------------------+------------------+------------------+----------------+
+ | doctor-notification | doctor | features | 00:00 | SKIP |
+ | bgpvpn | sdnvpn | features | 00:00 | SKIP |
+ | functest-odl-sfc | sfc | features | 00:00 | SKIP |
+ | barometercollectd | barometer | features | 00:00 | SKIP |
+ | fds | fastdatastacks | features | 00:00 | SKIP |
+ +-----------------------------+------------------------+------------------+------------------+----------------+
Functest Kubernetes test result::
@@ -97,28 +134,49 @@ Functest Kubernetes test result::
Kubernetes healthcheck suite::
- +-------------------+------------------+---------------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +-------------------+------------------+---------------------+------------------+----------------+
- | k8s_smoke | functest | healthcheck | 01:54 | PASS |
- +-------------------+------------------+---------------------+------------------+----------------+
+ +-------------------+------------------+---------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------+------------------+---------------------+------------------+----------------+
+ | k8s_quick | functest | healthcheck | 00:20 | PASS |
+ | k8s_smoke | functest | healthcheck | 00:45 | PASS |
+ +-------------------+------------------+---------------------+------------------+----------------+
Kubernetes smoke suite::
- +-------------------------+------------------+---------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +-------------------------+------------------+---------------+------------------+----------------+
- | k8s_conformance | functest | smoke | 57:47 | PASS |
- +-------------------------+------------------+---------------+------------------+----------------+
-
-Kubernetes features suite::
-
- +----------------------+------------------+------------------+------------------+----------------+
- | TEST CASE | PROJECT | TIER | DURATION | RESULT |
- +----------------------+------------------+------------------+------------------+----------------+
- | stor4nfv_k8s | stor4nfv | stor4nfv | 00:00 | SKIP |
- | clover_k8s | clover | clover | 00:00 | SKIP |
- +----------------------+------------------+------------------+------------------+----------------+
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+---------------+------------------+----------------+
+ | k8s_conformance | functest | smoke | 100:50 | PASS |
+ | xrally_kubernetes | functest | smoke | 13:19 | PASS |
+ +---------------------------+------------------+---------------+------------------+----------------+
+
+Kubernetes security suite::
+
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | kube_hunter | functest | security | 00:19 | PASS |
+ | kube_bench_master | functest | security | 00:01 | PASS |
+ | kube_bench_node | functest | security | 00:01 | PASS |
+ +---------------------------+------------------+------------------+------------------+----------------+
+
+Kubernetes benchmarking suite::
+
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | xrally_kubernetes_full | functest | benchmarking | 37:48 | PASS |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+
+Kubernetes cnf suite::
+
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | k8s_vims | functest | cnf | 09:09 | PASS |
+ | helm_vims | functest | cnf | 07:55 | PASS |
+ | cnf_conformance | functest | cnf | 02:18 | PASS |
+ +-------------------------+------------------+--------------+------------------+----------------+
Results are automatically pushed to the test results database; some additional
result files are pushed to OPNFV artifact web sites.
diff --git a/docs/testing/user/userguide/troubleshooting.rst b/docs/testing/user/userguide/troubleshooting.rst
index 3e1d044b2..389a0d2f5 100644
--- a/docs/testing/user/userguide/troubleshooting.rst
+++ b/docs/testing/user/userguide/troubleshooting.rst
@@ -133,8 +133,8 @@ here::
vPing_ssh- ERROR - Cannot establish connection to IP xxx.xxx.xxx.xxx. Aborting
If this is displayed, stop the test or wait for it to finish, if you have used
-the special method of test invocation with specific supression of OpenStack
-resource clean-up, as explained earler. It means that the Container can not
+the special method of test invocation with specific suppression of OpenStack
+resource clean-up, as explained earlier. It means that the Container can not
reach the Public/External IP assigned to the instance **opnfv-vping-2**. There
are many possible reasons, and they really depend on the chosen scenario. For
most of the ODL-L3 and ONOS scenarios this has been noticed and it is a known
@@ -158,7 +158,7 @@ container::
ping <public IP>
If the ping does not return anything, try to ping from the Host where the
-Docker container is running. If that solves the problem, check the iptable
+Docker container is running. If that solves the problem, check the iptables
rules because there might be some rules rejecting ICMP or TCP traffic
coming/going from/to the container.
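A simple way to inspect those rules on the host, given here only as a starting point, is::

  sudo iptables -L FORWARD -n -v
  sudo iptables -t nat -L POSTROUTING -n -v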
@@ -261,29 +261,19 @@ Functest offers a possibility to test a customized list of Tempest test cases.
To enable that, add a new entry in docker/smoke/testcases.yaml on the
"smoke" container with the following content::
- -
- case_name: tempest_custom
- project_name: functest
- criteria: 100
- blocking: false
- description: >-
- The test case allows running a customized list of tempest
- test cases
- dependencies:
- installer: ''
- scenario: ''
- run:
- module: 'functest.opnfv_tests.openstack.tempest.tempest'
- class: 'TempestCustom'
-
-Also, a list of the Tempest test cases must be provided to the container or
-modify the existing one in
-/usr/lib/python2.7/site-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt
-
-Example of custom list of tests 'my-custom-tempest-tests.txt'::
-
- tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke]
- tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
+ -
+ case_name: tempest_custom
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ The test case allows running a customized list of tempest
+ test cases
+ run:
+ name: tempest_common
+ args:
+ mode: "tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops|\
+ tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops"
This is an example of running a customized list of Tempest tests in Functest::
@@ -291,10 +281,8 @@ This is an example of running a customized list of Tempest tests in Functest::
-v $(pwd)/openstack.creds:/home/opnfv/functest/conf/env_file \
-v $(pwd)/images:/home/opnfv/functest/images \
-v $(pwd)/my-custom-testcases.yaml:/usr/lib/python2.7/site-packages/functest/ci/testcases.yaml \
- -v $(pwd)/my-custom-tempest-tests.txt:/usr/lib/python2.7/site-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt \
opnfv/functest-smoke run_tests -t tempest_custom
-
Rally
^^^^^
diff --git a/functest/ci/config_aarch64_patch.yaml b/functest/ci/config_aarch64_patch.yaml
index 37ed406ce..7bdb9e717 100644
--- a/functest/ci/config_aarch64_patch.yaml
+++ b/functest/ci/config_aarch64_patch.yaml
@@ -2,80 +2,92 @@
os:
vmready1:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
vmready2:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
singlevm1:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
singlevm2:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
vping_ssh:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
vping_userdata:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
cinder_test:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
tempest_smoke:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- neutron-tempest-plugin-api:
+ tempest_horizon:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
+ tempest_neutron:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_cinder:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_keystone:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_heat:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt:
+ /home/opnfv/functest/images/Fedora-Cloud-Base-30-1.2.aarch64.qcow2
+ rally_sanity:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
refstack_defcore:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- patrole:
+ tempest_full:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_scenario:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- vmtp:
- image:
- /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
-
- shaker:
- image: /home/opnfv/functest/images/shaker-image-arm64.qcow2
-
- neutron_trunk:
+ tempest_slow:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ patrole:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
networking-bgpvpn:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
networking-sfc:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- barbican:
+ tempest_barbican:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- tempest_full:
+ tempest_neutron_cntt:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- tempest_scenario:
+ tempest_cinder_cntt:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- rally_sanity:
+ tempest_keystone_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_heat_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt:
+ /home/opnfv/functest/images/Fedora-Cloud-Base-30-1.2.aarch64.qcow2
+ rally_sanity_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_full_cntt:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_scenario_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ tempest_slow_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
rally_full:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
rally_jobs:
image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
-
- tempest:
- use_custom_flavors: 'True'
+ rally_full_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
+ rally_jobs_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-aarch64-disk.img
diff --git a/functest/ci/config_patch.yaml b/functest/ci/config_patch.yaml
index 1287d92a5..0256d6d4d 100644
--- a/functest/ci/config_patch.yaml
+++ b/functest/ci/config_patch.yaml
@@ -1,4 +1,151 @@
---
+gsma:
+ tempest_smoke:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_horizon:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_neutron:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_cinder:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_keystone:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_heat:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ rally_sanity:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ refstack_defcore:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_full:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_scenario:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_slow:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ patrole:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ networking-bgpvpn:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ networking-sfc:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_barbican:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_neutron_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_cinder_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_keystone_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_heat_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ rally_sanity_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_full_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_scenario_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ tempest_slow_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ rally_full:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ rally_jobs:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ vmtp:
+ flavor_ram: 2048
+ flavor_vcpus: 1
+ flavor_disk: 40
+ shaker:
+ flavor_ram: 2048
+ flavor_vcpus: 1
+ flavor_disk: 40
+ rally_full_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ rally_jobs_cntt:
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ cloudify:
+ flavor_ram: 4096
+ flavor_vcpus: 2
+ flavor_disk: 40
+ cloudify_ims:
+ flavor_ram: 4096
+ flavor_vcpus: 2
+ flavor_disk: 40
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ heat_ims:
+ flavor_ram: 2048
+ flavor_vcpus: 1
+ flavor_disk: 40
+ vyos_vrouter:
+ flavor_ram: 4096
+ flavor_vcpus: 2
+ flavor_disk: 40
+ flavor_alt_ram: 2048
+ flavor_alt_vcpus: 1
+ flavor_alt_disk: 40
+ juju_epc:
+ flavor_ram: 2048
+ flavor_vcpus: 1
+ flavor_disk: 40
+ flavor_alt_ram: 4096
+ flavor_alt_vcpus: 2
+ flavor_alt_disk: 40
+
fdio:
vmready1:
flavor_ram: 1024
@@ -16,34 +163,62 @@ fdio:
flavor_ram: 1024
tempest_smoke:
flavor_ram: 1024
- neutron-tempest-plugin-api:
+ tempest_horizon:
+ flavor_ram: 1024
+ tempest_neutron:
+ flavor_ram: 1024
+ tempest_cinder:
+ flavor_ram: 1024
+ tempest_keystone:
+ flavor_ram: 1024
+ tempest_heat:
+ flavor_ram: 1024
+ rally_sanity:
flavor_ram: 1024
refstack_defcore:
flavor_ram: 1024
- patrole:
+ tempest_full:
flavor_ram: 1024
- vmtp:
- flavor_ram: 2048
- shaker:
+ tempest_scenario:
+ flavor_ram: 1024
+ tempest_slow:
flavor_ram: 1024
- neutron_trunk:
+ patrole:
flavor_ram: 1024
networking-bgpvpn:
flavor_ram: 1024
networking-sfc:
flavor_ram: 1024
- barbican:
+ tempest_barbican:
flavor_ram: 1024
- tempest_full:
+ tempest_neutron_cntt:
flavor_ram: 1024
- tempest_scenario:
+ tempest_cinder_cntt:
flavor_ram: 1024
- rally_sanity:
+ tempest_keystone_cntt:
+ flavor_ram: 1024
+ tempest_heat_cntt:
+ flavor_ram: 1024
+ rally_sanity_cntt:
+ flavor_ram: 1024
+ tempest_full_cntt:
+ flavor_ram: 1024
+ tempest_scenario_cntt:
+ flavor_ram: 1024
+ tempest_slow_cntt:
flavor_ram: 1024
rally_full:
flavor_ram: 1024
rally_jobs:
flavor_ram: 1024
+ vmtp:
+ flavor_ram: 2048
+ shaker:
+ flavor_ram: 1024
+ rally_full_cntt:
+ flavor_ram: 1024
+ rally_jobs_cntt:
+ flavor_ram: 1024
ovs:
vmready1:
@@ -62,34 +237,62 @@ ovs:
flavor_ram: 1024
tempest_smoke:
flavor_ram: 1024
- neutron-tempest-plugin-api:
+ tempest_horizon:
+ flavor_ram: 1024
+ tempest_neutron:
+ flavor_ram: 1024
+ tempest_cinder:
+ flavor_ram: 1024
+ tempest_keystone:
+ flavor_ram: 1024
+ tempest_heat:
+ flavor_ram: 1024
+ rally_sanity:
flavor_ram: 1024
refstack_defcore:
flavor_ram: 1024
- patrole:
+ tempest_full:
flavor_ram: 1024
- vmtp:
- flavor_ram: 2048
- shaker:
+ tempest_scenario:
flavor_ram: 1024
- neutron_trunk:
+ tempest_slow:
+ flavor_ram: 1024
+ patrole:
flavor_ram: 1024
networking-bgpvpn:
flavor_ram: 1024
networking-sfc:
flavor_ram: 1024
- barbican:
+ tempest_barbican:
flavor_ram: 1024
- tempest_full:
+ tempest_neutron_cntt:
flavor_ram: 1024
- tempest_scenario:
+ tempest_cinder_cntt:
flavor_ram: 1024
- rally_sanity:
+ tempest_keystone_cntt:
+ flavor_ram: 1024
+ tempest_heat_cntt:
+ flavor_ram: 1024
+ rally_sanity_cntt:
+ flavor_ram: 1024
+ tempest_full_cntt:
+ flavor_ram: 1024
+ tempest_scenario_cntt:
+ flavor_ram: 1024
+ tempest_slow_cntt:
flavor_ram: 1024
rally_full:
flavor_ram: 1024
rally_jobs:
flavor_ram: 1024
+ vmtp:
+ flavor_ram: 2048
+ shaker:
+ flavor_ram: 1024
+ rally_full_cntt:
+ flavor_ram: 1024
+ rally_jobs_cntt:
+ flavor_ram: 1024
vio:
vmready1:
@@ -118,29 +321,56 @@ vio:
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
- neutron-tempest-plugin-api:
+ tempest_horizon:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
+ tempest_neutron:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ tempest_cinder:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ tempest_keystone:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ tempest_heat:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt:
+ /home/opnfv/functest/images/Fedora-Cloud-Base-30-1.2.x86_64.vmdk
+ image_alt_format: vmdk
+ rally_sanity:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
refstack_defcore:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
- patrole:
+ tempest_full:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
- vmtp:
- image:
- /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-amd64-disk1.vmdk
+ tempest_scenario:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
- shaker:
- image: /home/opnfv/functest/images/shaker-image.vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ tempest_slow:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
- neutron_trunk:
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ patrole:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
@@ -155,30 +385,69 @@ vio:
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
- barbican:
+ tempest_barbican:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
- tempest_full:
+ tempest_neutron_cntt:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
- tempest_scenario:
+ tempest_cinder_cntt:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_alt_format: vmdk
- rally_sanity:
+ tempest_keystone_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ tempest_heat_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt:
+ /home/opnfv/functest/images/Fedora-Cloud-Base-30-1.2.x86_64.vmdk
+ image_alt_format: vmdk
+ rally_sanity_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ tempest_full_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ tempest_scenario_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
+ tempest_slow_cntt:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
+ image_alt: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_alt_format: vmdk
rally_full:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
rally_jobs:
image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
image_format: vmdk
+ vmtp:
+ image:
+ /home/opnfv/functest/images/ubuntu-14.04-server-cloudimg-amd64-disk1.vmdk
+ image_format: vmdk
+ shaker:
+ image: /home/opnfv/functest/images/shaker-image.vmdk
+ image_format: vmdk
+ rally_full_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
+ rally_jobs_cntt:
+ image: /home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.vmdk
+ image_format: vmdk
cloudify:
image:
/home/opnfv/functest/images/ubuntu-16.04-server-cloudimg-amd64-disk1.vmdk
diff --git a/functest/ci/download_images.sh b/functest/ci/download_images.sh
index 9612f85b2..19ab4ee7e 100644
--- a/functest/ci/download_images.sh
+++ b/functest/ci/download_images.sh
@@ -10,9 +10,9 @@ http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-aarch64-disk.img
-https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-arm64-uefi1.img
http://repository.cloudifysource.org/cloudify/19.01.24/community-release/cloudify-docker-manager-community-19.01.24.tar
http://testresults.opnfv.org/functest/vyos-1.1.8-amd64.qcow2
-http://testresults.opnfv.org/functest/shaker-image.qcow2
-http://testresults.opnfv.org/functest/shaker-image-arm64.qcow2
+http://testresults.opnfv.org/functest/shaker-image-1.3.0+stretch.qcow2
+https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/30/Cloud/x86_64/images/Fedora-Cloud-Base-30-1.2.x86_64.qcow2
+https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/30/Cloud/aarch64/images/Fedora-Cloud-Base-30-1.2.aarch64.qcow2
EOF
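The images listed above only have to end up in the images/ directory mounted into the containers, so fetching them by hand works as well; for example, for two of the URLs above::

  mkdir -p images && cd images
  wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
  wget https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/30/Cloud/x86_64/images/Fedora-Cloud-Base-30-1.2.x86_64.qcow2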
diff --git a/functest/ci/logging.debug.ini b/functest/ci/logging.debug.ini
index 51ee359c6..d7a361df0 100644
--- a/functest/ci/logging.debug.ini
+++ b/functest/ci/logging.debug.ini
@@ -100,7 +100,7 @@ args=(sys.stdout,)
[handler_file]
class=FileHandler
-level=DEBUG
+level=INFO
formatter=standard
args=("/home/opnfv/functest/results/functest.log",)
diff --git a/functest/ci/logging.ini b/functest/ci/logging.ini
index 278529e22..dde079493 100644
--- a/functest/ci/logging.ini
+++ b/functest/ci/logging.ini
@@ -1,5 +1,5 @@
[loggers]
-keys=root,functest,api,ci,core,cli,opnfv_tests,utils,xtesting,xci,xcore,energy,xutils,sfc,baro,warnings
+keys=root,functest,api,ci,core,cli,opnfv_tests,utils,xtesting,xci,xcore,xutils,sfc,baro,warnings
[handlers]
keys=console,wconsole,file,null
@@ -61,11 +61,6 @@ level=NOTSET
handlers=console
qualname=xtesting.core
-[logger_energy]
-level=NOTSET
-handlers=wconsole
-qualname=xtesting.energy
-
[logger_xutils]
level=NOTSET
handlers=wconsole
@@ -106,7 +101,7 @@ args=(sys.stdout,)
[handler_file]
class=FileHandler
-level=DEBUG
+level=INFO
formatter=standard
args=("/home/opnfv/functest/results/functest.log",)
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index 5cb5a56fd..17a478df0 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: healthcheck
- order: 0
description: >-
First tier to be executed to verify the basic
operations in the VIM.
@@ -28,6 +27,8 @@ tiers:
It creates and configures all tenant network resources
required by advanced testcases (subnet, network and
router).
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: tenantnetwork1
@@ -40,6 +41,8 @@ tiers:
It creates new user/project before creating and configuring
all tenant network resources required by a testcase
(subnet, network and router).
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: tenantnetwork2
@@ -163,14 +166,28 @@ tiers:
option:
- '--concurrency=4'
+ -
+ case_name: tempest_horizon
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Horizon project.
+ dependencies:
+ - DASHBOARD_URL: '^(?!\s*$).+'
+ run:
+ name: tempest_horizon
+ args:
+ mode: '^tempest_horizon.'
+
-
name: smoke
- order: 1
description: >-
Set of basic Functional tests to validate the OPNFV scenarios.
testcases:
-
- case_name: neutron-tempest-plugin-api
+ case_name: tempest_neutron
project_name: functest
criteria: 100
blocking: false
@@ -187,6 +204,52 @@ tiers:
- '--concurrency=4'
-
+ case_name: tempest_cinder
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Cinder project.
+ run:
+ name: tempest_common
+ args:
+ mode:
+ '(?!.*test_incremental_backup)(^cinder_tempest_plugin.)'
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_keystone
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Keystone project.
+ run:
+ name: tempest_common
+ args:
+ mode: 'keystone_tempest_plugin.'
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_heat
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Heat project.
+ run:
+ name: tempest_heat
+ args:
+ mode: '^heat_tempest_plugin.tests'
+ option:
+ - '--concurrency=1'
+
+ -
case_name: rally_sanity
project_name: functest
criteria: 100
@@ -206,6 +269,7 @@ tiers:
criteria: 100
blocking: false
deny_skipping: true
+ tests_count: 219
description: >-
This test case runs a sub group of tests of the OpenStack
Defcore testcases.
@@ -243,13 +307,30 @@ tiers:
the OpenStack deployment.
https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
run:
- name: tempest_scenario
+ name: tempest_common
args:
mode: '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)'
option:
- '--concurrency=1'
-
+ case_name: tempest_slow
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
+ run:
+ name: tempest_common
+ args:
+ mode: '(?=.*\[.*\bslow\b.*\])(^tempest\.)'
+ option:
+ - '--concurrency=1'
+
+ -
case_name: patrole
project_name: functest
criteria: 100
@@ -270,24 +351,6 @@ tiers:
- 'test_list_metadef_namespaces'
-
- case_name: neutron_trunk
- project_name: functest
- criteria: 100
- blocking: false
- description: >-
- This test case runs the neutron trunk subtest of the
- OpenStack Tempest suite. The list of test cases is
- generated by Tempest having as input the relevant
- testcase list file.
- run:
- name: tempest_common
- args:
- mode: 'neutron_tempest_plugin.(api|scenario).test_trunk'
- neutron_extensions:
- - trunk
- - trunk-details
-
- -
case_name: networking-bgpvpn
project_name: functest
criteria: 100
@@ -303,7 +366,7 @@ tiers:
neutron_extensions:
- bgpvpn
option:
- - '--concurrency=4'
+ - '--concurrency=3'
-
case_name: networking-sfc
@@ -325,7 +388,7 @@ tiers:
- '--concurrency=0'
-
- case_name: barbican
+ case_name: tempest_barbican
project_name: functest
criteria: 100
blocking: false
@@ -339,10 +402,283 @@ tiers:
'^barbican_tempest_plugin.((?!test_signed_image_upload_boot_failure).)*$'
services:
- barbican
+ option:
+ - '--concurrency=4'
+
+ -
+ name: smoke_cntt
+ description: >-
+ Set of basic Functional tests to validate the OPNFV scenarios.
+ testcases:
+ -
+ case_name: tempest_neutron_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 478
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Neutron project. The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*admin.test_agent_availability_zone)\
+ (?!.*admin.test_dhcp_agent_scheduler)\
+ (?!.*admin.test_l3_agent_scheduler)\
+ (?!.*admin.test_logging)\
+ (?!.*admin.test_logging_negative)\
+ (?!.*admin.test_network_segment_range)\
+ (?!.*admin.test_ports.PortTestCasesAdmin.test_regenerate_mac_address)\
+ (?!.*admin.test_ports.PortTestCasesResourceRequest)\
+ (?!.*admin.test_routers_dvr)\
+ (?!.*admin.test_routers_flavors)\
+ (?!.*admin.test_routers_ha)\
+ (?!.*test_conntrack_helper)\
+ (?!.*test_floating_ips.FloatingIPPoolTestJSON)\
+ (?!.*test_floating_ips.FloatingIPTestJSON.test_create_update_floatingip_port_details)\
+ (?!.*test_metering_extensions)\
+ (?!.*test_metering_negative)\
+ (?!.*test_networks.NetworksSearchCriteriaTest.test_list_validation_filters)\
+ (?!.*test_networks.NetworksTestAdmin.test_create_tenant_network_vxlan)\
+ (?!.*test_networks.NetworksTestJSON.test_create_update_network_dns_domain)\
+ (?!.*test_ports.PortsTestJSON.test_create_port_with_propagate_uplink_status)\
+ (?!.*test_ports.PortsTestJSON.test_create_port_without_propagate_uplink_status)\
+ (?!.*test_ports.PortsTestJSON.test_create_update_port_with_dns_domain)\
+ (?!.*test_ports.PortsTestJSON.test_create_update_port_with_dns_name)\
+ (?!.*test_ports.PortsTestJSON.test_create_update_port_with_no_dns_name)\
+ (?!.*test_qos.QosMinimumBandwidthRuleTestJSON)\
+ (?!.*test_revisions.TestRevisions.test_update_dns_domain_bumps_revision)\
+ (?!.*test_revisions.TestRevisions.test_update_router_extra_attributes_bumps_revision)\
+ (?!.*test_router_interface_fip)\
+ (?!.*test_routers.DvrRoutersTest)\
+ (?!.*test_routers.HaRoutersTest)\
+ (?!.*test_routers.RoutersIpV6Test.test_extra_routes_atomic)\
+ (?!.*test_routers.RoutersTest.test_extra_routes_atomic)\
+ (?!.*test_routers_negative.DvrRoutersNegativeTest)\
+ (?!.*test_routers_negative.DvrRoutersNegativeTestExtended)\
+ (?!.*test_routers_negative.HaRoutersNegativeTest)\
+ (?!.*test_security_groups.RbacSharedSecurityGroupTest)\
+ (?!.*test_subnetpools.SubnetPoolsSearchCriteriaTest.test_list_validation_filters)\
+ (?!.*test_subnets.SubnetsSearchCriteriaTest.test_list_validation_filters)\
+ (?!.*test_timestamp.TestTimeStamp.test_segment_with_timestamp)\
+ (?!.*test_trunk.TrunkTestInheritJSONBase.test_add_subport)\
+ (?!.*test_trunk.TrunkTestMtusJSON)\
+ (?!.*test_trunk_negative.TrunkTestJSON.test_create_subport_invalid_inherit_network_segmentation_type)\
+ (?!.*test_trunk_negative.TrunkTestMtusJSON)\
+ (^neutron_tempest_plugin.api)"
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_cinder_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 7
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Cinder project.
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*test_incremental_backup)\
+ (?!.*test_consistencygroups)\
+ (^cinder_tempest_plugin.)"
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_keystone_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 27
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Keystone project.
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*api.identity.v3.test_oauth1_tokens)\
+ (?!.*scenario.test_federated_authentication)\
+ keystone_tempest_plugin."
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_heat_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 112
+ description: >-
+ This test case runs the Tempest suite proposed by the
+ Heat project.
+ run:
+ name: tempest_heat
+ args:
+ mode: "(?!.*functional.test_lbaasv2)\
+ (?!.*functional.test_encryption_vol_type)\
+ (?!.*functional.test_remote_stack.RemoteStackTest)\
+ (?!.*RemoteStackTest.test_stack_create_with_cloud_credential)\
+ (?!.*scenario.test_aodh_alarm)\
+ (?!.*tests.scenario.test_autoscaling_lb)\
+ (?!.*scenario.test_autoscaling_lbv2)\
+ (?!.*scenario.test_remote_deeply_nested.RemoteDeeplyNestedStackTest)\
+ (?!.*scenario.test_server_software_config)\
+ (?!.*test_volumes.VolumeBackupRestoreIntegrationTest)\
+ (?!.*scenario.test_octavia_lbaas)\
+ (?!.*scenario.test_server_cfn_init)\
+ ^heat_tempest_plugin.tests"
+ option:
+ - '--concurrency=1'
+
+ -
+ case_name: rally_sanity_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ description: >-
+ This test case runs a sub group of tests of the OpenStack
+ Rally suite in smoke mode.
+ run:
+ name: rally_sanity
+ args:
+ tests:
+ - 'authenticate'
+ - 'glance'
+ - 'cinder'
+ - 'heat'
+ - 'keystone'
+ - 'neutron'
+ - 'nova'
+ - 'quotas'
+ - 'swift'
+
+ -
+ case_name: tempest_full_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 1280
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L83
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*test_fixed_ips)\
+ (?!.*test_fixed_ips_negative)\
+ (?!.*test_auto_allocate_network)(?!.*test_floating_ips_bulk)\
+ (?!.*test_flavors_microversions.FlavorsV255TestJSON)\
+ (?!.*test_flavors_microversions.FlavorsV261TestJSON)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_volume_backed_live_migration)\
+ (?!.*test_live_migration.LiveMigrationTest.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveMigrationTest.test_volume_backed_live_migration)\
+ (?!.*test_live_migration.LiveMigrationRemoteConsolesV26Test)\
+ (?!.*test_quotas.QuotasAdminTestV257)\
+ (?!.*certificates.test_certificates)\
+ (?!.*test_quotas_negative.QuotasSecurityGroupAdminNegativeTest)\
+ (?!.*test_novnc)(?!.*test_server_personality)\
+ (?!.*test_servers.ServerShowV263Test.test_show_update_rebuild_list_server)\
+ (?!.*test_servers_microversions.ServerShowV254Test)\
+ (?!.*test_servers_microversions.ServerShowV257Test)\
+ (?!.*test_servers_negative.ServersNegativeTestJSON.test_personality_file_contents_not_encoded)\
+ (?!.*servers.test_virtual_interfaces)\
+ (?!.*test_server_actions.ServerActionsTestJSON.test_change_server_password)\
+ (?!.*test_server_actions.ServerActionsTestJSON.test_get_vnc_console)\
+ (?!.*test_server_actions.ServerActionsTestJSON.test_reboot_server_soft)\
+ (?!.*test_security_group_default_rules)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_duplicate_name)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_invalid_group_description)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_security_group_create_with_invalid_group_name)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_des)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_id)\
+ (?!.*test_security_groups_negative.SecurityGroupsNegativeTestJSON.test_update_security_group_with_invalid_sg_name)\
+ (?!.*test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex)\
+ (?!.*compute.test_virtual_interfaces)(?!.*compute.test_virtual_interfaces_negative)\
+ (?!.*compute.test_networks)\
+ (?!.*test_attach_volume.AttachVolumeMultiAttach)\
+ (?!.*identity.admin.v2)(?!.*identity.v2)\
+ (?!.*image.v1)\
+ (?!.*admin.test_dhcp_agent_scheduler)\
+ (?!.*admin.test_routers_dvr)\
+ (?!.*test_metering_extensions)(?!.*network.test_tags)\
+ (?!.*test_routers_negative.DvrRoutersNegativeTest)\
+ (?!.*test_routers.RoutersIpV6Test.test_create_router_set_gateway_with_fixed_ip)\
+ (?!.*test_routers.RoutersTest.test_create_router_set_gateway_with_fixed_ip)\
+ (?!.*test_group_snapshots.GroupSnapshotsV319Test.test_reset_group_snapshot_status)\
+ (?!.*test_multi_backend)\
+ (?!.*test_snapshot_manage)\
+ (?!.*test_volume_retype.VolumeRetypeWithMigrationTest)\
+ (?!.*test_volume_delete_cascade.VolumesDeleteCascade.test_volume_from_snapshot_cascade_delete)\
+ (?!.*test_volumes_backup.VolumesBackupsTest.test_volume_backup_create_get_detailed_list_restore_delete)\
+ (?!.*test_volumes_extend.VolumesExtendAttachedTest.test_extend_attached_volume)\
+ (?!.*\\[.*\\bslow\\b.*\\])(^tempest.api)"
+ option:
+ - '--concurrency=4'
+
+ -
+ case_name: tempest_scenario_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 9
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
+ run:
+ name: tempest_common
+ args:
+ mode: "\
+ (?!.*test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks)\
+ (?!.*\\[.*\\bslow\\b.*\\])(^tempest.scenario)"
+ option:
+ - '--concurrency=1'
+
+ -
+ case_name: tempest_slow_cntt
+ project_name: functest
+ criteria: 100
+ blocking: false
+ deny_skipping: true
+ tests_count: 43
+ description: >-
+ The list of test cases is generated by
+ Tempest automatically and depends on the parameters of
+ the OpenStack deployment.
+ https://github.com/openstack/tempest/blob/18.0.0/tox.ini#L84
+ run:
+ name: tempest_common
+ args:
+ mode: "(?!.*test_volume_swap)\
+ (?!.*test_server_personality)\
+ (?!.*test_container_sync.ContainerSyncTest.test_container_synchronization)\
+ (?!.*test_container_sync_middleware.ContainerSyncMiddlewareTest.test_container_synchronization)\
+ (?!.*test_encrypted_cinder_volumes)\
+ (?!.*test_minbw_allocation_placement)\
+ (?!.*test_network_basic_ops.TestNetworkBasicOps.test_router_rescheduling)\
+ (?!.*test_stamp_pattern.TestStampPattern.test_stamp_pattern)\
+ (?!.*test_volume_migrate_attached)\
+ (?=.*\\[.*\\bslow\\b.*\\])(^tempest.)"
+ option:
+ - '--concurrency=1'
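Editor note: the *_cntt entries above pair `deny_skipping: true` with a fixed `tests_count`, the intent being that a run only passes when the declared number of tests executes and none is skipped (both kwargs are picked up by TempestCommon further down in this patch). A rough sketch of such a gate, with illustrative counter names rather than the xtesting API:

    def cntt_criteria_met(executed, skipped, expected_count):
        """Reject runs that skip tests or run fewer tests than declared."""
        if skipped:                                      # deny_skipping: true
            return False
        if expected_count and executed != expected_count:  # tests_count
            return False
        return True

    # Example: tempest_cinder_cntt declares tests_count: 7
    print(cntt_criteria_met(executed=7, skipped=0, expected_count=7))  # True
    print(cntt_criteria_met(executed=6, skipped=1, expected_count=7))  # False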
-
name: benchmarking
- order: 2
description: >-
Run several OpenStack performance tools
https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
@@ -369,6 +705,8 @@ tiers:
description: >-
This test case runs a group of Rally jobs used in
OpenStack gating
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: rally_jobs
args:
@@ -385,7 +723,7 @@ tiers:
perform ping connectivity, round trip time measurement
(latency) and TCP/UDP throughput
dependencies:
- - POD_ARCH: '^(?!aarch64$)'
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: vmtp
@@ -399,87 +737,55 @@ tiers:
like iperf, iperf3 and netperf (with help of flent). Shaker
is able to deploy OpenStack instances and networks in
different topologies.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: shaker
-
- name: features
- order: 3
+ name: benchmarking_cntt
description: >-
- Test suites from feature projects
- integrated in functest
+ Run several OpenStack performance tools
+ https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
testcases:
-
- case_name: doctor-notification
- project_name: doctor
+ case_name: rally_full_cntt
+ project_name: functest
criteria: 100
blocking: false
description: >-
- Test suite from Doctor project.
- dependencies:
- - INSTALLER_TYPE: '(apex)|(fuel)|(daisy)'
- - DEPLOY_SCENARIO: '^((?!fdio|nofeature).)*$'
+ This test case runs the full suite of scenarios of the
+ OpenStack Rally suite using several threads and iterations.
run:
- name: bashfeature
+ name: rally_full
args:
- cmd: 'doctor-test'
+ tests:
+ - 'authenticate'
+ - 'glance'
+ - 'cinder'
+ - 'heat'
+ - 'keystone'
+ - 'neutron'
+ - 'nova'
+ - 'quotas'
+ - 'swift'
-
- case_name: functest-odl-sfc
- project_name: sfc
- criteria: 100
- blocking: false
- description: >-
- Test suite for odl-sfc to test two chains with one SF and
- one chain with two SFs
- dependencies:
- - DEPLOY_SCENARIO: 'odl.*sfc'
- run:
- name: functest-odl-sfc
-
- -
- case_name: barometercollectd
- project_name: barometer
- enabled: false
- criteria: 100
- blocking: false
- description: >-
- Test suite for the Barometer project. Separate tests verify
- the proper configuration and basic functionality of all the
- collectd plugins as described in the Project Release Plan
- dependencies:
- - DEPLOY_SCENARIO: 'bar'
- run:
- name: barometercollectd
-
- -
- case_name: vgpu
+ case_name: rally_jobs_cntt
project_name: functest
criteria: 100
blocking: false
description: >-
- Test suite for the OpenStack vGPU feature
- dependencies:
- - DEPLOY_SCENARIO: 'vgpu'
- run:
- name: vgpu
-
- -
- case_name: stor4nfv_os
- project_name: stor4nfv
- criteria: 100
- blocking: false
- description: >-
- This tests are necessary demonstrate conformance
- of the OpenStack+Stor4NFV deployment.
- dependencies:
- - DEPLOY_SCENARIO: 'stor4nfv'
+ This test case runs a group of Rally jobs used in
+ OpenStack gating
run:
- name: stor4nfv_os
+ name: rally_jobs
+ args:
+ tests:
+ - 'neutron'
-
name: vnf
- order: 4
description: >-
Collection of VNF test cases.
testcases:
@@ -502,7 +808,7 @@ tiers:
Clearwater using the Cloudify orchestrator. It also runs
some signaling traffic.
dependencies:
- - DEPLOY_SCENARIO: 'os-.*-nofeature-.*ha'
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: cloudify_ims
@@ -515,6 +821,8 @@ tiers:
This test case deploys an OpenSource vIMS solution from
Clearwater using the OpenStack Heat orchestrator.
It also runs some signaling traffic.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: heat_ims
@@ -525,6 +833,8 @@ tiers:
blocking: false
description: >-
This test case is vRouter testing.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: vyos_vrouter
@@ -536,5 +846,7 @@ tiers:
description: >-
vEPC validation with Juju as VNF manager and ABoT as test
executor.
+ dependencies:
+ - NO_TENANT_NETWORK: '^(?![tT]rue$)'
run:
name: juju_epc
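Editor note: throughout this file the `dependencies` entries are regular expressions evaluated against environment variables. `NO_TENANT_NETWORK: '^(?![tT]rue$)'` keeps a case enabled unless the variable is exactly "true"/"True", and `DASHBOARD_URL: '^(?!\s*$).+'` requires a non-blank value. A minimal sketch of that gating, assuming the runner simply applies re.search to the variable's value:

    import os
    import re

    def dependency_met(variable, pattern):
        """Return True when the environment variable satisfies the regex."""
        return re.search(pattern, os.environ.get(variable, '')) is not None

    os.environ['NO_TENANT_NETWORK'] = 'True'
    print(dependency_met('NO_TENANT_NETWORK', r'^(?![tT]rue$)'))  # False: case skipped
    os.environ['NO_TENANT_NETWORK'] = 'false'
    print(dependency_met('NO_TENANT_NETWORK', r'^(?![tT]rue$)'))  # True: case runs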
diff --git a/functest/core/cloudify.py b/functest/core/cloudify.py
index 21bfc937e..9e80f4b60 100644
--- a/functest/core/cloudify.py
+++ b/functest/core/cloudify.py
@@ -74,10 +74,11 @@ class Cloudify(singlevm.SingleVm2):
"--cap-add SYS_ADMIN --network=host {}".format(
os.path.basename(self.cloudify_archive),
self.cloudify_container))
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
self.cfy_client = CloudifyClient(
- host=self.fip.floating_ip_address,
+ host=self.fip.floating_ip_address if self.fip else (
+ self.sshvm.public_v4),
username='admin', password='admin', tenant='default_tenant')
self.__logger.info("Attemps running status of the Manager")
secret_key = "foo"
@@ -124,8 +125,8 @@ class Cloudify(singlevm.SingleVm2):
"cfy_manager_local:/etc/cloudify/ && "
"sudo docker exec cfy_manager_local "
"chmod 444 /etc/cloudify/cloudify_ims.pem")
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
def upload_cfy_plugins(self, yaml, wgn):
"""Upload Cloudify plugins"""
@@ -133,8 +134,8 @@ class Cloudify(singlevm.SingleVm2):
"sudo docker exec cfy_manager_local "
"cfy plugins upload -y {} {} && "
"sudo docker exec cfy_manager_local cfy status".format(yaml, wgn))
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
def kill_existing_execution(self, dep_name):
"""kill existing execution"""
diff --git a/functest/core/singlevm.py b/functest/core/singlevm.py
index 1da30de34..6d92e6eef 100644
--- a/functest/core/singlevm.py
+++ b/functest/core/singlevm.py
@@ -212,12 +212,14 @@ class VmReady1(tenantnetwork.TenantNetwork1):
vm1 = self.cloud.create_server(
name if name else '{}-vm_{}'.format(self.case_name, self.guid),
image=self.image.id, flavor=self.flavor.id,
- auto_ip=False, network=self.network.id,
+ auto_ip=False,
+ network=self.network.id if self.network else env.get(
+ "EXTERNAL_NETWORK"),
timeout=self.create_server_timeout, wait=True, **kwargs)
self.__logger.debug("vm: %s", vm1)
return vm1
- def check_regex_in_console(self, name, regex=' login: ', loop=1):
+ def check_regex_in_console(self, name, regex=' login: ', loop=6):
"""Wait for specific message in console
Returns: True or False on errors
@@ -257,6 +259,23 @@ class VmReady1(tenantnetwork.TenantNetwork1):
self.__logger.debug(
"Orphan security group %s in use", sec_group.id)
+ def count_hypervisors(self):
+ """Count hypervisors."""
+ if env.get('SKIP_DOWN_HYPERVISORS').lower() == 'false':
+ return len(self.orig_cloud.list_hypervisors())
+ return self.count_active_hypervisors()
+
+ def count_active_hypervisors(self):
+ """Count all hypervisors which are up."""
+ compute_cnt = 0
+ for hypervisor in self.orig_cloud.list_hypervisors():
+ if hypervisor['state'] == 'up':
+ compute_cnt += 1
+ else:
+ self.__logger.warning(
+ "%s is down", hypervisor['hypervisor_hostname'])
+ return compute_cnt
+
def run(self, **kwargs):
"""Boot the new VM
@@ -351,6 +370,8 @@ class SingleVm1(VmReady1):
ssh_connect_timeout = 1
ssh_connect_loops = 6
create_floating_ip_timeout = 120
+ check_console_loop = 6
+ check_console_regex = ' login: '
def __init__(self, **kwargs):
if "case_name" not in kwargs:
@@ -398,10 +419,12 @@ class SingleVm1(VmReady1):
- None on error
"""
assert vm1
- fip = self.cloud.create_floating_ip(
- network=self.ext_net.id, server=vm1, wait=True,
- timeout=self.create_floating_ip_timeout)
- self.__logger.debug("floating_ip: %s", fip)
+ fip = None
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
+ fip = self.cloud.create_floating_ip(
+ network=self.ext_net.id, server=vm1, wait=True,
+ timeout=self.create_floating_ip_timeout)
+ self.__logger.debug("floating_ip: %s", fip)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
for loop in range(self.ssh_connect_loops):
@@ -409,7 +432,7 @@ class SingleVm1(VmReady1):
p_console = self.cloud.get_server_console(vm1)
self.__logger.debug("vm console: \n%s", p_console)
ssh.connect(
- fip.floating_ip_address,
+ fip.floating_ip_address if fip else vm1.public_v4,
username=getattr(
config.CONF,
'{}_image_user'.format(self.case_name), self.username),
@@ -422,7 +445,7 @@ class SingleVm1(VmReady1):
except Exception as exc: # pylint: disable=broad-except
self.__logger.debug(
"try %s: cannot connect to %s: %s", loop + 1,
- fip.floating_ip_address, exc)
+ fip.floating_ip_address if fip else vm1.public_v4, exc)
time.sleep(9)
else:
self.__logger.error(
@@ -438,8 +461,8 @@ class SingleVm1(VmReady1):
Returns: echo exit codes
"""
(_, stdout, stderr) = self.ssh.exec_command('echo Hello World')
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
return stdout.channel.recv_exit_status()
def run(self, **kwargs):
@@ -464,10 +487,13 @@ class SingleVm1(VmReady1):
self.prepare()
self.sshvm = self.boot_vm(
key_name=self.keypair.id, security_groups=[self.sec.id])
- (self.fip, self.ssh) = self.connect(self.sshvm)
- if not self.execute():
- self.result = 100
- status = testcase.TestCase.EX_OK
+ if self.check_regex_in_console(
+ self.sshvm.name, regex=self.check_console_regex,
+ loop=self.check_console_loop):
+ (self.fip, self.ssh) = self.connect(self.sshvm)
+ if not self.execute():
+ self.result = 100
+ status = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
self.__logger.exception('Cannot run %s', self.case_name)
finally:
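Editor note: the reworked run() above only attempts SSH once the instance console shows the login prompt, retrying `check_console_loop` times. The body of check_regex_in_console is not part of this hunk; a rough sketch of the polling pattern it implies, assuming a shade cloud handle and a fixed sleep between attempts (the helper name and delay are illustrative):

    import re
    import time

    def wait_for_console(cloud, name, regex=' login: ', loop=6, delay=10):
        """Poll the server console until the regex appears (hypothetical helper)."""
        for _ in range(loop):
            console = cloud.get_server_console(name)
            if re.search(regex, console):
                return True
            time.sleep(delay)
        return False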
diff --git a/functest/core/tenantnetwork.py b/functest/core/tenantnetwork.py
index ae739ac36..e340cea93 100644
--- a/functest/core/tenantnetwork.py
+++ b/functest/core/tenantnetwork.py
@@ -31,6 +31,7 @@ from xtesting.core import testcase
from functest.utils import config
from functest.utils import env
+from functest.utils import functest_utils
class NewProject(object):
@@ -56,7 +57,9 @@ class NewProject(object):
assert self.orig_cloud
assert self.case_name
self.password = ''.join(random.choice(
- string.ascii_letters + string.digits) for _ in range(30))
+ string.ascii_letters + string.digits +
+ '!()*+,-.<=>?@[]^_{|}~') for _ in range(30))
+ self.__logger.debug("password: %s", self.password)
self.domain = self.orig_cloud.get_domain(
name_or_id=self.orig_cloud.auth.get(
"project_domain_name", "Default"))
@@ -146,27 +149,28 @@ class TenantNetwork1(testcase.TestCase):
__logger = logging.getLogger(__name__)
cidr = '192.168.120.0/24'
shared_network = False
- allow_no_fip = False
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tenantnetwork1'
super(TenantNetwork1, self).__init__(**kwargs)
- self.res_dir = os.path.join(
- getattr(config.CONF, 'dir_results'), self.case_name)
+ self.dir_results = os.path.join(getattr(config.CONF, 'dir_results'))
+ self.res_dir = os.path.join(self.dir_results, self.case_name)
+ self.output_log_name = 'functest.log'
+ self.output_debug_log_name = 'functest.debug.log'
+ self.ext_net = None
try:
cloud_config = os_client_config.get_config()
self.cloud = self.orig_cloud = shade.OpenStackCloud(
cloud_config=cloud_config)
except Exception: # pylint: disable=broad-except
self.cloud = self.orig_cloud = None
- self.ext_net = None
self.__logger.exception("Cannot connect to Cloud")
- try:
- self.ext_net = self.get_external_network(self.cloud)
- except Exception: # pylint: disable=broad-except
- self.ext_net = None
- self.__logger.exception("Cannot get the external network")
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
+ try:
+ self.ext_net = self.get_external_network(self.cloud)
+ except Exception: # pylint: disable=broad-except
+ self.__logger.exception("Cannot get the external network")
self.guid = str(uuid.uuid4())
self.network = None
self.subnet = None
@@ -203,7 +207,7 @@ class TenantNetwork1(testcase.TestCase):
@staticmethod
def get_public_auth_url(cloud):
"""Get Keystone public endpoint"""
- keystone_id = cloud.search_services('keystone')[0].id
+ keystone_id = functest_utils.search_services(cloud, 'keystone')[0].id
endpoint = cloud.search_endpoints(
filters={'interface': 'public',
'service_id': keystone_id})[0].url
@@ -218,7 +222,7 @@ class TenantNetwork1(testcase.TestCase):
Raises: expection on error
"""
assert self.cloud
- if not self.allow_no_fip:
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
assert self.ext_net
provider = {}
if hasattr(config.CONF, '{}_network_type'.format(self.case_name)):
@@ -263,7 +267,8 @@ class TenantNetwork1(testcase.TestCase):
try:
assert self.cloud
self.start_time = time.time()
- self.create_network_resources()
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
+ self.create_network_resources()
self.result = 100
status = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
diff --git a/functest/opnfv_tests/openstack/api/connection_check.py b/functest/opnfv_tests/openstack/api/connection_check.py
index 7a320b5e4..f3b35e9d9 100644
--- a/functest/opnfv_tests/openstack/api/connection_check.py
+++ b/functest/opnfv_tests/openstack/api/connection_check.py
@@ -17,6 +17,7 @@ import shade
from xtesting.core import testcase
from functest.utils import env
+from functest.utils import functest_utils
class ConnectionCheck(testcase.TestCase):
@@ -28,12 +29,14 @@ class ConnectionCheck(testcase.TestCase):
"list_endpoints", "list_floating_ip_pools", "list_floating_ips",
"list_hypervisors", "list_keypairs", "list_networks", "list_ports",
"list_role_assignments", "list_roles", "list_routers", "list_servers",
- "list_services", "list_subnets"]
+ "list_subnets"]
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'connection_check'
super(ConnectionCheck, self).__init__(**kwargs)
+ self.output_log_name = 'functest.log'
+ self.output_debug_log_name = 'functest.debug.log'
try:
cloud_config = os_client_config.get_config()
self.cloud = shade.OpenStackCloud(cloud_config=cloud_config)
@@ -47,11 +50,12 @@ class ConnectionCheck(testcase.TestCase):
try:
assert self.cloud
self.start_time = time.time()
- if env.get('PUBLIC_ENDPOINT_ONLY').lower() == 'true':
- self.__logger.warning(
- "Listing services is skipped "
- "because the admin endpoints are unreachable")
- self.func_list.remove("list_services")
+ self.__logger.debug(
+ "list_services: %s", functest_utils.list_services(self.cloud))
+ if env.get('NO_TENANT_NETWORK').lower() == 'true':
+ self.func_list.remove("list_floating_ip_pools")
+ self.func_list.remove("list_floating_ips")
+ self.func_list.remove("list_routers")
for func in self.func_list:
self.__logger.debug(
"%s: %s", func, getattr(self.cloud, func)())
@@ -59,6 +63,7 @@ class ConnectionCheck(testcase.TestCase):
self.__logger.debug(
"list_service_providers: %s",
self.cloud._get_and_munchify('service_providers', data))
+ functest_utils.get_openstack_version(self.cloud)
self.result = 100
status = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
diff --git a/functest/opnfv_tests/openstack/cinder/cinder_test.py b/functest/opnfv_tests/openstack/cinder/cinder_test.py
index bbed9a64f..d81bb100a 100644
--- a/functest/opnfv_tests/openstack/cinder/cinder_test.py
+++ b/functest/opnfv_tests/openstack/cinder/cinder_test.py
@@ -77,8 +77,10 @@ class CinderCheck(singlevm.SingleVm2):
self.logger.debug("ssh: %s", self.ssh)
(_, stdout, stderr) = self.ssh.exec_command(
"sh ~/write_data.sh {}".format(env.get('VOLUME_DEVICE_NAME')))
- self.logger.debug("volume_write stdout: %s", stdout.read())
- self.logger.debug("volume_write stderr: %s", stderr.read())
+ self.logger.debug(
+ "volume_write stdout: %s", stdout.read().decode("utf-8"))
+ self.logger.debug(
+ "volume_write stderr: %s", stderr.read().decode("utf-8"))
# Detach volume from VM 1
self.logger.info("Detach volume from VM 1")
self.cloud.detach_volume(
@@ -103,8 +105,10 @@ class CinderCheck(singlevm.SingleVm2):
self.logger.debug("ssh: %s", self.ssh2)
(_, stdout, stderr) = self.ssh2.exec_command(
"sh ~/read_data.sh {}".format(env.get('VOLUME_DEVICE_NAME')))
- self.logger.debug("read volume stdout: %s", stdout.read())
- self.logger.debug("read volume stderr: %s", stderr.read())
+ self.logger.debug(
+ "read volume stdout: %s", stdout.read().decode("utf-8"))
+ self.logger.debug(
+ "read volume stderr: %s", stderr.read().decode("utf-8"))
self.logger.info("Detach volume from VM 2")
self.cloud.detach_volume(
self.vm2, self.volume, timeout=self.volume_timeout)
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.yaml b/functest/opnfv_tests/openstack/rally/blacklist.yaml
index 382e239f8..18727f73c 100644
--- a/functest/opnfv_tests/openstack/rally/blacklist.yaml
+++ b/functest/opnfv_tests/openstack/rally/blacklist.yaml
@@ -1,13 +1,5 @@
---
scenario:
- -
- scenarios:
- - '^os-' # all scenarios
- tests:
- # Following test occasionally fails due to race condition issue on
- # quota manipulation in nova.
- # Ref: https://bugs.launchpad.net/nova/+bug/1552622
- - 'Quotas.nova_update_and_delete'
functionality:
-
@@ -34,7 +26,14 @@ functionality:
tests:
- HeatStacks.create_and_delete_stack
- NovaServers.boot_and_associate_floating_ip
+ - NovaServers.boot_server_and_list_interfaces
- NovaServers.boot_server_associate_and_dissociate_floating_ip
- NeutronNetworks.create_and_delete_floating_ips
- NeutronNetworks.create_and_list_floating_ips
- NeutronNetworks.associate_and_dissociate_floating_ips
+ - NeutronNetworks.create_and_delete_routers
+ - NeutronNetworks.create_and_list_routers
+ - NeutronNetworks.create_and_show_routers
+ - NeutronNetworks.create_and_update_routers
+ - NeutronNetworks.set_and_clear_router_gateway
+ - Quotas.neutron_update
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index b450580c7..592809b76 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -22,10 +22,10 @@ import shutil
import subprocess
import time
-from threading import Timer
import pkg_resources
import prettytable
from ruamel.yaml import YAML
+import six
from six.moves import configparser
from xtesting.core import testcase
import yaml
@@ -33,6 +33,7 @@ import yaml
from functest.core import singlevm
from functest.utils import config
from functest.utils import env
+from functest.utils import functest_utils
LOGGER = logging.getLogger(__name__)
@@ -42,7 +43,7 @@ class RallyBase(singlevm.VmReady2):
# pylint: disable=too-many-instance-attributes, too-many-public-methods
stests = ['authenticate', 'glance', 'cinder', 'gnocchi', 'heat',
- 'keystone', 'neutron', 'nova', 'quotas']
+ 'keystone', 'neutron', 'nova', 'quotas', 'swift']
rally_conf_path = "/etc/rally/rally.conf"
rally_aar4_patch_path = pkg_resources.resource_filename(
@@ -67,7 +68,6 @@ class RallyBase(singlevm.VmReady2):
visibility = 'public'
shared_network = True
- allow_no_fip = True
task_timeout = 3600
def __init__(self, **kwargs):
@@ -94,14 +94,12 @@ class RallyBase(singlevm.VmReady2):
self.smoke = None
self.start_time = None
self.result = None
- self.details = None
self.compute_cnt = 0
self.flavor_alt = None
self.tests = []
self.run_cmd = ''
self.network_extensions = []
self.services = []
- self.task_aborted = False
def _build_task_args(self, test_file_name):
"""Build arguments for the Rally task."""
@@ -131,7 +129,14 @@ class RallyBase(singlevm.VmReady2):
if self.network:
task_args['netid'] = str(self.network.id)
else:
- task_args['netid'] = ''
+ LOGGER.warning(
+ 'No tenant network created. '
+ 'Trying EXTERNAL_NETWORK as a fallback')
+ if env.get("EXTERNAL_NETWORK"):
+ network = self.cloud.get_network(env.get("EXTERNAL_NETWORK"))
+ task_args['netid'] = str(network.id) if network else ''
+ else:
+ task_args['netid'] = ''
return task_args
@@ -234,20 +239,17 @@ class RallyBase(singlevm.VmReady2):
rconfig.write(config_file)
@staticmethod
- def get_task_id(cmd_raw):
+ def get_task_id(tag):
"""
Get task id from command rally result.
- :param cmd_raw:
+ :param tag:
:return: task_id as string
"""
- taskid_re = re.compile('^Task +(.*): started$')
- for line in cmd_raw.splitlines(True):
- line = line.strip()
- match = taskid_re.match(line.decode("utf-8"))
- if match:
- return match.group(1)
- return None
+ cmd = ["rally", "task", "list", "--tag", tag, "--uuids-only"]
+ output = subprocess.check_output(cmd).decode("utf-8").rstrip()
+ LOGGER.info("%s: %s", " ".join(cmd), output)
+ return output
@staticmethod
def task_succeed(json_raw):
@@ -425,30 +427,22 @@ class RallyBase(singlevm.VmReady2):
else:
LOGGER.info('Test scenario: "%s" Failed.', test_name)
- def kill_task(self, proc):
- """ Kill a task."""
- proc.kill()
- self.task_aborted = True
-
def run_task(self, test_name):
"""Run a task."""
LOGGER.info('Starting test scenario "%s" ...', test_name)
LOGGER.debug('running command: %s', self.run_cmd)
- proc = subprocess.Popen(self.run_cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- self.task_aborted = False
- timer = Timer(self.task_timeout, self.kill_task, [proc])
- timer.start()
- output = proc.communicate()[0]
- if self.task_aborted:
- LOGGER.error("Failed to complete task")
- raise Exception("Failed to complete task")
- timer.cancel()
- task_id = self.get_task_id(output)
+ if six.PY3:
+ # pylint: disable=no-member
+ subprocess.call(
+ self.run_cmd, timeout=self.task_timeout,
+ stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ else:
+ with open(os.devnull, 'wb') as devnull:
+ subprocess.call(self.run_cmd, stdout=devnull, stderr=devnull)
+ task_id = self.get_task_id(test_name)
LOGGER.debug('task_id : %s', task_id)
- if task_id is None:
+ if not task_id:
LOGGER.error("Failed to retrieve task_id")
- LOGGER.error("Result:\n%s", output.decode("utf-8"))
raise Exception("Failed to retrieve task id")
self._save_results(test_name, task_id)
@@ -524,11 +518,11 @@ class RallyBase(singlevm.VmReady2):
shutil.copytree(task_macro, macro_dir)
self.update_keystone_default_role()
- self.compute_cnt = len(self.cloud.list_hypervisors())
+ self.compute_cnt = self.count_hypervisors()
self.network_extensions = self.cloud.get_network_extensions()
self.flavor_alt = self.create_flavor_alt()
self.services = [service.name for service in
- self.cloud.list_services()]
+ functest_utils.list_services(self.cloud)]
LOGGER.debug("flavor: %s", self.flavor_alt)
@@ -538,7 +532,8 @@ class RallyBase(singlevm.VmReady2):
if self.file_is_empty(file_name):
LOGGER.info('No tests for scenario "%s"', test_name)
return False
- self.run_cmd = (["rally", "task", "start", "--abort-on-sla-failure",
+ self.run_cmd = (["rally", "task", "start", "--tag", test_name,
+ "--abort-on-sla-failure",
"--task", self.task_file, "--task-args",
str(self._build_task_args(test_name))])
return True
@@ -605,10 +600,10 @@ class RallyBase(singlevm.VmReady2):
LOGGER.info("Rally '%s' success_rate is %s%% in %s/%s modules",
self.case_name, success_rate, nb_modules,
len(self.summary))
- payload.append({'summary': {'duration': total_duration,
- 'nb tests': total_nb_tests,
- 'nb success': success_rate}})
- self.details = payload
+ self.details['summary'] = {'duration': total_duration,
+ 'nb tests': total_nb_tests,
+ 'nb success': success_rate}
+ self.details["modules"] = payload
@staticmethod
def export_task(file_name, export_type="html"):
@@ -810,26 +805,10 @@ class RallyJobs(RallyBase):
with open(result_file_name, 'w') as fname:
template.dump(cases, fname)
- @staticmethod
- def _remove_plugins_extra():
- inst_dir = getattr(config.CONF, 'dir_rally_inst')
- try:
- shutil.rmtree(os.path.join(inst_dir, 'plugins'))
- shutil.rmtree(os.path.join(inst_dir, 'extra'))
- except Exception: # pylint: disable=broad-except
- pass
-
def prepare_task(self, test_name):
"""Prepare resources for test run."""
- self._remove_plugins_extra()
jobs_dir = os.path.join(
getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')
- inst_dir = getattr(config.CONF, 'dir_rally_inst')
- shutil.copytree(os.path.join(jobs_dir, 'plugins'),
- os.path.join(inst_dir, 'plugins'))
- shutil.copytree(os.path.join(jobs_dir, 'extra'),
- os.path.join(inst_dir, 'extra'))
-
task_name = self.task_yaml.get(test_name).get("task")
task = os.path.join(jobs_dir, task_name)
if not os.path.exists(task):
@@ -840,9 +819,6 @@ class RallyJobs(RallyBase):
os.makedirs(self.temp_dir)
task_file_name = os.path.join(self.temp_dir, task_name)
self.apply_blacklist(task, task_file_name)
- self.run_cmd = (["rally", "task", "start", "--task", task_file_name])
+ self.run_cmd = (["rally", "task", "start", "--tag", test_name,
+ "--task", task_file_name])
return True
-
- def clean(self):
- self._remove_plugins_extra()
- super(RallyJobs, self).clean()
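Editor note: task identification now relies on Rally tags instead of scraping stdout. Each task is started with `--tag <test_name>`, its UUID is fetched afterwards with `rally task list --tag <test_name> --uuids-only`, and the runner can therefore silence Rally's output and rely on a plain timeout. A condensed sketch of the flow mirrored from the code above (Python 3 only, for brevity):

    import subprocess

    def run_rally_task(task_file, test_name, task_args, timeout=3600):
        """Start a tagged Rally task and return its UUID (sketch of the flow above)."""
        subprocess.call(
            ["rally", "task", "start", "--tag", test_name,
             "--abort-on-sla-failure", "--task", task_file,
             "--task-args", str(task_args)],
            timeout=timeout,
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return subprocess.check_output(
            ["rally", "task", "list", "--tag", test_name,
             "--uuids-only"]).decode("utf-8").rstrip()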
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml
index dfc1fc156..993b83ff7 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml
@@ -36,8 +36,9 @@
flavor:
name: {{ flavor_name }}
number_instances: 2
- nics:
- - net-id: {{ netid }}
+ boot_server_kwargs:
+ nics:
+ - net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
quotas:
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml
index b8b1b9b6b..b2248d499 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml
@@ -150,8 +150,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- router: {}
quotas:
neutron:
network: -1
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml
index 8ef5b6cdc..187f2cfd2 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml
@@ -39,9 +39,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -59,9 +56,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -80,9 +74,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -104,9 +95,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -124,9 +112,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -216,7 +201,7 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- size: 10
+ size: 1
block_migration: {{ block_migration }}
boot_server_kwargs:
nics:
@@ -251,14 +236,11 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- server_kwargs:
+ boot_server_kwargs:
nics:
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova(keypairs=true) }}
@@ -277,9 +259,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_volumes() }}
{{ unlimited_neutron() }}
@@ -301,9 +280,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml b/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml
index 3f0cf0840..dcb007c50 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml
@@ -35,17 +35,6 @@
sla:
{{ no_failures_sla() }}
- Quotas.nova_update_and_delete:
- -
- args:
- max_quota: 1024
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
Quotas.nova_update:
-
args:
diff --git a/functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml b/functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml
new file mode 100644
index 000000000..66d7cd24d
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml
@@ -0,0 +1,71 @@
+ SwiftObjects.create_container_and_object_then_list_objects:
+ -
+ args:
+ objects_per_container: 2
+ object_size: 5120
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.list_objects_in_containers:
+ -
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ swift_objects:
+ containers_per_tenant: 1
+ objects_per_container: 10
+ object_size: 1024
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.create_container_and_object_then_download_object:
+ -
+ args:
+ objects_per_container: 5
+ object_size: 1024
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.create_container_and_object_then_delete_all:
+ -
+ args:
+ objects_per_container: 5
+ object_size: 102400
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.list_and_download_objects_in_containers:
+ -
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ swift_objects:
+ containers_per_tenant: 1
+ objects_per_container: 5
+ object_size: 10240
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml
index 1b61762f9..279e81439 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml
@@ -36,8 +36,9 @@
flavor:
name: {{ flavor_name }}
number_instances: 2
- nics:
- - net-id: {{ netid }}
+ boot_server_kwargs:
+ nics:
+ - net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
quotas:
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml
index 935f3841a..cba5c921f 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml
@@ -15,7 +15,7 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- size: 10
+ size: 1
block_migration: {{ block_migration }}
boot_server_kwargs:
nics:
@@ -50,14 +50,11 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- server_kwargs:
+ boot_server_kwargs:
nics:
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova(keypairs=true) }}
@@ -76,9 +73,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_volumes() }}
{{ unlimited_neutron() }}
@@ -100,9 +94,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -128,7 +119,8 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- auto_assign_nic: true
+ nics:
+ - net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
network: {}
diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml
index 181fdb42e..3e692891a 100644
--- a/functest/opnfv_tests/openstack/rally/task.yaml
+++ b/functest/opnfv_tests/openstack/rally/task.yaml
@@ -42,3 +42,7 @@
{% if "heat" in service_list %}
{%- include "var/opnfv-heat.yaml"-%}
{% endif %}
+
+{% if "swift" in service_list %}
+{%- include "var/opnfv-swift.yaml"-%}
+{% endif %}
diff --git a/functest/opnfv_tests/openstack/shaker/shaker.py b/functest/opnfv_tests/openstack/shaker/shaker.py
index fd31ea689..917c65980 100644
--- a/functest/opnfv_tests/openstack/shaker/shaker.py
+++ b/functest/opnfv_tests/openstack/shaker/shaker.py
@@ -19,6 +19,7 @@ and list of tests to execute.
import logging
import os
+import json
import scp
from functest.core import singlevm
@@ -31,23 +32,26 @@ class Shaker(singlevm.SingleVm2):
__logger = logging.getLogger(__name__)
- filename = '/home/opnfv/functest/images/shaker-image.qcow2'
+ filename = '/home/opnfv/functest/images/shaker-image-1.3.0+stretch.qcow2'
flavor_ram = 512
flavor_vcpus = 1
flavor_disk = 3
- username = 'ubuntu'
+ username = 'debian'
port = 9000
ssh_connect_loops = 12
create_server_timeout = 300
+ check_console_loop = 12
shaker_timeout = '3600'
quota_instances = -1
+ quota_cores = -1
+ check_console_loop = 12
def __init__(self, **kwargs):
super(Shaker, self).__init__(**kwargs)
self.role = None
def check_requirements(self):
- if len(self.orig_cloud.list_hypervisors()) < 2:
+ if self.count_hypervisors() < 2:
self.__logger.warning("Shaker requires at least 2 hypervisors")
self.is_skipped = True
self.project.clean()
@@ -85,7 +89,8 @@ class Shaker(singlevm.SingleVm2):
domain=self.project.domain.id)
self.orig_cloud.set_compute_quotas(
self.project.project.name,
- instances=self.quota_instances)
+ instances=self.quota_instances,
+ cores=self.quota_cores)
scpc = scp.SCPClient(self.ssh.get_transport())
scpc.put('/home/opnfv/functest/conf/env_file', remote_path='~/')
if os.environ.get('OS_CACERT'):
@@ -100,10 +105,10 @@ class Shaker(singlevm.SingleVm2):
'unset OS_TENANT_NAME && '
'unset OS_TENANT_ID && '
'unset OS_ENDPOINT_TYPE && '
- 'export OS_PASSWORD={} && '
+ 'export OS_PASSWORD="{}" && '
'{}'
'env && '
- 'timeout {} shaker --image-name {} --flavor-name {} '
+ 'timeout {} shaker --debug --image-name {} --flavor-name {} '
'--server-endpoint {}:9000 --external-net {} --dns-nameservers {} '
'--scenario openstack/full_l2,'
'openstack/full_l3_east_west,'
@@ -127,6 +132,13 @@ class Shaker(singlevm.SingleVm2):
except scp.SCPException:
self.__logger.exception("cannot get report files")
return 1
+ with open(os.path.join(self.res_dir, 'report.json')) as json_file:
+ data = json.load(json_file)
+ for value in data["records"].values():
+ if value["status"] != "ok":
+ self.__logger.error(
+ "%s failed\n%s", value["scenario"], value["stderr"])
+ return 1
return stdout.channel.recv_exit_status()
def clean(self):
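Editor note: besides the SSH exit status, execute() now fails the run whenever any record in Shaker's report.json is not "ok". The parser above implies a layout of the form records -> {status, scenario, stderr}; a minimal check against such a structure, with hypothetical sample content:

    import json

    sample = '''{"records": {"1": {"status": "error",
                                   "scenario": "openstack/full_l2",
                                   "stderr": "iperf3: unable to connect"}}}'''
    data = json.loads(sample)
    failed = [r["scenario"] for r in data["records"].values() if r["status"] != "ok"]
    print(failed)   # ['openstack/full_l2']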
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml
index 8d7d6eca9..e559f619a 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml
@@ -4,10 +4,9 @@
- os-ovn-nofeature-ha
- os-ovn-nofeature-noha
tests:
- - neutron_tempest_plugin.api.admin.test_agent_management
- neutron_tempest_plugin.api.admin.test_dhcp_agent_scheduler
+ - neutron_tempest_plugin.api.test_trunk.TrunkTestInheritJSONBase.test_add_subport
- patrole_tempest_plugin.tests.api.network.test_agents_rbac
- patrole_tempest_plugin.tests.api.network.test_networks_rbac.NetworksRbacTest.test_create_network_provider_network_type
- patrole_tempest_plugin.tests.api.network.test_networks_rbac.NetworksRbacTest.test_create_network_provider_segmentation_id
- - tempest.api.network.admin.test_agent_management
- tempest.api.network.admin.test_dhcp_agent_scheduler
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml
new file mode 100644
index 000000000..e53b577b2
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml
@@ -0,0 +1,15 @@
+---
+-
+ scenarios:
+ - os-*
+ tests:
+ - neutron_tempest_plugin.api.admin.test_floating_ips_admin_actions.FloatingIPAdminTestJSON.test_associate_floating_ip_with_port_from_another_project
+ - neutron_tempest_plugin.api.admin.test_quotas.QuotasTest.test_detail_quotas
+ - neutron_tempest_plugin.api.admin.test_quotas.QuotasTest.test_quotas
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_floatingip_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_network_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_port_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_router_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_security_group_rule_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_security_group_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_subnet_when_quotas_is_full
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
index ab0bfa98e..758547359 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
@@ -2,25 +2,93 @@
compute:
max_microversion: 2.65
compute-feature-enabled:
- shelve: false
- vnc_console: false
- cold_migration: false
- block_migration_for_live_migration: true
+ attach_encrypted_volume: false
+ block_migration_for_live_migration: false
+ block_migrate_cinder_iscsi: false
+ change_password: false
+ cold_migration: true
+ config_drive: true
+ console_output: true
+ disk_config: true
+ enable_instance_password: true
+ interface_attach: true
+ live_migration: true
+ live_migrate_back_and_forth: false
+ metadata_service: true
+ pause: true
+ personality: false
+ rdp_console: false
+ rescue: true
+ resize: true
+ scheduler_available_filters: "RetryFilter,AvailabilityZoneFilter,\
+ ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,\
+ ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,\
+ DifferentHostFilter"
+ serial_console: false
+ shelve: true
+ snapshot: true
+ spice_console: false
+ suspend: true
+ swap_volume: false
+ vnc_console: true
+ volume_backed_live_migration: false
+ volume_multiattach: false
identity:
+ auth_version: v3
user_unique_last_password_count: 2
user_lockout_duration: 10
user_lockout_failure_attempts: 2
identity-feature-enabled:
+ trust: true
api_v2: false
api_v2_admin: false
security_compliance: true
+ federation: false
+ external_idp: false
+ project_tags: true
+ application_credentials: true
image-feature-enabled:
api_v2: true
api_v1: false
+network-feature-enabled:
+ port_admin_state_change: true
+ port_security: true
placement:
- max_microversion: 1.30
+ max_microversion: "1.30"
+validation:
+ image_ssh_user: cirros
+ ssh_timeout: 196
+ ip_version_for_ssh: 4
+ run_validation: true
volume:
max_microversion: 3.55
- storage_protocol: iSCSI
+ storage_protocol: ceph
+ manage_volume_ref: source-name,volume-%s
+ manage_snapshot_ref: source-name,snapshot-%s
volume-feature-enabled:
+ multi_backend: false
backup: true
+ snapshot: true
+ clone: true
+ manage_snapshot: true
+ manage_volume: true
+ extend_attached_volume: false
+ consistency_group: false
+ volume_revert: true
+neutron_plugin_options:
+ agent_availability_zone: nova
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+ provider_vlans: foo,
+object-storage-feature-enabled:
+ discoverable_apis: "account_quotas,formpost,bulk_upload,bulk_delete,\
+ tempurl,crossdomain,container_quotas,staticweb,account_quotas,slo"
+ object_versioning: true
+ discoverability: true
+heat_plugin:
+ skip_functional_test_list: EncryptionVolTypeTest
+ skip_scenario_test_list: "AodhAlarmTest,SoftwareConfigIntegrationTest,\
+ VolumeBackupRestoreIntegrationTest,CfnInitIntegrationTest,\
+ LoadBalancerTest"
+ auth_version: 3
+heat_features_enabled:
+ multi_cloud: false
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml
new file mode 100644
index 000000000..37aa2810b
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml
@@ -0,0 +1,94 @@
+---
+compute:
+ max_microversion: 2.65
+compute-feature-enabled:
+ attach_encrypted_volume: false
+ block_migration_for_live_migration: false
+ block_migrate_cinder_iscsi: false
+ change_password: false
+ cold_migration: true
+ config_drive: true
+ console_output: true
+ disk_config: true
+ enable_instance_password: true
+ interface_attach: true
+ live_migration: true
+ live_migrate_back_and_forth: false
+ metadata_service: true
+ pause: true
+ personality: false
+ rdp_console: false
+ rescue: true
+ resize: true
+ scheduler_available_filters: "RetryFilter,AvailabilityZoneFilter,\
+ ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,\
+ ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,\
+ DifferentHostFilter"
+ serial_console: false
+ shelve: true
+ snapshot: true
+ spice_console: false
+ suspend: true
+ swap_volume: false
+ vnc_console: true
+ volume_backed_live_migration: false
+ volume_multiattach: false
+identity:
+ auth_version: v3
+ user_unique_last_password_count: 2
+ user_lockout_duration: 10
+ user_lockout_failure_attempts: 2
+identity-feature-enabled:
+ trust: true
+ api_v2: false
+ api_v2_admin: false
+ security_compliance: true
+ federation: false
+ external_idp: false
+ project_tags: true
+ application_credentials: true
+image-feature-enabled:
+ api_v2: true
+ api_v1: false
+network-feature-enabled:
+ port_admin_state_change: true
+ port_security: true
+placement:
+ max_microversion: "1.30"
+validation:
+ image_ssh_user: cirros
+ ssh_timeout: 196
+ ip_version_for_ssh: 4
+ run_validation: true
+volume:
+ max_microversion: 3.55
+ storage_protocol: ceph
+ manage_volume_ref: source-name,volume-%s
+ manage_snapshot_ref: source-name,snapshot-%s
+volume-feature-enabled:
+ multi_backend: false
+ backup: true
+ snapshot: true
+ clone: true
+ manage_snapshot: true
+ manage_volume: true
+ extend_attached_volume: false
+ consistency_group: false
+ volume_revert: true
+neutron_plugin_options:
+ agent_availability_zone: nova
+ available_type_drivers: flat,geneve,vlan,local
+ provider_vlans: public,
+object-storage-feature-enabled:
+ discoverable_apis: "account_quotas,formpost,bulk_upload,bulk_delete,\
+ tempurl,crossdomain,container_quotas,staticweb,account_quotas,slo"
+ object_versioning: true
+ discoverability: true
+heat_plugin:
+ skip_functional_test_list: EncryptionVolTypeTest
+ skip_scenario_test_list: "AodhAlarmTest,SoftwareConfigIntegrationTest,\
+ VolumeBackupRestoreIntegrationTest,CfnInitIntegrationTest,\
+ LoadBalancerTest"
+ auth_version: 3
+heat_features_enabled:
+ multi_cloud: false
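Editor note: both YAML files above are deployer overrides in which each top-level key names a tempest.conf section and each nested key/value becomes an option in that section. A minimal sketch of how such a mapping could be flattened into tempest.conf with configparser; the loader shown here is an assumption for illustration, not the functest implementation:

    import yaml
    from six.moves import configparser

    def apply_overrides(conf_file, overrides_yaml):
        """Write every section/option pair of the YAML into tempest.conf."""
        with open(overrides_yaml) as yfile:
            overrides = yaml.safe_load(yfile)
        rconfig = configparser.RawConfigParser()
        rconfig.read(conf_file)
        for section, options in overrides.items():
            if not rconfig.has_section(section):
                rconfig.add_section(section)
            for key, value in options.items():
                # str() keeps booleans and numbers writable by configparser
                rconfig.set(section, key, str(value))
        with open(conf_file, 'w') as cfile:
            rconfig.write(cfile)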
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index faf2ab5ca..48c673673 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -21,6 +21,7 @@ import subprocess
import time
import pkg_resources
+import six
from six.moves import configparser
from xtesting.core import testcase
import yaml
@@ -50,6 +51,9 @@ class TempestCommon(singlevm.VmReady2):
tempest_blacklist = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml')
+ tempest_public_blacklist = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml')
def __init__(self, **kwargs):
if "case_name" not in kwargs:
@@ -95,6 +99,7 @@ class TempestCommon(singlevm.VmReady2):
except Exception: # pylint: disable=broad-except
pass
self.deny_skipping = kwargs.get("deny_skipping", False)
+ self.tests_count = kwargs.get("tests_count", 0)
def check_services(self):
"""Check the mandatory services."""
@@ -195,9 +200,16 @@ class TempestCommon(singlevm.VmReady2):
cmd = ("rally verify list-verifiers | awk '/" +
getattr(config.CONF, 'tempest_verifier_name') +
"/ {print $2}'")
- proc = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ if six.PY3:
+ # pylint: disable=no-member
+ proc = subprocess.Popen(
+ cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.DEVNULL)
+ else:
+ with open(os.devnull, 'wb') as devnull:
+ proc = subprocess.Popen(
+ cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=devnull)
verifier_uuid = proc.stdout.readline().rstrip()
return verifier_uuid.decode("utf-8")
@@ -266,27 +278,8 @@ class TempestCommon(singlevm.VmReady2):
# enable multinode tests
rconfig.set('compute', 'min_compute_nodes', compute_cnt)
rconfig.set('compute-feature-enabled', 'live_migration', True)
- filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',
- 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',
- 'ServerGroupAntiAffinityFilter',
- 'ServerGroupAffinityFilter']
- rconfig.set(
- 'compute-feature-enabled', 'scheduler_available_filters',
- functest_utils.convert_list_to_ini(filters))
if os.environ.get('OS_REGION_NAME'):
rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
- if env.get("NEW_USER_ROLE").lower() != "member":
- rconfig.set(
- 'auth', 'tempest_roles',
- functest_utils.convert_list_to_ini([env.get("NEW_USER_ROLE")]))
- if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
- rconfig.set('auth', 'use_dynamic_credentials', False)
- account_file = os.path.join(
- getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
- assert os.path.exists(
- account_file), "{} doesn't exist".format(account_file)
- rconfig.set('auth', 'test_accounts_file', account_file)
- rconfig.set('identity', 'auth_version', 'v3')
rconfig.set('identity', 'admin_role', admin_role_name)
rconfig.set('identity', 'default_domain_id', domain_id)
if not rconfig.has_section('network'):
@@ -350,7 +343,7 @@ class TempestCommon(singlevm.VmReady2):
LOGGER.info("%s\n%s", cmd, output.decode("utf-8"))
os.remove('/etc/tempest.conf')
- def apply_tempest_blacklist(self):
+ def apply_tempest_blacklist(self, black_list):
"""Exclude blacklisted test cases."""
LOGGER.debug("Applying tempest blacklist...")
if os.path.exists(self.raw_list):
@@ -363,7 +356,7 @@ class TempestCommon(singlevm.VmReady2):
deploy_scenario = env.get('DEPLOY_SCENARIO')
if bool(deploy_scenario):
# if DEPLOY_SCENARIO is set we read the file
- black_list_file = open(self.tempest_blacklist)
+ black_list_file = open(black_list)
black_list_yaml = yaml.safe_load(black_list_file)
black_list_file.close()
for item in black_list_yaml:
@@ -500,14 +493,47 @@ class TempestCommon(singlevm.VmReady2):
with open(rally_conf, 'w') as config_file:
rconfig.write(config_file)
+ def update_auth_section(self):
+ """Update auth section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section("auth"):
+ rconfig.add_section("auth")
+ if env.get("NEW_USER_ROLE").lower() != "member":
+ tempest_roles = []
+ if rconfig.has_option("auth", "tempest_roles"):
+ tempest_roles = functest_utils.convert_ini_to_list(
+ rconfig.get("auth", "tempest_roles"))
+ rconfig.set(
+ 'auth', 'tempest_roles',
+ functest_utils.convert_list_to_ini(
+ [env.get("NEW_USER_ROLE")] + tempest_roles))
+ if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
+ rconfig.set('auth', 'use_dynamic_credentials', False)
+ account_file = os.path.join(
+ getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
+ assert os.path.exists(
+ account_file), "{} doesn't exist".format(account_file)
+ rconfig.set('auth', 'test_accounts_file', account_file)
+ if env.get('NO_TENANT_NETWORK').lower() == 'true':
+ rconfig.set('auth', 'create_isolated_networks', False)
+ with open(self.conf_file, 'w') as config_file:
+ rconfig.write(config_file)
+
def update_network_section(self):
"""Update network section in tempest.conf"""
rconfig = configparser.RawConfigParser()
rconfig.read(self.conf_file)
- if not rconfig.has_section('network'):
- rconfig.add_section('network')
- rconfig.set('network', 'public_network_id', self.ext_net.id)
- rconfig.set('network', 'floating_network_name', self.ext_net.name)
+ if self.ext_net:
+ if not rconfig.has_section('network'):
+ rconfig.add_section('network')
+ rconfig.set('network', 'public_network_id', self.ext_net.id)
+ rconfig.set('network', 'floating_network_name', self.ext_net.name)
+ rconfig.set('network-feature-enabled', 'floating_ips', True)
+ else:
+ if not rconfig.has_section('network-feature-enabled'):
+ rconfig.add_section('network-feature-enabled')
+ rconfig.set('network-feature-enabled', 'floating_ips', False)
with open(self.conf_file, 'w') as config_file:
rconfig.write(config_file)
@@ -517,7 +543,24 @@ class TempestCommon(singlevm.VmReady2):
rconfig.read(self.conf_file)
if not rconfig.has_section('compute'):
rconfig.add_section('compute')
- rconfig.set('compute', 'fixed_network_name', self.network.name)
+ rconfig.set(
+ 'compute', 'fixed_network_name',
+ self.network.name if self.network else env.get("EXTERNAL_NETWORK"))
+ with open(self.conf_file, 'w') as config_file:
+ rconfig.write(config_file)
+
+ def update_validation_section(self):
+ """Update validation section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('validation'):
+ rconfig.add_section('validation')
+ rconfig.set(
+ 'validation', 'connect_method',
+ 'floating' if self.ext_net else 'fixed')
+ rconfig.set(
+ 'validation', 'network_for_ssh',
+ self.network.name if self.network else env.get("EXTERNAL_NETWORK"))
with open(self.conf_file, 'w') as config_file:
rconfig.write(config_file)
@@ -567,8 +610,8 @@ class TempestCommon(singlevm.VmReady2):
self.deployment_dir = self.get_verifier_deployment_dir(
self.verifier_id, self.deployment_id)
- compute_cnt = len(self.orig_cloud.list_hypervisors())
-
+ compute_cnt = self.count_hypervisors() if self.count_hypervisors(
+ ) <= 10 else 10
self.image_alt = self.publish_image_alt()
self.flavor_alt = self.create_flavor_alt()
LOGGER.debug("flavor: %s", self.flavor_alt)
@@ -585,8 +628,10 @@ class TempestCommon(singlevm.VmReady2):
flavor_alt_id=self.flavor_alt.id,
admin_role_name=self.role_name, cidr=self.cidr,
domain_id=self.project.domain.id)
+ self.update_auth_section()
self.update_network_section()
self.update_compute_section()
+ self.update_validation_section()
self.update_scenario_section()
self.backup_tempest_config(self.conf_file, self.res_dir)
@@ -603,7 +648,10 @@ class TempestCommon(singlevm.VmReady2):
shutil.copy("/etc/rally/rally.conf", self.res_dir)
self.configure(**kwargs)
self.generate_test_list(**kwargs)
- self.apply_tempest_blacklist()
+ self.apply_tempest_blacklist(TempestCommon.tempest_blacklist)
+ if env.get('PUBLIC_ENDPOINT_ONLY').lower() == 'true':
+ self.apply_tempest_blacklist(
+ TempestCommon.tempest_public_blacklist)
self.run_verifier_tests(**kwargs)
self.parse_verifier_result()
rally.RallyBase.verify_report(
@@ -637,16 +685,105 @@ class TempestCommon(singlevm.VmReady2):
skips = self.details.get("skipped_number", 0)
if skips > 0 and self.deny_skipping:
return testcase.TestCase.EX_TESTCASE_FAILED
+ if self.tests_count and (
+ self.details.get("tests_number", 0) != self.tests_count):
+ return testcase.TestCase.EX_TESTCASE_FAILED
return super(TempestCommon, self).is_successful()
-class TempestScenario(TempestCommon):
- """Tempest scenario testcase implementation class."""
+class TempestHorizon(TempestCommon):
+ """Tempest Horizon testcase implementation class."""
- quota_instances = -1
+ def configure(self, **kwargs):
+ super(TempestHorizon, self).configure(**kwargs)
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('dashboard'):
+ rconfig.add_section('dashboard')
+ rconfig.set('dashboard', 'dashboard_url', env.get('DASHBOARD_URL'))
+ with open(self.conf_file, 'w') as config_file:
+ rconfig.write(config_file)
+ self.backup_tempest_config(self.conf_file, self.res_dir)
- def run(self, **kwargs):
- self.orig_cloud.set_compute_quotas(
- self.project.project.name,
- instances=self.quota_instances)
- return super(TempestScenario, self).run(**kwargs)
+
+class TempestHeat(TempestCommon):
+ """Tempest Heat testcase implementation class."""
+
+ filename_alt = ('/home/opnfv/functest/images/'
+ 'Fedora-Cloud-Base-30-1.2.x86_64.qcow2')
+ flavor_alt_ram = 512
+ flavor_alt_vcpus = 1
+ flavor_alt_disk = 4
+
+ def __init__(self, **kwargs):
+ super(TempestHeat, self).__init__(**kwargs)
+ self.user2 = self.orig_cloud.create_user(
+ name='{}-user2_{}'.format(self.case_name, self.project.guid),
+ password=self.project.password,
+ domain_id=self.project.domain.id)
+ self.orig_cloud.grant_role(
+ self.role_name, user=self.user2.id,
+ project=self.project.project.id, domain=self.project.domain.id)
+ if not self.orig_cloud.get_role("heat_stack_owner"):
+ self.role = self.orig_cloud.create_role("heat_stack_owner")
+ self.orig_cloud.grant_role(
+ "heat_stack_owner", user=self.user2.id,
+ project=self.project.project.id,
+ domain=self.project.domain.id)
+
+ def configure(self, **kwargs):
+ assert self.user2
+ super(TempestHeat, self).configure(**kwargs)
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('heat_plugin'):
+ rconfig.add_section('heat_plugin')
+ # It fails if region and domain ids are unset
+ rconfig.set(
+ 'heat_plugin', 'region',
+ os.environ.get('OS_REGION_NAME', 'RegionOne'))
+ rconfig.set('heat_plugin', 'auth_url', os.environ["OS_AUTH_URL"])
+ rconfig.set('heat_plugin', 'project_domain_id', self.project.domain.id)
+ rconfig.set('heat_plugin', 'user_domain_id', self.project.domain.id)
+ rconfig.set(
+ 'heat_plugin', 'project_domain_name', self.project.domain.name)
+ rconfig.set(
+ 'heat_plugin', 'user_domain_name', self.project.domain.name)
+ rconfig.set('heat_plugin', 'username', self.user2.name)
+ rconfig.set('heat_plugin', 'password', self.project.password)
+ rconfig.set('heat_plugin', 'project_name', self.project.project.name)
+ rconfig.set('heat_plugin', 'admin_username', self.project.user.name)
+ rconfig.set('heat_plugin', 'admin_password', self.project.password)
+ rconfig.set(
+ 'heat_plugin', 'admin_project_name', self.project.project.name)
+ rconfig.set('heat_plugin', 'image_ref', self.image_alt.id)
+ rconfig.set('heat_plugin', 'instance_type', self.flavor_alt.id)
+ rconfig.set('heat_plugin', 'minimal_image_ref', self.image.id)
+ rconfig.set('heat_plugin', 'minimal_instance_type', self.flavor.id)
+ if self.ext_net:
+ rconfig.set(
+ 'heat_plugin', 'floating_network_name', self.ext_net.name)
+ if self.network:
+ rconfig.set('heat_plugin', 'fixed_network_name', self.network.name)
+ rconfig.set('heat_plugin', 'fixed_subnet_name', self.subnet.name)
+ rconfig.set('heat_plugin', 'network_for_ssh', self.network.name)
+ else:
+ LOGGER.warning(
+ 'No tenant network created. '
+ 'Trying EXTERNAL_NETWORK as a fallback')
+ rconfig.set(
+ 'heat_plugin', 'fixed_network_name',
+ env.get("EXTERNAL_NETWORK"))
+ rconfig.set(
+ 'heat_plugin', 'network_for_ssh', env.get("EXTERNAL_NETWORK"))
+ with open(self.conf_file, 'w') as config_file:
+ rconfig.write(config_file)
+ self.backup_tempest_config(self.conf_file, self.res_dir)
+
+ def clean(self):
+ """
+ Cleanup all OpenStack objects. Should be called on completion.
+ """
+ super(TempestHeat, self).clean()
+ if self.user2:
+ self.orig_cloud.delete_user(self.user2.id)
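
Note on the verifier-UUID hunk above: using subprocess.DEVNULL on Python 3 and an explicit os.devnull file on Python 2 is the standard portable way to discard stderr. A minimal standalone sketch of the same pattern, assuming the six dependency tempest.py already imports:

    import os
    import subprocess

    import six

    def first_stdout_line(cmd):
        """Run a shell command, discard stderr and return its first stdout line."""
        if six.PY3:
            proc = subprocess.Popen(  # pylint: disable=no-member
                cmd, shell=True, stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL)
        else:
            with open(os.devnull, 'wb') as devnull:
                proc = subprocess.Popen(
                    cmd, shell=True, stdout=subprocess.PIPE, stderr=devnull)
        return proc.stdout.readline().rstrip().decode("utf-8")
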
diff --git a/functest/opnfv_tests/openstack/vmtp/vmtp.py b/functest/opnfv_tests/openstack/vmtp/vmtp.py
index 2fd4e089c..cfbd134c7 100644
--- a/functest/opnfv_tests/openstack/vmtp/vmtp.py
+++ b/functest/opnfv_tests/openstack/vmtp/vmtp.py
@@ -33,6 +33,7 @@ from xtesting.core import testcase
from functest.core import singlevm
from functest.utils import env
+from functest.utils import functest_utils
class Vmtp(singlevm.VmReady2):
@@ -50,6 +51,7 @@ class Vmtp(singlevm.VmReady2):
flavor_vcpus = 1
flavor_disk = 0
create_server_timeout = 300
+ ssh_retry_timeout = 240
def __init__(self, **kwargs):
if "case_name" not in kwargs:
@@ -60,7 +62,7 @@ class Vmtp(singlevm.VmReady2):
(_, self.pubkey_filename) = tempfile.mkstemp()
def check_requirements(self):
- if len(self.orig_cloud.list_hypervisors()) < 2:
+ if self.count_hypervisors() < 2:
self.__logger.warning("Vmtp requires at least 2 hypervisors")
self.is_skipped = True
self.project.clean()
@@ -122,6 +124,7 @@ class Vmtp(singlevm.VmReady2):
self.guid)
vmtp_conf["dns_nameservers"] = [env.get('NAMESERVER')]
vmtp_conf["generic_retry_count"] = self.create_server_timeout // 2
+ vmtp_conf["ssh_retry_count"] = self.ssh_retry_timeout // 2
conf.write(yaml.dump(vmtp_conf))
def run_vmtp(self):
@@ -135,6 +138,8 @@ class Vmtp(singlevm.VmReady2):
OS_USERNAME=self.project.user.name,
OS_PROJECT_NAME=self.project.project.name,
OS_PROJECT_ID=self.project.project.id,
+ OS_PROJECT_DOMAIN_NAME=self.project.domain.name,
+ OS_USER_DOMAIN_NAME=self.project.domain.name,
OS_PASSWORD=self.project.password)
if not new_env["OS_AUTH_URL"].endswith(('v3', 'v3/')):
new_env["OS_AUTH_URL"] = "{}/v3".format(new_env["OS_AUTH_URL"])
@@ -145,6 +150,12 @@ class Vmtp(singlevm.VmReady2):
pass
cmd = ['vmtp', '-d', '--json', '{}/vmtp.json'.format(self.res_dir),
'-c', self.config]
+ if env.get("VMTP_HYPERVISORS"):
+ hypervisors = functest_utils.convert_ini_to_list(
+ env.get("VMTP_HYPERVISORS"))
+ for hypervisor in hypervisors:
+ cmd.extend(["--hypervisor", hypervisor])
+ self.__logger.debug("cmd: %s", cmd)
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT, env=new_env).decode("utf-8")
self.__logger.info("%s\n%s", " ".join(cmd), output)
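
The new VMTP_HYPERVISORS handling expands a comma-separated environment value into repeated --hypervisor options. A minimal sketch of that expansion, assuming convert_ini_to_list behaves like a comma split (the helper name below is hypothetical):

    def hypervisor_args(value):
        """Expand a comma-separated list into repeated --hypervisor options."""
        args = []
        for hypervisor in (item.strip() for item in value.split(',') if item.strip()):
            args.extend(['--hypervisor', hypervisor])
        return args

    # e.g. cmd += hypervisor_args('compute1,compute2')
    # -> ['--hypervisor', 'compute1', '--hypervisor', 'compute2']
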
diff --git a/functest/opnfv_tests/openstack/vping/vping_ssh.py b/functest/opnfv_tests/openstack/vping/vping_ssh.py
index 6420013a0..a7bbfc23c 100644
--- a/functest/opnfv_tests/openstack/vping/vping_ssh.py
+++ b/functest/opnfv_tests/openstack/vping/vping_ssh.py
@@ -44,12 +44,14 @@ class VPingSSH(singlevm.SingleVm2):
Returns: ping exit codes
"""
assert self.ssh
+ if not self.check_regex_in_console(self.vm2.name):
+ return 1
(_, stdout, stderr) = self.ssh.exec_command(
'ping -c 1 {}'.format(
self.vm2.private_v4 or self.vm2.addresses[
self.network.name][0].addr))
- self.__logger.info("output:\n%s", stdout.read())
- self.__logger.info("error:\n%s", stderr.read())
+ self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.info("error:\n%s", stderr.read().decode("utf-8"))
return stdout.channel.recv_exit_status()
def clean(self):
diff --git a/functest/opnfv_tests/vnf/epc/juju_epc.py b/functest/opnfv_tests/vnf/epc/juju_epc.py
index 7c8a925bb..5049bd0bb 100644
--- a/functest/opnfv_tests/vnf/epc/juju_epc.py
+++ b/functest/opnfv_tests/vnf/epc/juju_epc.py
@@ -14,11 +14,11 @@ import os
import time
import json
import re
-import subprocess
import sys
from copy import deepcopy
import pkg_resources
+import scp
from functest.core import singlevm
from functest.utils import config
@@ -42,7 +42,7 @@ CREDS_TEMPLATE2 = """credentials:
default-credential: abot-epc
abot-epc:
auth-type: userpass
- password: {pass}
+ password: '{pass}'
project-domain-name: {project_domain_n}
tenant-name: {tenant_n}"""
@@ -51,14 +51,14 @@ CREDS_TEMPLATE = """credentials:
default-credential: abot-epc
abot-epc:
auth-type: userpass
- password: {pass}
+ password: '{pass}'
project-domain-name: {project_domain_n}
tenant-name: {tenant_n}
user-domain-name: {user_domain_n}
username: {user_n}"""
-class JujuEpc(singlevm.VmReady2):
+class JujuEpc(singlevm.SingleVm2):
# pylint:disable=too-many-instance-attributes
"""Abot EPC deployed with JUJU Orchestrator Case"""
@@ -74,11 +74,10 @@ class JujuEpc(singlevm.VmReady2):
flavor_ram = 2048
flavor_vcpus = 1
flavor_disk = 10
-
flavor_alt_ram = 4096
flavor_alt_vcpus = 1
flavor_alt_disk = 10
-
+ username = 'ubuntu'
juju_timeout = '4800'
def __init__(self, **kwargs):
@@ -146,18 +145,20 @@ class JujuEpc(singlevm.VmReady2):
self.image_alt = None
self.flavor_alt = None
- def check_requirements(self):
- if not os.path.exists("/src/epc-requirements/go/bin/juju"):
- self.__logger.warn(
- "Juju cannot be cross-compiled (arm and arm64) from the time "
- "being")
- self.is_skipped = True
- self.project.clean()
- if env.get('NEW_USER_ROLE').lower() == "admin":
- self.__logger.warn(
- "Defining NEW_USER_ROLE=admin will easily break the testcase "
- "because Juju doesn't manage tenancy (e.g. subnet "
- "overlapping)")
+ def _install_juju(self):
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'sudo snap install juju --channel=2.3/stable --classic')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
+
+ def _install_juju_wait(self):
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'sudo apt-get update && sudo apt-get install python3-pip -y && '
+ 'sudo pip3 install juju_wait===2.6.4')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def _register_cloud(self):
assert self.public_auth_url
@@ -169,9 +170,13 @@ class JujuEpc(singlevm.VmReady2):
'RegionOne')}
with open(clouds_yaml, 'w') as yfile:
yfile.write(CLOUD_TEMPLATE.format(**cloud_data))
- cmd = ['juju', 'add-cloud', 'abot-epc', '-f', clouds_yaml, '--replace']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(clouds_yaml, remote_path='~/')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju add-cloud abot-epc -f clouds.yaml --replace')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def _register_credentials(self):
self.__logger.info("Creating Credentials for Abot-epc .....")
@@ -186,46 +191,36 @@ class JujuEpc(singlevm.VmReady2):
"user_domain_name", "Default")}
with open(credentials_yaml, 'w') as yfile:
yfile.write(CREDS_TEMPLATE.format(**creds_data))
- cmd = ['juju', 'add-credential', 'abot-epc', '-f', credentials_yaml,
- '--replace']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
- def prepare(self):
- """Prepare testcase (Additional pre-configuration steps)."""
- assert self.public_auth_url
- self.__logger.info("Additional pre-configuration steps")
- try:
- os.makedirs(self.res_dir)
- except OSError as ex:
- if ex.errno != errno.EEXIST:
- self.__logger.exception("Cannot create %s", self.res_dir)
- raise Exception
-
- self.__logger.info("ENV:\n%s", env.string())
- self._register_cloud()
- self._register_credentials()
-
- def publish_image(self, name=None):
- image = super(JujuEpc, self).publish_image(name)
- cmd = ['juju', 'metadata', 'generate-image', '-d', '/root',
- '-i', image.id, '-s', 'xenial',
- '-r', self.cloud.region_name if self.cloud.region_name else (
- 'RegionOne'),
- '-u', self.public_auth_url]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- return image
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(credentials_yaml, remote_path='~/')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju add-credential abot-epc -f credentials.yaml '
+ ' --replace --debug')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
+
+ def _publish_image(self):
+ region_name = self.cloud.region_name if self.cloud.region_name else (
+ 'RegionOne')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju metadata generate-image -d /home/ubuntu '
+ '-i {} -s xenial -r {} -u {}'.format(
+ self.image.id, region_name, self.public_auth_url))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def publish_image_alt(self, name=None):
image_alt = super(JujuEpc, self).publish_image_alt(name)
- cmd = ['juju', 'metadata', 'generate-image', '-d', '/root',
- '-i', image_alt.id, '-s', 'trusty',
- '-r', self.cloud.region_name if self.cloud.region_name else (
- 'RegionOne'),
- '-u', self.public_auth_url]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ region_name = self.cloud.region_name if self.cloud.region_name else (
+ 'RegionOne')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju metadata generate-image -d /home/ubuntu '
+ '-i {} -s trusty -r {} -u {}'.format(
+ image_alt.id, region_name, self.public_auth_url))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
return image_alt
def deploy_orchestrator(self): # pylint: disable=too-many-locals
@@ -234,47 +229,41 @@ class JujuEpc(singlevm.VmReady2):
Bootstrap juju
"""
+ self._publish_image()
self.image_alt = self.publish_image_alt()
self.flavor_alt = self.create_flavor_alt()
self.__logger.info("Starting Juju Bootstrap process...")
- try:
- cmd = ['timeout', '-t', JujuEpc.juju_timeout,
- 'juju', 'bootstrap',
- 'abot-epc/{}'.format(
- self.cloud.region_name if self.cloud.region_name else (
- 'RegionOne')),
- 'abot-controller',
- '--agent-version', '2.3.9',
- '--metadata-source', '/root',
- '--constraints', 'mem=2G',
- '--bootstrap-series', 'xenial',
- '--config', 'network={}'.format(self.network.id),
- '--config', 'ssl-hostname-verification=false',
- '--config', 'external-network={}'.format(self.ext_net.id),
- '--config', 'use-floating-ip=true',
- '--config', 'use-default-secgroup=true',
- '--debug']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- except subprocess.CalledProcessError as cpe:
- self.__logger.error(
- "Exception with Juju Bootstrap: %s\n%s",
- cpe.cmd, cpe.output.decode("utf-8"))
- return False
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("Some issue with Juju Bootstrap ...")
- return False
-
- return True
+ region_name = self.cloud.region_name if self.cloud.region_name else (
+ 'RegionOne')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'timeout {} '
+ '/snap/bin/juju bootstrap abot-epc/{} abot-controller '
+ '--agent-version 2.3.9 --metadata-source /home/ubuntu '
+ '--constraints mem=2G --bootstrap-series xenial '
+ '--config network={} '
+ '--config ssl-hostname-verification=false '
+ '--config external-network={} '
+ '--config use-floating-ip=true '
+ '--config use-default-secgroup=true '
+ '--debug'.format(
+ JujuEpc.juju_timeout, region_name, self.network.id,
+ self.ext_net.id))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def check_app(self, name='abot-epc-basic', status='active'):
"""Check application status."""
- cmd = ['juju', 'status', '--format', 'short', name]
for i in range(10):
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju status --format short {}'.format(name))
+ output = stdout.read().decode("utf-8")
+ self.__logger.debug("stdout:\n%s", output)
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ continue
ret = re.search(
- r'(?=workload:({})\))'.format(status), output.decode("utf-8"))
+ r'(?=workload:({})\))'.format(status), output)
if ret:
self.__logger.info("%s workload is %s", name, status)
break
@@ -289,72 +278,80 @@ class JujuEpc(singlevm.VmReady2):
def deploy_vnf(self):
"""Deploy ABOT-OAI-EPC."""
self.__logger.info("Upload VNFD")
- descriptor = self.vnf['descriptor']
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(
+ '/src/epc-requirements/abot_charm', remote_path='~/',
+ recursive=True)
self.__logger.info("Deploying Abot-epc bundle file ...")
- cmd = ['juju', 'deploy', '{}'.format(descriptor.get('file_name'))]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- self.__logger.info("Waiting for instances .....")
- try:
- cmd = ['timeout', '-t', JujuEpc.juju_timeout, 'juju-wait']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- self.__logger.info("Deployed Abot-epc on Openstack")
- except subprocess.CalledProcessError as cpe:
- self.__logger.error(
- "Exception with Juju VNF Deployment: %s\n%s",
- cpe.cmd, cpe.output.decode("utf-8"))
- return False
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("Some issue with the VNF Deployment ..")
- return False
-
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'sudo mkdir -p /src/epc-requirements && '
+ 'sudo mv abot_charm /src/epc-requirements/abot_charm && '
+ '/snap/bin/juju deploy '
+ '/src/epc-requirements/abot_charm/functest-abot-epc-bundle/'
+ 'bundle.yaml')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'PATH=/snap/bin/:$PATH '
+ 'timeout {} juju-wait'.format(JujuEpc.juju_timeout))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
self.__logger.info("Checking status of ABot and EPC units ...")
- cmd = ['juju', 'status']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.debug("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command('/snap/bin/juju status')
+ output = stdout.read().decode("utf-8")
+ self.__logger.debug("stdout:\n%s", output)
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
for app in ['abot-epc-basic', 'oai-epc', 'oai-hss']:
if not self.check_app(app):
return False
-
- self.__logger.info("Transferring the feature files to Abot_node ...")
- cmd = ['timeout', '-t', JujuEpc.juju_timeout,
- 'juju', 'scp', '--', '-r', '-v',
- '{}/featureFiles'.format(self.case_dir), 'abot-epc-basic/0:~/']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
- self.__logger.info("Copying the feature files within Abot_node ")
- cmd = ['timeout', '-t', JujuEpc.juju_timeout,
- 'juju', 'ssh', 'abot-epc-basic/0',
- 'sudo', 'cp', '-vfR', '~/featureFiles/*',
- '/etc/rebaca-test-suite/featureFiles']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- return True
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(
+ '{}/featureFiles'.format(self.case_dir), remote_path='~/',
+ recursive=True)
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'timeout {} /snap/bin/juju scp -- -r -v ~/featureFiles '
+ 'abot-epc-basic/0:/etc/rebaca-test-suite/'.format(
+ JujuEpc.juju_timeout))
+ output = stdout.read().decode("utf-8")
+ self.__logger.debug("stdout:\n%s", output)
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def test_vnf(self):
"""Run test on ABoT."""
start_time = time.time()
- self.__logger.info("Running VNF Test cases....")
- cmd = ['juju', 'run-action', 'abot-epc-basic/0', 'run',
- 'tagnames={}'.format(self.details['test_vnf']['tag_name'])]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
- cmd = ['timeout', '-t', JujuEpc.juju_timeout, 'juju-wait']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju run-action abot-epc-basic/0 '
+ 'run tagnames={}'.format(self.details['test_vnf']['tag_name']))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'PATH=/snap/bin/:$PATH '
+ 'timeout {} juju-wait'.format(JujuEpc.juju_timeout))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
duration = time.time() - start_time
self.__logger.info("Getting results from Abot node....")
- cmd = ['timeout', '-t', JujuEpc.juju_timeout,
- 'juju', 'scp', '--', '-v',
- 'abot-epc-basic/0:'
- '/var/lib/abot-epc-basic/artifacts/TestResults.json',
- '{}/.'.format(self.res_dir)]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'timeout {} /snap/bin/juju scp -- -v abot-epc-basic/0:'
+ '/var/lib/abot-epc-basic/artifacts/TestResults.json .'.format(
+ JujuEpc.juju_timeout))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.get('TestResults.json', self.res_dir)
self.__logger.info("Parsing the Test results...")
res = (process_abot_test_result('{}/TestResults.json'.format(
self.res_dir)))
@@ -369,44 +366,41 @@ class JujuEpc(singlevm.VmReady2):
short_result['failures'], short_result['skipped'])
return True
- def run(self, **kwargs):
- self.start_time = time.time()
+ def execute(self):
+ """Prepare testcase (Additional pre-configuration steps)."""
+ assert self.public_auth_url
+ self.__logger.info("Additional pre-configuration steps")
+ try:
+ os.makedirs(self.res_dir)
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ self.__logger.exception("Cannot create %s", self.res_dir)
+ raise Exception
+ self.__logger.info("ENV:\n%s", env.string())
try:
- assert super(JujuEpc, self).run(**kwargs) == self.EX_OK
- self.prepare()
- if (self.deploy_orchestrator() and
- self.deploy_vnf() and
- self.test_vnf()):
- self.stop_time = time.time()
- self.result = 100
- return self.EX_OK
- self.result = 0
- self.stop_time = time.time()
- return self.EX_TESTCASE_FAILED
+ assert self._install_juju()
+ assert self._install_juju_wait()
+ assert self._register_cloud()
+ assert self._register_credentials()
+ assert self.deploy_orchestrator()
+ assert self.deploy_vnf()
+ assert self.test_vnf()
except Exception: # pylint: disable=broad-except
- self.stop_time = time.time()
- self.__logger.exception("Exception on VNF testing")
- return self.EX_TESTCASE_FAILED
+ self.__logger.exception("juju_epc failed")
+ return 1
+ return 0
def clean(self):
"""Clean created objects/functions."""
- try:
- cmd = ['juju', 'debug-log', '--replay', '--no-tail']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.debug(
- "%s\n%s", " ".join(cmd), output.decode("utf-8"))
- self.__logger.info("Destroying Orchestrator...")
- cmd = ['timeout', '-t', JujuEpc.juju_timeout,
- 'juju', 'destroy-controller', '-y', 'abot-controller',
- '--destroy-all-models']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- except subprocess.CalledProcessError as cpe:
- self.__logger.error(
- "Exception with Juju Cleanup: %s\n%s",
- cpe.cmd, cpe.output.decode("utf-8"))
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("General issue during the undeployment ..")
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju debug-log --replay --no-tail')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju destroy-controller -y abot-controller '
+ '--destroy-all-models')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
for fip in self.cloud.list_floating_ips():
self.cloud.delete_floating_ip(fip.id)
if self.image_alt:
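
Most of the rewritten juju_epc steps follow a single idiom: run a command over the already-open paramiko SSH connection, log both streams, and turn the remote exit status into a boolean. A standalone sketch of that idiom, assuming an established paramiko.SSHClient:

    import logging

    LOGGER = logging.getLogger(__name__)

    def ssh_run(ssh, command):
        """Run a remote command, log its output and return True on exit 0."""
        (_, stdout, stderr) = ssh.exec_command(command)
        LOGGER.debug("stdout:\n%s", stdout.read().decode("utf-8"))
        LOGGER.debug("stderr:\n%s", stderr.read().decode("utf-8"))
        return not stdout.channel.recv_exit_status()
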
diff --git a/functest/opnfv_tests/vnf/ims/heat_ims.py b/functest/opnfv_tests/vnf/ims/heat_ims.py
index 9ea9c5627..61f56c3ac 100644
--- a/functest/opnfv_tests/vnf/ims/heat_ims.py
+++ b/functest/opnfv_tests/vnf/ims/heat_ims.py
@@ -162,7 +162,7 @@ class HeatIms(singlevm.VmReady2):
server.public_v4, username=username,
key_filename=self.key_filename, timeout=timeout)
(_, stdout, _) = ssh.exec_command('sudo monit summary')
- self.__logger.info("output:\n%s", stdout.read())
+ self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
ssh.close()
def deploy_vnf(self):
diff --git a/functest/opnfv_tests/vnf/router/cloudify_vrouter.py b/functest/opnfv_tests/vnf/router/cloudify_vrouter.py
index b449d2d81..e9e2dc627 100644
--- a/functest/opnfv_tests/vnf/router/cloudify_vrouter.py
+++ b/functest/opnfv_tests/vnf/router/cloudify_vrouter.py
@@ -40,6 +40,8 @@ class CloudifyVrouter(cloudify.Cloudify):
flavor_alt_vcpus = 1
flavor_alt_disk = 3
+ check_console_loop = 12
+
cop_yaml = ("https://github.com/cloudify-cosmo/cloudify-openstack-plugin/"
"releases/download/2.14.7/plugin.yaml")
cop_wgn = ("https://github.com/cloudify-cosmo/cloudify-openstack-plugin/"
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index 226d4e69a..5ab82ecc4 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -103,15 +103,19 @@ class OSRallyTesting(unittest.TestCase):
mock_method.assert_called()
mock_os_makedirs.assert_called()
- def test_get_task_id_default(self):
- cmd_raw = b'Task 1: started'
- self.assertEqual(self.rally_base.get_task_id(cmd_raw),
- '1')
-
- def test_get_task_id_missing_id(self):
- cmd_raw = b''
- self.assertEqual(self.rally_base.get_task_id(cmd_raw),
- None)
+ @mock.patch('subprocess.check_output', return_value=b'1\n')
+ def test_get_task_id_default(self, *args):
+ tag = 'nova'
+ self.assertEqual(self.rally_base.get_task_id(tag), '1')
+ args[0].assert_called_with(
+ ['rally', 'task', 'list', '--tag', tag, '--uuids-only'])
+
+ @mock.patch('subprocess.check_output', return_value=b'\n')
+ def test_get_task_id_missing_id(self, *args):
+ tag = 'nova'
+ self.assertEqual(self.rally_base.get_task_id(tag), '')
+ args[0].assert_called_with(
+ ['rally', 'task', 'list', '--tag', tag, '--uuids-only'])
def test_task_succeed_fail(self):
json_raw = json.dumps({})
@@ -291,8 +295,8 @@ class OSRallyTesting(unittest.TestCase):
def test_prepare_run_flavor_alt_creation_failed(self, *args):
# pylint: disable=unused-argument
self.rally_base.stests = ['test1', 'test2']
- with mock.patch.object(self.rally_base.cloud,
- 'list_hypervisors') as mock_list_hyperv, \
+ with mock.patch.object(self.rally_base, 'count_hypervisors') \
+ as mock_list_hyperv, \
mock.patch.object(self.rally_base, 'create_flavor_alt',
side_effect=Exception) \
as mock_create_flavor:
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index 87438ae7c..71aa5a257 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -125,7 +125,8 @@ class OSTempestTesting(unittest.TestCase):
return_value=['test1', 'test2']):
self.tempestcommon.tempest_blacklist = Exception
os.environ['DEPLOY_SCENARIO'] = 'deploy_scenario'
- self.tempestcommon.apply_tempest_blacklist()
+ self.tempestcommon.apply_tempest_blacklist(
+ self.tempestcommon.tempest_blacklist)
obj = mock_open()
obj.write.assert_any_call('test1\n')
obj.write.assert_any_call('test2\n')
@@ -147,7 +148,8 @@ class OSTempestTesting(unittest.TestCase):
mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
'yaml.safe_load', return_value=item_dict):
os.environ['DEPLOY_SCENARIO'] = 'deploy_scenario'
- self.tempestcommon.apply_tempest_blacklist()
+ self.tempestcommon.apply_tempest_blacklist(
+ self.tempestcommon.tempest_blacklist)
obj = mock_open()
obj.write.assert_any_call('test1\n')
self.assertFalse(obj.write.assert_any_call('test2\n'))
diff --git a/functest/tests/unit/openstack/vping/test_vping_ssh.py b/functest/tests/unit/openstack/vping/test_vping_ssh.py
index 05482ed6b..98afe0853 100644
--- a/functest/tests/unit/openstack/vping/test_vping_ssh.py
+++ b/functest/tests/unit/openstack/vping/test_vping_ssh.py
@@ -61,22 +61,38 @@ class VpingSSHTesting(unittest.TestCase):
'{}-vm2_{}'.format(self.vping.case_name, self.vping.guid),
security_groups=[self.vping.sec.id])
- def test_execute_exc(self):
- self.vping.vm2 = munch.Munch(private_v4='127.0.0.1')
+ @mock.patch('functest.opnfv_tests.openstack.vping.vping_ssh.VPingSSH.'
+ 'check_regex_in_console', return_value=True)
+ def test_execute_exc(self, *args):
+ self.vping.vm2 = munch.Munch(private_v4='127.0.0.1', name='foo')
self.vping.ssh = mock.Mock()
self.vping.ssh.exec_command.side_effect = ssh_exception.SSHException
with self.assertRaises(ssh_exception.SSHException):
self.vping.execute()
self.vping.ssh.exec_command.assert_called_once_with(
'ping -c 1 {}'.format(self.vping.vm2.private_v4))
+ args[0].assert_called_once_with('foo')
+
+ @mock.patch('functest.opnfv_tests.openstack.vping.vping_ssh.VPingSSH.'
+ 'check_regex_in_console', return_value=False)
+ def test_execute_exc2(self, *args):
+ self.vping.vm2 = munch.Munch(private_v4='127.0.0.1', name='foo')
+ self.vping.ssh = mock.Mock()
+ self.vping.execute()
+ self.vping.ssh.exec_command.assert_not_called()
+ args[0].assert_called_once_with('foo')
def _test_execute(self, ret=0):
- self.vping.vm2 = munch.Munch(private_v4='127.0.0.1')
+ self.vping.vm2 = munch.Munch(private_v4='127.0.0.1', name='foo')
self.vping.ssh = mock.Mock()
stdout = mock.Mock()
stdout.channel.recv_exit_status.return_value = ret
self.vping.ssh.exec_command.return_value = (None, stdout, mock.Mock())
- self.assertEqual(self.vping.execute(), ret)
+ with mock.patch('functest.opnfv_tests.openstack.vping.vping_ssh.'
+ 'VPingSSH.check_regex_in_console',
+ return_value=True) as mock_check:
+ self.assertEqual(self.vping.execute(), ret)
+ mock_check.assert_called_once_with('foo')
self.vping.ssh.exec_command.assert_called_once_with(
'ping -c 1 {}'.format(self.vping.vm2.private_v4))
diff --git a/functest/tests/unit/vnf/ims/test_clearwater.py b/functest/tests/unit/vnf/ims/test_clearwater.py
index 435b172db..f590a2857 100644
--- a/functest/tests/unit/vnf/ims/test_clearwater.py
+++ b/functest/tests/unit/vnf/ims/test_clearwater.py
@@ -38,6 +38,7 @@ class ClearwaterTesting(unittest.TestCase):
'cookies': ""}
self.mock_post_200.configure_mock(**attrs)
+
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
diff --git a/functest/utils/env.py b/functest/utils/env.py
index ba8d6ce55..2e312726c 100644
--- a/functest/utils/env.py
+++ b/functest/utils/env.py
@@ -29,18 +29,19 @@ INPUTS = {
'NODE_NAME': env.INPUTS['NODE_NAME'],
'POD_ARCH': None,
'TEST_DB_URL': env.INPUTS['TEST_DB_URL'],
- 'ENERGY_RECORDER_API_URL': env.INPUTS['ENERGY_RECORDER_API_URL'],
- 'ENERGY_RECORDER_API_USER': env.INPUTS['ENERGY_RECORDER_API_USER'],
- 'ENERGY_RECORDER_API_PASSWORD': env.INPUTS['ENERGY_RECORDER_API_PASSWORD'],
'VOLUME_DEVICE_NAME': 'vdb',
'IMAGE_PROPERTIES': '',
'FLAVOR_EXTRA_SPECS': '',
'NAMESERVER': '8.8.8.8',
'NEW_USER_ROLE': 'Member',
'USE_DYNAMIC_CREDENTIALS': 'True',
- 'BLOCK_MIGRATION': 'True',
+ 'BLOCK_MIGRATION': 'False',
'CLEAN_ORPHAN_SECURITY_GROUPS': 'True',
- 'PUBLIC_ENDPOINT_ONLY': 'False'
+ 'SKIP_DOWN_HYPERVISORS': 'False',
+ 'PUBLIC_ENDPOINT_ONLY': 'False',
+ 'DASHBOARD_URL': '',
+ 'VMTP_HYPERVISORS': '',
+ 'NO_TENANT_NETWORK': 'False'
}
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index c953dca81..98121199b 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -11,10 +11,12 @@
from __future__ import print_function
import logging
+import os
import subprocess
import sys
import yaml
+from openstack.cloud import _utils
import six
LOGGER = logging.getLogger(__name__)
@@ -138,6 +140,51 @@ def get_openstack_version(cloud):
return "Unknown"
+def list_services(cloud):
+ # pylint: disable=protected-access
+ """Search Keystone services via $OS_INTERFACE.
+
+ It mainly conforms with `Shade
+ <https://docs.openstack.org/shade/latest>`_ but allows testing vs
+ public endpoints. It's worth mentioning that it doesn't support keystone
+ v2.
+
+ :returns: a list of ``munch.Munch`` containing the services description
+
+ :raises: ``OpenStackCloudException`` if something goes wrong during the
+ openstack API call.
+ """
+ url, key = '/services', 'services'
+ data = cloud._identity_client.get(
+ url, endpoint_filter={
+ 'interface': os.environ.get('OS_INTERFACE', 'public')},
+ error_message="Failed to list services")
+ services = cloud._get_and_munchify(key, data)
+ return _utils.normalize_keystone_services(services)
+
+
+def search_services(cloud, name_or_id=None, filters=None):
+ # pylint: disable=protected-access
+ """Search Keystone services ia $OS_INTERFACE.
+
+ It mainly conforms with `Shade
+ <https://docs.openstack.org/shade/latest>`_ but allows testing vs
+ public endpoints. It's worth mentioning that it doesn't support keystone
+ v2.
+
+ :param name_or_id: Name or id of the desired service.
+ :param filters: a dict containing additional filters to use. e.g.
+ {'type': 'network'}.
+
+ :returns: a list of ``munch.Munch`` containing the services description
+
+ :raises: ``OpenStackCloudException`` if something goes wrong during the
+ openstack API call.
+ """
+ services = list_services(cloud)
+ return _utils._filter_list(services, name_or_id, filters)
+
+
def convert_dict_to_ini(value):
"Convert dict to oslo.conf input"
assert isinstance(value, dict)
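
A possible usage of the two helpers above, filtering the Keystone catalog by service type; this is only a hedged sketch, since Functest builds its cloud object elsewhere (e.g. self.cloud in the testcases) and openstack.connect() is just one way to obtain a compatible one:

    import openstack

    from functest.utils import functest_utils

    cloud = openstack.connect()
    for service in functest_utils.search_services(
            cloud, filters={'type': 'identity'}):
        print(service.name, service.type)
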
diff --git a/requirements.txt b/requirements.txt
index 402b931e2..6894e639b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
pbr!=2.1.0 # Apache-2.0
PyYAML # MIT
-requests # Apache-2.0
+requests!=2.20.0 # Apache-2.0
robotframework>=3.0
scp
cloudify-rest-client
diff --git a/rtd-requirements.txt b/rtd-requirements.txt
deleted file mode 100644
index feba192d4..000000000
--- a/rtd-requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-git+https://gerrit.opnfv.org/gerrit/snaps#egg=snaps
--r requirements.txt
--r test-requirements.txt
diff --git a/setup.cfg b/setup.cfg
index 7a7c41d61..2934bf11c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -26,7 +26,8 @@ xtesting.testcase =
cinder_test = functest.opnfv_tests.openstack.cinder.cinder_test:CinderCheck
odl = functest.opnfv_tests.sdn.odl.odl:ODLTests
tempest_common = functest.opnfv_tests.openstack.tempest.tempest:TempestCommon
- tempest_scenario = functest.opnfv_tests.openstack.tempest.tempest:TempestScenario
+ tempest_horizon = functest.opnfv_tests.openstack.tempest.tempest:TempestHorizon
+ tempest_heat = functest.opnfv_tests.openstack.tempest.tempest:TempestHeat
rally_sanity = functest.opnfv_tests.openstack.rally.rally:RallySanity
refstack_defcore = functest.opnfv_tests.openstack.refstack.refstack:Refstack
patrole = functest.opnfv_tests.openstack.patrole.patrole:Patrole
diff --git a/test-requirements.txt b/test-requirements.txt
index db30c7f85..7e1b2b3e5 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -4,9 +4,10 @@
coverage!=4.4 # Apache-2.0
mock # BSD
nose # LGPL
-flake8<2.6.0,>=2.5.4 # MIT
+flake8 # MIT
pylint # GPLv2
-sphinx!=1.6.6,!=1.6.7 # BSD
+sphinx!=1.6.6,!=1.6.7,<2.0.0;python_version=='2.7' # BSD
+sphinx!=1.6.6,!=1.6.7;python_version>='3.4' # BSD
sphinx-rtd-theme
yamllint
ansible-lint
@@ -15,3 +16,4 @@ bashate # Apache-2.0
lfdocs-conf
sphinx-opnfv-theme
bandit
+sphinxcontrib-spelling
diff --git a/tox.ini b/tox.ini
index 54c45a6fd..87c7efa5f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,6 +2,7 @@
envlist = docs,pep8,pylint,yamllint,ansiblelint,bashate,bandit,py27,py36,cover,perm
[testenv]
+pip_version = pip==20.2.4
usedevelop = True
deps =
-c{toxinidir}/upper-constraints.txt
@@ -30,6 +31,7 @@ commands =
sphinx-build -W -n -b html docs docs/build/html
sphinx-build -W -n -b linkcheck docs docs/_build/linkcheck
sphinx-build -W -n -b html -c docs/lfreleng docs docs/_build/html
+ sphinx-build -W -b spelling -Dextensions=sphinxcontrib.spelling docs docs/_build/spellcheck
[testenv:pep8]
basepython = python2.7
@@ -40,7 +42,7 @@ basepython = python2.7
commands =
pylint \
--ignore-imports=y --min-similarity-lines=10 \
- --disable=locally-disabled functest
+ --disable=locally-disabled,super-on-old-class functest
[testenv:yamllint]
basepython = python2.7
@@ -59,7 +61,7 @@ commands =
[testenv:ansiblelint]
basepython = python2.7
commands =
- ansible-lint ansible/site.yml
+ ansible-lint -x303 ansible/site.yml
[testenv:py36]
commands = nosetests functest/tests/unit
diff --git a/upper-constraints.txt b/upper-constraints.txt
index de66a3156..75a6aea3e 100644
--- a/upper-constraints.txt
+++ b/upper-constraints.txt
@@ -14,13 +14,21 @@ robotframework===3.0.2
robotframework-httplibrary===0.4.2
robotframework-requests===0.4.7
robotframework-sshlibrary===2.1.3;python_version=='2.7'
-ansible===2.3.2.0
-xtesting===0.62.0
+ansible===2.9.2
+xtesting===0.91.0
networking-bgpvpn===9.0.0
networking-sfc===7.0.0
neutron===13.0.2
-os-faults===0.1.18
+os-faults===0.2.1
bandit===1.1.0
ruamel.yaml.jinja2==0.2.2
+git+https://opendev.org/openstack/rally.git@1.5.1#egg=rally
+git+https://github.com/xrally/xrally-kubernetes.git@e4c605fa0ae0cae63ce5ac19ce8516f0bdd5e868#egg=xrally-kubernetes
pylint===1.9.5;python_version=='2.7'
pylint===2.3.1;python_version=='3.6'
+sphinxcontrib-spelling===4.2.1
+boto3===1.7.62
+ujson===2.0.3
+kubernetes===10.0.0
+ansible-lint===4.2.0
+setuptools_scm===5.0.1