-rw-r--r--  api/apidoc/functest.opnfv_tests.openstack.rst            |   1
-rw-r--r--  api/apidoc/functest.opnfv_tests.openstack.vgpu.rst       |  17
-rw-r--r--  api/apidoc/functest.opnfv_tests.openstack.vgpu.vgpu.rst  |   7
-rw-r--r--  build.sh                                                 |  23
-rw-r--r--  docker/benchmarking-cntt/testcases.yaml                  |   1
-rw-r--r--  docker/benchmarking/testcases.yaml                       |   1
-rw-r--r--  docker/core/Dockerfile                                   |   2
-rw-r--r--  docker/healthcheck/testcases.yaml                        |   1
-rw-r--r--  docker/smoke-cntt/testcases.yaml                         |  13
-rw-r--r--  docker/smoke/testcases.yaml                              |  16
-rw-r--r--  docker/vnf/Dockerfile                                    |  30
-rw-r--r--  docker/vnf/testcases.yaml                                |   1
-rw-r--r--  docs/com/pres/rfp/index.html                             |  52
-rw-r--r--  docs/com/pres/rfp/rfp.md                                 |  97
-rw-r--r--  docs/com/pres/vevent202010/index.html                    |  52
-rw-r--r--  docs/com/pres/vevent202010/vevent202010.md               |  62
-rw-r--r--  docs/release/release-notes/functest-release.rst          |  13
-rw-r--r--  docs/release/release-notes/index.rst                     |   2
-rw-r--r--  docs/spelling_wordlist.txt                               |   2
-rw-r--r--  docs/testing/user/configguide/configguide.rst            |  72
-rw-r--r--  docs/testing/user/userguide/test_results.rst             |  40
-rw-r--r--  functest/ci/logging.debug.ini                            |   2
-rw-r--r--  functest/ci/logging.ini                                  |   2
-rw-r--r--  functest/ci/testcases.yaml                               |  32
-rw-r--r--  functest/core/cloudify.py                                |  12
-rw-r--r--  functest/core/singlevm.py                                |   4
-rw-r--r--  functest/opnfv_tests/openstack/cinder/cinder_test.py     |  12
-rw-r--r--  functest/opnfv_tests/openstack/vgpu/__init__.py          |   0
-rw-r--r--  functest/opnfv_tests/openstack/vgpu/vgpu.py              |  53
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_ssh.py        |   4
-rw-r--r--  functest/opnfv_tests/vnf/epc/juju_epc.py                 | 342
-rw-r--r--  functest/opnfv_tests/vnf/ims/heat_ims.py                 |   2
-rw-r--r--  setup.cfg                                                |   1
-rw-r--r--  upper-constraints.txt                                    |   2
34 files changed, 635 insertions, 338 deletions
diff --git a/api/apidoc/functest.opnfv_tests.openstack.rst b/api/apidoc/functest.opnfv_tests.openstack.rst
index d035ce8..2a59e88 100644
--- a/api/apidoc/functest.opnfv_tests.openstack.rst
+++ b/api/apidoc/functest.opnfv_tests.openstack.rst
@@ -13,7 +13,6 @@ Subpackages
functest.opnfv_tests.openstack.refstack
functest.opnfv_tests.openstack.shaker
functest.opnfv_tests.openstack.tempest
- functest.opnfv_tests.openstack.vgpu
functest.opnfv_tests.openstack.vmtp
functest.opnfv_tests.openstack.vping
diff --git a/api/apidoc/functest.opnfv_tests.openstack.vgpu.rst b/api/apidoc/functest.opnfv_tests.openstack.vgpu.rst
deleted file mode 100644
index 7ba162d..0000000
--- a/api/apidoc/functest.opnfv_tests.openstack.vgpu.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-functest.opnfv\_tests.openstack.vgpu package
-============================================
-
-Submodules
-----------
-
-.. toctree::
-
- functest.opnfv_tests.openstack.vgpu.vgpu
-
-Module contents
----------------
-
-.. automodule:: functest.opnfv_tests.openstack.vgpu
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/api/apidoc/functest.opnfv_tests.openstack.vgpu.vgpu.rst b/api/apidoc/functest.opnfv_tests.openstack.vgpu.vgpu.rst
deleted file mode 100644
index 9bb3b55..0000000
--- a/api/apidoc/functest.opnfv_tests.openstack.vgpu.vgpu.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-functest.opnfv\_tests.openstack.vgpu.vgpu module
-================================================
-
-.. automodule:: functest.opnfv_tests.openstack.vgpu.vgpu
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/build.sh b/build.sh
index 2a3603b..5331a52 100644
--- a/build.sh
+++ b/build.sh
@@ -11,20 +11,17 @@ docker/benchmarking \
docker/vnf \
docker/smoke-cntt \
docker/benchmarking-cntt"}
-arm_dirs=${arm_dirs-"\
-docker/core \
-docker/healthcheck \
-docker/smoke \
-docker/benchmarking \
-docker/smoke-cntt \
-docker/benchmarking-cntt"}
+arm_dirs=${arm_dirs-${amd64_dirs}}
arm64_dirs=${arm64_dirs-${amd64_dirs}}
build_opts=("--pull=true" --no-cache "--force-rm=true")
find . -name Dockerfile -exec sed -i \
-e "s|opnfv/functest-core|${repo}/functest-core:amd64-latest|g" {} +
find . -name Dockerfile -exec sed -i \
- -e "s|opnfv/functest-tempest|${repo}/functest-tempest:amd64-latest|g" {} +
+ -e "s|opnfv/functest-smoke|${repo}/functest-smoke:amd64-latest|g" {} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-benchmarking|\
+${repo}/functest-benchmarking:amd64-latest|g" {} +
for dir in ${amd64_dirs}; do
(cd "${dir}" &&
docker build "${build_opts[@]}" \
@@ -42,7 +39,10 @@ find . -name Dockerfile -exec sed -i \
find . -name Dockerfile -exec sed -i \
-e "s|opnfv/functest-core|${repo}/functest-core:arm64-latest|g" {} +
find . -name Dockerfile -exec sed -i \
- -e "s|opnfv/functest-tempest|${repo}/functest-tempest:arm64-latest|g" {} +
+ -e "s|opnfv/functest-smoke|${repo}/functest-smoke:arm64-latest|g" {} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-benchmarking|\
+${repo}/functest-benchmarking:arm64-latest|g" {} +
for dir in ${arm64_dirs}; do
(cd "${dir}" && docker build "${build_opts[@]}" \
-t "${repo}/functest-${dir##**/}:arm64-latest" .)
@@ -60,7 +60,10 @@ find . -name Dockerfile -exec sed -i \
find . -name Dockerfile -exec sed -i \
-e "s|opnfv/functest-core|${repo}/functest-core:arm-latest|g" {} +
find . -name Dockerfile -exec sed -i \
- -e "s|opnfv/functest-tempest|${repo}/functest-tempest:arm-latest|g" {} +
+ -e "s|opnfv/functest-smoke|${repo}/functest-smoke:arm-latest|g" {} +
+find . -name Dockerfile -exec sed -i \
+ -e "s|opnfv/functest-benchmarking|\
+${repo}/functest-benchmarking:arm-latest|g" {} +
for dir in ${arm_dirs}; do
(cd "${dir}" && docker build "${build_opts[@]}" \
-t "${repo}/functest-${dir##**/}:arm-latest" .)
diff --git a/docker/benchmarking-cntt/testcases.yaml b/docker/benchmarking-cntt/testcases.yaml
index 5a29f13..f96fbee 100644
--- a/docker/benchmarking-cntt/testcases.yaml
+++ b/docker/benchmarking-cntt/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: benchmarking_cntt
- order: 4
description: >-
Run several OpenStack performance tools
https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
diff --git a/docker/benchmarking/testcases.yaml b/docker/benchmarking/testcases.yaml
index e63481e..ea58caa 100644
--- a/docker/benchmarking/testcases.yaml
+++ b/docker/benchmarking/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: benchmarking
- order: 3
description: >-
Run several OpenStack performance tools
https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
diff --git a/docker/core/Dockerfile b/docker/core/Dockerfile
index b933af5..80be9ee 100644
--- a/docker/core/Dockerfile
+++ b/docker/core/Dockerfile
@@ -11,7 +11,7 @@ RUN apk --no-cache add --update \
apk --no-cache add --virtual .build-deps --update \
python3-dev build-base linux-headers libffi-dev \
openssl-dev libjpeg-turbo-dev && \
- apk --no-cache add --update --upgrade py3-distlib \
+ apk --no-cache add --update py3-distlib\>=0.3.1 \
--repository=http://dl-cdn.alpinelinux.org/alpine/edge/main && \
git init /src/requirements && \
(cd /src/requirements && \
diff --git a/docker/healthcheck/testcases.yaml b/docker/healthcheck/testcases.yaml
index c2a6649..ff743f0 100644
--- a/docker/healthcheck/testcases.yaml
+++ b/docker/healthcheck/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: healthcheck
- order: 0
description: >-
First tier to be executed to verify the basic
operations in the VIM.
diff --git a/docker/smoke-cntt/testcases.yaml b/docker/smoke-cntt/testcases.yaml
index 7b2a173..5ea0b70 100644
--- a/docker/smoke-cntt/testcases.yaml
+++ b/docker/smoke-cntt/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: smoke_cntt
- order: 2
description: >-
Set of basic Functional tests to validate the OPNFV scenarios.
testcases:
@@ -12,7 +11,7 @@ tiers:
criteria: 100
blocking: false
deny_skipping: true
- tests_count: 517
+ tests_count: 523
description: >-
This test case runs the Tempest suite proposed by the
Neutron project. The list of test cases is generated by
@@ -80,7 +79,7 @@ tiers:
criteria: 100
blocking: false
deny_skipping: true
- tests_count: 11
+ tests_count: 9
description: >-
This test case runs the Tempest suite proposed by the
Cinder project.
@@ -89,6 +88,8 @@ tiers:
args:
mode: "(?!.*test_incremental_backup)\
(?!.*test_consistencygroups)\
+ (?!.*test_backup_crossproject_admin_negative)\
+ (?!.*test_backup_crossproject_user_negative)\
(^cinder_tempest_plugin.)"
option:
- '--concurrency=4'
@@ -167,7 +168,7 @@ tiers:
criteria: 100
blocking: false
deny_skipping: true
- tests_count: 1280
+ tests_count: 1276
description: >-
The list of test cases is generated by
Tempest automatically and depends on the parameters of
@@ -182,8 +183,12 @@ tiers:
(?!.*test_flavors_microversions.FlavorsV255TestJSON)\
(?!.*test_flavors_microversions.FlavorsV261TestJSON)\
(?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_live_block_migration)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_live_block_migration_paused)\
(?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_volume_backed_live_migration)\
(?!.*test_live_migration.LiveMigrationTest.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveMigrationTest.test_live_block_migration)\
+ (?!.*test_live_migration.LiveMigrationTest.test_live_block_migration_paused)\
(?!.*test_live_migration.LiveMigrationTest.test_volume_backed_live_migration)\
(?!.*test_live_migration.LiveMigrationRemoteConsolesV26Test)\
(?!.*test_quotas.QuotasAdminTestV257)\
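
The mode values above are single regular expressions: each (?!.*<name>) negative lookahead removes one test from the selection and the final anchored group picks the suite, so adding an exclusion only means adding another lookahead and adjusting tests_count. A small illustrative sketch of how such a filter behaves; the pattern mirrors the cinder filter above, and the test IDs are examples, not taken from the patch:

    # Illustrative only: how a deny-list "mode" regex selects test IDs.
    import re

    MODE = (r"(?!.*test_incremental_backup)"
            r"(?!.*test_backup_crossproject_admin_negative)"
            r"(?!.*test_backup_crossproject_user_negative)"
            r"(^cinder_tempest_plugin.)")

    tests = [
        "cinder_tempest_plugin.api.volume.test_volume_unicode",
        "cinder_tempest_plugin.api.volume.test_backup_crossproject_admin_negative",
        "tempest.api.volume.test_volumes_list",
    ]

    for test in tests:
        print(bool(re.match(MODE, test)), test)
    # Only the first ID is kept: it is in the cinder plugin and hits no lookahead.
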
diff --git a/docker/smoke/testcases.yaml b/docker/smoke/testcases.yaml
index ec131e3..9b61b00 100644
--- a/docker/smoke/testcases.yaml
+++ b/docker/smoke/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: smoke
- order: 1
description: >-
Set of basic Functional tests to validate the OPNFV scenarios.
testcases:
@@ -34,8 +33,10 @@ tiers:
run:
name: tempest_common
args:
- mode:
- '(?!.*test_incremental_backup)(^cinder_tempest_plugin.)'
+ mode: "(?!.*test_incremental_backup)\
+ (?!.*test_backup_crossproject_admin_negative)\
+ (?!.*test_backup_crossproject_user_negative)\
+ (^cinder_tempest_plugin.)"
option:
- '--concurrency=4'
@@ -220,6 +221,9 @@ tiers:
mode: "(?!.*test_unmanage_snapshot_rbac)\
(?!.*test_show_auto_allocated_topology)\
(?!.*test_delete_auto_allocated_topology)\
+ (?!.*test_create_network_provider_segmentation_id)\
+ (?!.*compute.test_floating_ips_rbac)\
+ (?!.*test_create_image_from_volume_backed_server)\
(?=.*[.*\bslow\b.*])\
(^patrole_tempest_plugin.tests.api.(compute|image|network|volume))"
option:
@@ -239,6 +243,9 @@ tiers:
mode: "(?!.*test_unmanage_snapshot_rbac)\
(?!.*test_show_auto_allocated_topology)\
(?!.*test_delete_auto_allocated_topology)\
+ (?!.*test_create_network_provider_segmentation_id)\
+ (?!.*compute.test_floating_ips_rbac)\
+ (?!.*test_create_image_from_volume_backed_server)\
(?=.*[.*\bslow\b.*])\
(^patrole_tempest_plugin.tests.api.(compute|image|network|volume))"
option:
@@ -258,6 +265,9 @@ tiers:
mode: "(?!.*test_unmanage_snapshot_rbac)\
(?!.*test_show_auto_allocated_topology)\
(?!.*test_delete_auto_allocated_topology)\
+ (?!.*test_create_network_provider_segmentation_id)\
+ (?!.*compute.test_floating_ips_rbac)\
+ (?!.*test_create_image_from_volume_backed_server)\
(?=.*[.*\bslow\b.*])\
(^patrole_tempest_plugin.tests.api.(compute|image|network|volume))"
option:
diff --git a/docker/vnf/Dockerfile b/docker/vnf/Dockerfile
index 88f3748..aa98b6c 100644
--- a/docker/vnf/Dockerfile
+++ b/docker/vnf/Dockerfile
@@ -6,19 +6,12 @@ ARG CLOUDIFY_VIMS_TAG=gambia
ARG HEAT_VIMS_TAG=release-130
ARG VROUTER_TAG=fraser
ARG VROUTER_BP_TAG=9b76d46a388d32d4985797620e67c2ed3315b3e4
-ARG JUJU_TAG=tags/juju-2.3.9
-ARG JUJU_WAIT_TAG=2.6.4
ARG ABOT_CHARM=opnfv-fraser
-ARG GODEPS_TAG=404a7e748cd352bb0d7449dedc645546eebbfc6e
-
-ENV GOPATH /src/epc-requirements/go
-ENV GOBIN /src/epc-requirements/go/bin
-ENV PATH $GOBIN:$PATH
COPY clearwater-heat-singlenet-deps.patch /tmp/clearwater-heat-singlenet-deps.patch
RUN apk --no-cache add --update \
ruby ruby-bundler ruby-irb ruby-rdoc \
- procps libxslt libxml2 zlib libffi go musl-dev && \
+ procps libxslt libxml2 zlib libffi musl-dev && \
apk --no-cache add --virtual .build-deps --update \
ruby-dev g++ make libxslt-dev libxml2-dev zlib-dev libffi-dev patch && \
git init /src/vims-test && \
@@ -52,23 +45,10 @@ RUN apk --no-cache add --update \
(cd /home/opnfv/functest/data/router/opnfv-vnf-data && \
git fetch --tags https://github.com/oolorg/opnfv-vnf-data.git $VROUTER_TAG && \
git checkout FETCH_HEAD) && \
- case "$(uname -m)" in \
- "armv7l" | "aarch64") ;; \
- *) \
- git init /src/epc-requirements/abot_charm && \
- (cd /src/epc-requirements/abot_charm && \
- git fetch --tags https://github.com/RebacaInc/abot_charm.git $ABOT_CHARM && \
- git checkout FETCH_HEAD) && \
- pip3 install --no-cache-dir --src /src -c/src/requirements/upper-constraints.txt \
- -c/src/functest/upper-constraints.txt \
- juju-wait==$JUJU_WAIT_TAG && \
- go get -d github.com/rogpeppe/godeps && \
- (cd $GOPATH/src/github.com/rogpeppe/godeps && git checkout $GODEPS_TAG && go install -v github.com/rogpeppe/godeps) && \
- go get -d -v github.com/juju/juju/... || true && \
- (cd $GOPATH/src/github.com/juju/juju && git checkout $JUJU_TAG && godeps -u dependencies.tsv) && \
- go install -v github.com/juju/juju/... && \
- rm -r $GOPATH/src/ $GOPATH/pkg /src/epc-requirements/abot_charm/.git /root/.cache/go-build;; \
- esac && \
+ git init /src/epc-requirements/abot_charm && \
+ (cd /src/epc-requirements/abot_charm && \
+ git fetch --tags https://github.com/collivier/abot_charm.git $ABOT_CHARM && \
+ git checkout FETCH_HEAD) && \
(cd /src/vims-test && bundle config build.nokogiri --use-system-libraries && bundle install --system && bundle update rest-client) && \
rm -r /src/vims-test/.git /src/cloudify_vims/.git /src/heat_vims/.git /src/vims-test/quaff/.git \
/src/vims-test/build-infra/.git /src/opnfv-vnf-vyos-blueprint/.git \
diff --git a/docker/vnf/testcases.yaml b/docker/vnf/testcases.yaml
index 9822a51..0d21c63 100644
--- a/docker/vnf/testcases.yaml
+++ b/docker/vnf/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: vnf
- order: 5
description: >-
Collection of VNF test cases.
testcases:
diff --git a/docs/com/pres/rfp/index.html b/docs/com/pres/rfp/index.html
new file mode 100644
index 0000000..538c0c8
--- /dev/null
+++ b/docs/com/pres/rfp/index.html
@@ -0,0 +1,52 @@
+<html>
+<head>
+<title>OPNFV and CNTT in Orange RFP</title>
+<meta name="author" content="Cédric Ollivier">
+<meta name="viewport"
+ content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+<link rel="stylesheet" href="../reveal.js/css/reveal.css">
+<link rel="stylesheet" href="../reveal.js/css/theme/white.css">
+<link rel="stylesheet" href="../reveal.js/lib/css/zenburn.css">
+<script>
+var link = document.createElement( 'link' );
+link.rel = 'stylesheet';
+link.type = 'text/css';
+link.href = window.location.search.match( /print-pdf/gi ) ? '../reveal.js/css/print/pdf.css' : '../reveal.js/css/print/paper.css';
+document.getElementsByTagName( 'head' )[0].appendChild( link );
+</script>
+</head>
+<body>
+ <div class="reveal">
+ <div class="slides">
+ <section data-markdown="rfp.md" data-separator="^\n\n\n"
+ data-separator-vertical="^\n\n" data-separator-notes="^Note:"></section>
+ </div>
+ </div>
+ <script src="../reveal.js/lib/js/head.min.js"></script>
+ <script src="../reveal.js/js/reveal.js"></script>
+ <script>
+ Reveal.initialize({
+ dependencies : [ {
+ src : '../reveal.js/plugin/markdown/marked.js',
+ condition : function() {
+ return !!document.querySelector('[data-markdown]');
+ }
+ }, {
+ src : '../reveal.js/plugin/markdown/markdown.js',
+ condition : function() {
+ return !!document.querySelector('[data-markdown]');
+ }
+ }, {
+ src: '../reveal.js/plugin/highlight/highlight.js',
+ async: true,
+ callback: function() {
+ hljs.initHighlightingOnLoad();
+ }
+ }, {
+ src: '../reveal.js/plugin/notes/notes.js',
+ async: true
+ } ]
+ });
+ </script>
+</body>
+</html>
diff --git a/docs/com/pres/rfp/rfp.md b/docs/com/pres/rfp/rfp.md
new file mode 100644
index 0000000..66f7ba9
--- /dev/null
+++ b/docs/com/pres/rfp/rfp.md
@@ -0,0 +1,97 @@
+# OPNFV and CNTT in Orange RFP
+
+[Cédric Ollivier](mailto:cedric.ollivier@orange.com)
+
+2020/09/19
+
+
+
+## Vision and contributions
+
+
+### Our guidelines
+
+- **automate** to bring determinism and to meet the new
+ software release rate
+- test all software layers **independently** (OpenStack, Kubernetes and VNFs)
+- run all deployment and verification jobs in **our** continuous integration
+ chains
+- leverage best **opensource** tools and practices
+
+**Integrate smoothly and deploy everywhere quickly**
+
+
+### How does open source help?
+
+- **Functest** offers a collection of state-of-the-art virtual infrastructure
+ test suites
+- **Xtesting** helps assemble sparse test cases and accelerate the adoption
+  of CI/CD best practices
+- **XtestingCI** eases deploying plug-and-play CI/CD toolchains anywhere in a
+  few commands
+- **CNTT** defines conformance suites and their playbooks leveraging these
+  3 software projects
+
+**Any contribution is more than welcome!
+[[1]](https://www.linkedin.com/pulse/call-functest-cntt-rc1-contributions-c%25C3%25A9dric-ollivier/)
+[[2]](https://www.linkedin.com/pulse/call-functest-cntt-rc2-contributions-c%25C3%25A9dric-ollivier/)**
+
+
+
+## CNTT/OPNFV in Orange
+
+
+### A couple of RFP requirements
+
+- the **full** CNTT reference conformance for OpenStack results and outputs
+  (Orange CNTT Field Trial is in very good shape
+ [[1]](http://testresults.opnfv.org/functest/field_trial/)
+ [[2]](https://www.linkedin.com/pulse/cntt-field-trials-c%C3%A9dric-ollivier/))
+- the **success** of the Functest Kubernetes test suites (now released as
+ part of CNTT RC2 Baraque)
+- **first** VNF test cases running in **our** continuous integration chain
+ thanks to Xtesting and XtestingCI
+
+**It's implementing Orange and CNTT targets**
+
+
+### Orange CNTT RC1 Field Trial
+
+- helped detect a couple of issues in CNTT RC1
+- integrated cinder backup and nova instance_password in Orange IaaS
+- to fix 10 remaining single test failures (out of 2000+ functional tests, 3 hours
+ benchmarking and 3 VNFs automatically onboarded and tested)
+- to enhance Functest juju_epc to pass proxies
+
+**99,999%**
+
+
+### Wish list
+
+- to integrate **more benchmarks** in CNTT conformance (e.g. disk benchmarking)
+- to switch from the current Kubernetes interoperability testing to a **true**
+ CNTT conformance suite
+- to build the first **VNF and CNF** conformance suites (**high priority**)
+
+**We need your contributions and help!
+[[1]](https://www.linkedin.com/pulse/call-functest-cntt-rc1-contributions-c%25C3%25A9dric-ollivier/)
+[[2]](https://www.linkedin.com/pulse/call-functest-cntt-rc2-contributions-c%25C3%25A9dric-ollivier/)**
+
+
+
+## Conclusion
+
+
+### Takeaways
+
+- Orange leverages OPNFV and CNTT in RFP
+- we keep contributing to both specification and implementation streams for
+ the success of Network Function Virtualization
+- we expect more OPNFV and CNTT contributions especially for VNF and CNF
+ conformance suites, our initial CNTT target
+
+**Try CNTT reference suites, you will love them!**
+
+
+
+## Thank you!
diff --git a/docs/com/pres/vevent202010/index.html b/docs/com/pres/vevent202010/index.html
new file mode 100644
index 0000000..a24c721
--- /dev/null
+++ b/docs/com/pres/vevent202010/index.html
@@ -0,0 +1,52 @@
+<html>
+<head>
+<title>Orange CNTT RC1 Field Trial feedbacks</title>
+<meta name="author" content="Cédric Ollivier">
+<meta name="viewport"
+ content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
+<link rel="stylesheet" href="../reveal.js/css/reveal.css">
+<link rel="stylesheet" href="../reveal.js/css/theme/white.css">
+<link rel="stylesheet" href="../reveal.js/lib/css/zenburn.css">
+<script>
+var link = document.createElement( 'link' );
+link.rel = 'stylesheet';
+link.type = 'text/css';
+link.href = window.location.search.match( /print-pdf/gi ) ? '../reveal.js/css/print/pdf.css' : '../reveal.js/css/print/paper.css';
+document.getElementsByTagName( 'head' )[0].appendChild( link );
+</script>
+</head>
+<body>
+ <div class="reveal">
+ <div class="slides">
+ <section data-markdown="vevent202010.md" data-separator="^\n\n\n"
+ data-separator-vertical="^\n\n" data-separator-notes="^Note:"></section>
+ </div>
+ </div>
+ <script src="../reveal.js/lib/js/head.min.js"></script>
+ <script src="../reveal.js/js/reveal.js"></script>
+ <script>
+ Reveal.initialize({
+ dependencies : [ {
+ src : '../reveal.js/plugin/markdown/marked.js',
+ condition : function() {
+ return !!document.querySelector('[data-markdown]');
+ }
+ }, {
+ src : '../reveal.js/plugin/markdown/markdown.js',
+ condition : function() {
+ return !!document.querySelector('[data-markdown]');
+ }
+ }, {
+ src: '../reveal.js/plugin/highlight/highlight.js',
+ async: true,
+ callback: function() {
+ hljs.initHighlightingOnLoad();
+ }
+ }, {
+ src: '../reveal.js/plugin/notes/notes.js',
+ async: true
+ } ]
+ });
+ </script>
+</body>
+</html>
diff --git a/docs/com/pres/vevent202010/vevent202010.md b/docs/com/pres/vevent202010/vevent202010.md
new file mode 100644
index 0000000..b128210
--- /dev/null
+++ b/docs/com/pres/vevent202010/vevent202010.md
@@ -0,0 +1,62 @@
+# Orange CNTT RC1 Field Trial
+
+[Cédric Ollivier](mailto:cedric.ollivier@orange.com)
+
+2020/10/14
+
+
+
+## CNTT RC1 Field Trial
+
+
+### Orange CNTT RC1 Field Trial
+
+- helped detect a couple of issues in CNTT RC1
+- integrated cinder backup and nova instance_password in Orange IaaS
+- to fix 10 remaining single test failures (out of 2000+ functional tests, 3 hours
+ benchmarking and 3 VNFs automatically onboarded and tested)
+- to enhance Functest juju_epc to pass proxies
+
+**99,999%**
+
+
+### Orange CNTT RC1 feedback
+
+- we easily executed the CNTT RC1 playbook as-is and no clear bug was found in
+ CNTT RC1 Baldy
+- CNTT RC1 is now used in our Orange IaaS verification in addition to the
+ classical Functest containers
+- CNTT RC1 is also executed to verify all deployments before onboarding any VNF
+- CNTT RC1 Baldy and CNTT RC2 Baraque are listed in **Orange RFP requirements**
+ (see last ONES
+ [[1]](http://testresults.opnfv.org/functest/ONES2020NA_OPNFV-CNTT_RFP.1080p.mp4))
+
+
+
+## What else?
+
+
+### Wish list
+
+- to integrate **more benchmarks** in CNTT conformance (e.g. disk benchmarking)
+- to switch from the current Kubernetes interoperability testing to a **true**
+ CNTT conformance suite
+- to build the first **VNF and CNF** conformance suites (**high priority**)
+
+**We need your contributions and help!
+[[2]](https://www.linkedin.com/pulse/call-functest-cntt-rc1-contributions-c%25C3%25A9dric-ollivier/)
+[[3]](https://www.linkedin.com/pulse/call-functest-cntt-rc2-contributions-c%25C3%25A9dric-ollivier/)**
+
+
+### New Functest opportunities
+
+- to implement live monitoring of your OpenStack and Kubernetes deployments via
+ a subset of Functest (healthcheck? new Rally tasks?)
+- to implement new functest testcases to validate and verify OpenStack upgrades
+ (new Rally tasks)?
+
+**Try Functest Leguer, you will love it [[4]](https://www.linkedin.com/pulse/opnfv-functest-leguer-out-c%25C3%25A9dric-ollivier/)**
+
+
+
+## Thank you!
diff --git a/docs/release/release-notes/functest-release.rst b/docs/release/release-notes/functest-release.rst
index f6c3d12..00951b9 100644
--- a/docs/release/release-notes/functest-release.rst
+++ b/docs/release/release-notes/functest-release.rst
@@ -68,9 +68,17 @@ Kubernetes
The internal test cases are:
+ * k8s_quick
* k8s_smoke
- * xrally_kubernetes
* k8s_conformance
+ * xrally_kubernetes
+ * kube_hunter
+ * kube_bench_master
+ * kube_bench_node
+ * xrally_kubernetes_full
+ * k8s_vims
+ * helm_vims
+ * cnf_conformance
Release Data
============
@@ -100,6 +108,9 @@ Software
* https://hub.docker.com/r/opnfv/functest-kubernetes-healthcheck
* https://hub.docker.com/r/opnfv/functest-kubernetes-smoke
+ * https://hub.docker.com/r/opnfv/functest-kubernetes-security
+ * https://hub.docker.com/r/opnfv/functest-kubernetes-benchmarking
+ * https://hub.docker.com/r/opnfv/functest-kubernetes-cnf
Docker tag for master: latest
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index c19d556..25c2cbd 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -10,5 +10,3 @@ Functest Release Notes
:maxdepth: 4
functest-release.rst
-
-Build date: |today|
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt
index 94452dc..f4dcea5 100644
--- a/docs/spelling_wordlist.txt
+++ b/docs/spelling_wordlist.txt
@@ -14,6 +14,7 @@ ci
Clearwater
cloudify
Cloudify
+cnf
cntt
conf
config
@@ -128,6 +129,7 @@ vEPC
verifier
versioned
vIMS
+vims
Virtualised
Virtualized
vm
diff --git a/docs/testing/user/configguide/configguide.rst b/docs/testing/user/configguide/configguide.rst
index d63ddee..55585ed 100644
--- a/docs/testing/user/configguide/configguide.rst
+++ b/docs/testing/user/configguide/configguide.rst
@@ -13,7 +13,6 @@ Functest Dockers for OpenStack deployment
-----------------------------------------
Docker images are available on the dockerhub:
- * opnfv/functest-core
* opnfv/functest-healthcheck
* opnfv/functest-smoke
* opnfv/functest-smoke-cntt
@@ -124,9 +123,12 @@ Results shall be displayed as follows::
| tempest_full | functest | smoke | 41:52 | PASS |
| tempest_scenario | functest | smoke | 08:42 | PASS |
| tempest_slow | functest | smoke | 43:42 | PASS |
- | patrole | functest | smoke | 02:42 | PASS |
+ | patrole_admin | functest | smoke | 21:06 | PASS |
+ | patrole_member | functest | smoke | 21:23 | PASS |
+ | patrole_reader | functest | smoke | 21:56 | PASS |
| tempest_barbican | functest | smoke | 02:30 | PASS |
| tempest_octavia | functest | smoke | 00:00 | SKIP |
+ | tempest_cyborg | functest | smoke | 00:00 | SKIP |
+---------------------------+------------------+---------------+------------------+----------------+
Note: if the scenario does not support some tests, they are indicated as SKIP.
@@ -228,9 +230,11 @@ Functest Dockers for Kubernetes deployment
------------------------------------------
Docker images are available on the dockerhub:
- * opnfv/functest-kubernetes-core
* opnfv/functest-kubernetes-healthcheck
* opnfv/functest-kubernetes-smoke
+ * opnfv/functest-kubernetes-security
+ * opnfv/functest-kubernetes-benchmarking
+ * opnfv/functest-kubernetes-cnf
Preparing your environment
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -256,7 +260,8 @@ Results shall be displayed as follows::
+-------------------+------------------+---------------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+-------------------+------------------+---------------------+------------------+----------------+
- | k8s_smoke | functest | healthcheck | 01:09 | PASS |
+ | k8s_quick | functest | healthcheck | 00:18 | PASS |
+ | k8s_smoke | functest | healthcheck | 01:14 | PASS |
+-------------------+------------------+---------------------+------------------+----------------+
Testing smoke suite
@@ -273,10 +278,65 @@ Results shall be displayed as follows::
+---------------------------+------------------+---------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+---------------------------+------------------+---------------+------------------+----------------+
- | xrally_kubernetes | functest | smoke | 22:04 | PASS |
- | k8s_conformance | functest | smoke | 173:48 | PASS |
+ | k8s_conformance | functest | smoke | 94:26 | PASS |
+ | xrally_kubernetes | functest | smoke | 13:05 | PASS |
+---------------------------+------------------+---------------+------------------+----------------+
+Testing security suite
+^^^^^^^^^^^^^^^^^^^^^^
+
+Run smoke suite::
+
+ sudo docker run -it --env-file env \
+ -v $(pwd)/config:/root/.kube/config \
+ opnfv/functest-kubernetes-security
+
+Results shall be displayed as follows::
+
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | kube_hunter | functest | security | 00:19 | PASS |
+ | kube_bench_master | functest | security | 00:02 | PASS |
+ | kube_bench_node | functest | security | 00:01 | PASS |
+ +---------------------------+------------------+------------------+------------------+----------------+
+
+Testing benchmarking suite
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Run benchmarking suite::
+
+ sudo docker run -it --env-file env \
+ -v $(pwd)/config:/root/.kube/config \
+ opnfv/functest-kubernetes-benchmarking
+
+Results shall be displayed as follows::
+
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | xrally_kubernetes_full | functest | benchmarking | 34:16 | PASS |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+
+Testing cnf suite
+^^^^^^^^^^^^^^^^^
+
+Run cnf suite::
+
+ sudo docker run -it --env-file env \
+ -v $(pwd)/config:/root/.kube/config \
+ opnfv/functest-kubernetes-cnf
+
+Results shall be displayed as follows::
+
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | k8s_vims | functest | cnf | 09:06 | PASS |
+ | helm_vims | functest | cnf | 08:54 | PASS |
+ | cnf_conformance | functest | cnf | 02:00 | PASS |
+ +-------------------------+------------------+--------------+------------------+----------------+
+
Environment variables
=====================
diff --git a/docs/testing/user/userguide/test_results.rst b/docs/testing/user/userguide/test_results.rst
index 8d8f79f..10f87d8 100644
--- a/docs/testing/user/userguide/test_results.rst
+++ b/docs/testing/user/userguide/test_results.rst
@@ -56,9 +56,12 @@ Smoke suite::
| tempest_full | functest | smoke | 41:52 | PASS |
| tempest_scenario | functest | smoke | 08:42 | PASS |
| tempest_slow | functest | smoke | 43:42 | PASS |
- | patrole | functest | smoke | 02:42 | PASS |
+ | patrole_admin | functest | smoke | 21:06 | PASS |
+ | patrole_member | functest | smoke | 21:23 | PASS |
+ | patrole_reader | functest | smoke | 21:56 | PASS |
| tempest_barbican | functest | smoke | 02:30 | PASS |
| tempest_octavia | functest | smoke | 00:00 | SKIP |
+ | tempest_cyborg | functest | smoke | 00:00 | SKIP |
+---------------------------+------------------+---------------+------------------+----------------+
Smoke CNTT suite::
@@ -115,7 +118,8 @@ Kubernetes healthcheck suite::
+-------------------+------------------+---------------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+-------------------+------------------+---------------------+------------------+----------------+
- | k8s_smoke | functest | healthcheck | 01:09 | PASS |
+ | k8s_quick | functest | healthcheck | 00:18 | PASS |
+ | k8s_smoke | functest | healthcheck | 01:14 | PASS |
+-------------------+------------------+---------------------+------------------+----------------+
Kubernetes smoke suite::
@@ -123,10 +127,38 @@ Kubernetes smoke suite::
+---------------------------+------------------+---------------+------------------+----------------+
| TEST CASE | PROJECT | TIER | DURATION | RESULT |
+---------------------------+------------------+---------------+------------------+----------------+
- | xrally_kubernetes | functest | smoke | 22:04 | PASS |
- | k8s_conformance | functest | smoke | 173:48 | PASS |
+ | k8s_conformance | functest | smoke | 94:26 | PASS |
+ | xrally_kubernetes | functest | smoke | 13:05 | PASS |
+---------------------------+------------------+---------------+------------------+----------------+
+Kubernetes security suite::
+
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +---------------------------+------------------+------------------+------------------+----------------+
+ | kube_hunter | functest | security | 00:19 | PASS |
+ | kube_bench_master | functest | security | 00:02 | PASS |
+ | kube_bench_node | functest | security | 00:01 | PASS |
+ +---------------------------+------------------+------------------+------------------+----------------+
+
+Kubernetes benchmarking suite::
+
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+ | xrally_kubernetes_full | functest | benchmarking | 34:16 | PASS |
+ +--------------------------------+------------------+----------------------+------------------+----------------+
+
+Kubernetes cnf suite::
+
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | TEST CASE | PROJECT | TIER | DURATION | RESULT |
+ +-------------------------+------------------+--------------+------------------+----------------+
+ | k8s_vims | functest | cnf | 09:06 | PASS |
+ | helm_vims | functest | cnf | 08:54 | PASS |
+ | cnf_conformance | functest | cnf | 02:00 | PASS |
+ +-------------------------+------------------+--------------+------------------+----------------+
+
Results are automatically pushed to the test results database, some additional
result files are pushed to OPNFV artifact web sites.
diff --git a/functest/ci/logging.debug.ini b/functest/ci/logging.debug.ini
index 764a4e2..c926a50 100644
--- a/functest/ci/logging.debug.ini
+++ b/functest/ci/logging.debug.ini
@@ -95,7 +95,7 @@ args=(sys.stdout,)
[handler_file]
class=FileHandler
-level=DEBUG
+level=INFO
formatter=standard
args=("/home/opnfv/functest/results/functest.log",)
diff --git a/functest/ci/logging.ini b/functest/ci/logging.ini
index e2a4409..dde0794 100644
--- a/functest/ci/logging.ini
+++ b/functest/ci/logging.ini
@@ -101,7 +101,7 @@ args=(sys.stdout,)
[handler_file]
class=FileHandler
-level=DEBUG
+level=INFO
formatter=standard
args=("/home/opnfv/functest/results/functest.log",)
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index 6641883..282590a 100644
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -2,7 +2,6 @@
tiers:
-
name: healthcheck
- order: 0
description: >-
First tier to be executed to verify the basic
operations in the VIM.
@@ -180,7 +179,6 @@ tiers:
-
name: smoke
- order: 1
description: >-
Set of basic Functional tests to validate the OPNFV scenarios.
testcases:
@@ -212,8 +210,10 @@ tiers:
run:
name: tempest_common
args:
- mode:
- '(?!.*test_incremental_backup)(^cinder_tempest_plugin.)'
+ mode: "(?!.*test_incremental_backup)\
+ (?!.*test_backup_crossproject_admin_negative)\
+ (?!.*test_backup_crossproject_user_negative)\
+ (^cinder_tempest_plugin.)"
option:
- '--concurrency=4'
@@ -398,6 +398,9 @@ tiers:
mode: "(?!.*test_unmanage_snapshot_rbac)\
(?!.*test_show_auto_allocated_topology)\
(?!.*test_delete_auto_allocated_topology)\
+ (?!.*test_create_network_provider_segmentation_id)\
+ (?!.*compute.test_floating_ips_rbac)\
+ (?!.*test_create_image_from_volume_backed_server)\
(?=.*[.*\bslow\b.*])\
(^patrole_tempest_plugin.tests.api.(compute|image|network|volume))"
option:
@@ -417,6 +420,9 @@ tiers:
mode: "(?!.*test_unmanage_snapshot_rbac)\
(?!.*test_show_auto_allocated_topology)\
(?!.*test_delete_auto_allocated_topology)\
+ (?!.*test_create_network_provider_segmentation_id)\
+ (?!.*compute.test_floating_ips_rbac)\
+ (?!.*test_create_image_from_volume_backed_server)\
(?=.*[.*\bslow\b.*])\
(^patrole_tempest_plugin.tests.api.(compute|image|network|volume))"
option:
@@ -436,6 +442,9 @@ tiers:
mode: "(?!.*test_unmanage_snapshot_rbac)\
(?!.*test_show_auto_allocated_topology)\
(?!.*test_delete_auto_allocated_topology)\
+ (?!.*test_create_network_provider_segmentation_id)\
+ (?!.*compute.test_floating_ips_rbac)\
+ (?!.*test_create_image_from_volume_backed_server)\
(?=.*[.*\bslow\b.*])\
(^patrole_tempest_plugin.tests.api.(compute|image|network|volume))"
option:
@@ -499,7 +508,6 @@ tiers:
-
name: smoke_cntt
- order: 2
description: >-
Set of basic Functional tests to validate the OPNFV scenarios.
testcases:
@@ -509,7 +517,7 @@ tiers:
criteria: 100
blocking: false
deny_skipping: true
- tests_count: 517
+ tests_count: 523
description: >-
This test case runs the Tempest suite proposed by the
Neutron project. The list of test cases is generated by
@@ -576,7 +584,7 @@ tiers:
criteria: 100
blocking: false
deny_skipping: true
- tests_count: 8
+ tests_count: 9
description: >-
This test case runs the Tempest suite proposed by the
Cinder project.
@@ -585,6 +593,8 @@ tiers:
args:
mode: "(?!.*test_incremental_backup)\
(?!.*test_consistencygroups)\
+ (?!.*test_backup_crossproject_admin_negative)\
+ (?!.*test_backup_crossproject_user_negative)\
(^cinder_tempest_plugin.)"
option:
- '--concurrency=4'
@@ -678,8 +688,12 @@ tiers:
(?!.*test_flavors_microversions.FlavorsV255TestJSON)\
(?!.*test_flavors_microversions.FlavorsV261TestJSON)\
(?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_live_block_migration)\
+ (?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_live_block_migration_paused)\
(?!.*test_live_migration.LiveAutoBlockMigrationV225Test.test_volume_backed_live_migration)\
(?!.*test_live_migration.LiveMigrationTest.test_iscsi_volume)\
+ (?!.*test_live_migration.LiveMigrationTest.test_live_block_migration)\
+ (?!.*test_live_migration.LiveMigrationTest.test_live_block_migration_paused)\
(?!.*test_live_migration.LiveMigrationTest.test_volume_backed_live_migration)\
(?!.*test_live_migration.LiveMigrationRemoteConsolesV26Test)\
(?!.*test_quotas.QuotasAdminTestV257)\
@@ -709,6 +723,7 @@ tiers:
(?!.*test_attach_volume.AttachVolumeMultiAttach)\
(?!.*identity.admin.v2)(?!.*identity.v2)\
(?!.*image.v1)\
+ (?!.*image.v2.test_images.ImportImagesTest)\
(?!.*admin.test_dhcp_agent_scheduler)\
(?!.*admin.test_routers_dvr)\
(?!.*test_metering_extensions)(?!.*network.test_tags)\
@@ -778,7 +793,6 @@ tiers:
-
name: benchmarking
- order: 3
description: >-
Run several OpenStack performance tools
https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
@@ -843,7 +857,6 @@ tiers:
-
name: benchmarking_cntt
- order: 4
description: >-
Run several OpenStack performance tools
https://docs.openstack.org/performance-docs/latest/methodologies/tools.html
@@ -886,7 +899,6 @@ tiers:
-
name: vnf
- order: 5
description: >-
Collection of VNF test cases.
testcases:
diff --git a/functest/core/cloudify.py b/functest/core/cloudify.py
index b5bd1b3..0fb4f6e 100644
--- a/functest/core/cloudify.py
+++ b/functest/core/cloudify.py
@@ -74,8 +74,8 @@ class Cloudify(singlevm.SingleVm2):
"--cap-add SYS_ADMIN --network=host {}".format(
os.path.basename(self.cloudify_archive),
self.cloudify_container))
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
self.cfy_client = CloudifyClient(
host=self.fip.floating_ip_address,
username='admin', password='admin', tenant='default_tenant')
@@ -124,8 +124,8 @@ class Cloudify(singlevm.SingleVm2):
"cfy_manager_local:/etc/cloudify/ && "
"sudo docker exec cfy_manager_local "
"chmod 444 /etc/cloudify/cloudify_ims.pem")
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
def upload_cfy_plugins(self, yaml, wgn):
"""Upload Cloudify plugins"""
@@ -133,8 +133,8 @@ class Cloudify(singlevm.SingleVm2):
"sudo docker exec cfy_manager_local "
"cfy plugins upload -y {} {} && "
"sudo docker exec cfy_manager_local cfy status".format(yaml, wgn))
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
def kill_existing_execution(self, dep_name):
"""kill existing execution"""
diff --git a/functest/core/singlevm.py b/functest/core/singlevm.py
index af2fb61..bfaa53b 100644
--- a/functest/core/singlevm.py
+++ b/functest/core/singlevm.py
@@ -456,8 +456,8 @@ class SingleVm1(VmReady1):
Returns: echo exit codes
"""
(_, stdout, stderr) = self.ssh.exec_command('echo Hello World')
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
return stdout.channel.recv_exit_status()
def run(self, **kwargs):
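
The cloudify.py and singlevm.py hunks above (and the cinder and vping changes below) decode the byte streams returned by paramiko's exec_command before logging them, so the log contains plain text instead of a bytes repr. A minimal standalone sketch of that pattern, with placeholder host and credentials:

    # Sketch of the decode-before-logging pattern; host/user/key are placeholders.
    import logging
    import os

    import paramiko

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("ssh-example")

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect("192.0.2.10", username="ubuntu",
                key_filename=os.path.expanduser("~/.ssh/id_rsa"))

    (_, stdout, stderr) = ssh.exec_command("echo Hello World")
    logger.debug("output:\n%s", stdout.read().decode("utf-8"))   # bytes -> str
    logger.debug("error:\n%s", stderr.read().decode("utf-8"))
    exit_code = stdout.channel.recv_exit_status()                # 0 on success
    ssh.close()
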
diff --git a/functest/opnfv_tests/openstack/cinder/cinder_test.py b/functest/opnfv_tests/openstack/cinder/cinder_test.py
index bbed9a6..d81bb10 100644
--- a/functest/opnfv_tests/openstack/cinder/cinder_test.py
+++ b/functest/opnfv_tests/openstack/cinder/cinder_test.py
@@ -77,8 +77,10 @@ class CinderCheck(singlevm.SingleVm2):
self.logger.debug("ssh: %s", self.ssh)
(_, stdout, stderr) = self.ssh.exec_command(
"sh ~/write_data.sh {}".format(env.get('VOLUME_DEVICE_NAME')))
- self.logger.debug("volume_write stdout: %s", stdout.read())
- self.logger.debug("volume_write stderr: %s", stderr.read())
+ self.logger.debug(
+ "volume_write stdout: %s", stdout.read().decode("utf-8"))
+ self.logger.debug(
+ "volume_write stderr: %s", stderr.read().decode("utf-8"))
# Detach volume from VM 1
self.logger.info("Detach volume from VM 1")
self.cloud.detach_volume(
@@ -103,8 +105,10 @@ class CinderCheck(singlevm.SingleVm2):
self.logger.debug("ssh: %s", self.ssh2)
(_, stdout, stderr) = self.ssh2.exec_command(
"sh ~/read_data.sh {}".format(env.get('VOLUME_DEVICE_NAME')))
- self.logger.debug("read volume stdout: %s", stdout.read())
- self.logger.debug("read volume stderr: %s", stderr.read())
+ self.logger.debug(
+ "read volume stdout: %s", stdout.read().decode("utf-8"))
+ self.logger.debug(
+ "read volume stderr: %s", stderr.read().decode("utf-8"))
self.logger.info("Detach volume from VM 2")
self.cloud.detach_volume(
self.vm2, self.volume, timeout=self.volume_timeout)
diff --git a/functest/opnfv_tests/openstack/vgpu/__init__.py b/functest/opnfv_tests/openstack/vgpu/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/functest/opnfv_tests/openstack/vgpu/__init__.py
+++ /dev/null
diff --git a/functest/opnfv_tests/openstack/vgpu/vgpu.py b/functest/opnfv_tests/openstack/vgpu/vgpu.py
deleted file mode 100644
index c8180a4..0000000
--- a/functest/opnfv_tests/openstack/vgpu/vgpu.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2018 Kontron and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""vGPU testcase implementation."""
-
-from __future__ import division
-
-import logging
-
-from functest.core import singlevm
-
-
-class VGPU(singlevm.SingleVm2):
- """OpenStack vGPU Test Case."""
-
- __logger = logging.getLogger(__name__)
-
- filename = ('/home/opnfv/functest/images/'
- 'ubuntu-16.04-server-cloudimg-amd64-disk1.img')
- flavor_ram = 4096
- flavor_vcpus = 2
- flavor_disk = 40
- flavor_extra_specs = {'resources:VGPU': '1'}
- username = 'ubuntu'
- ssh_connect_loops = 12
- create_server_timeout = 300
-
- def __init__(self, **kwargs):
- """Initialize vGPU testcase object."""
- if "case_name" not in kwargs:
- kwargs["case_name"] = "vgpu"
- super(VGPU, self).__init__(**kwargs)
-
- def execute(self):
- """
- Test if the vGPU exist.
- """
- (_, stdout, stderr) = self.ssh.exec_command('lspci')
- lspci_output = stdout.read()
- self.__logger.debug("output:\n%s", stdout.read())
- self.__logger.debug("error:\n%s", stderr.read())
- if ('VGA compatible controller: Intel' in lspci_output or
- 'VGA compatible controller: Nvidia' in lspci_output):
- self.__logger.info("The VM have a vGPU")
- return 0
- self.__logger.error("The VM haven't any vGPU")
- return 1
diff --git a/functest/opnfv_tests/openstack/vping/vping_ssh.py b/functest/opnfv_tests/openstack/vping/vping_ssh.py
index e6c07ee..a7bbfc2 100644
--- a/functest/opnfv_tests/openstack/vping/vping_ssh.py
+++ b/functest/opnfv_tests/openstack/vping/vping_ssh.py
@@ -50,8 +50,8 @@ class VPingSSH(singlevm.SingleVm2):
'ping -c 1 {}'.format(
self.vm2.private_v4 or self.vm2.addresses[
self.network.name][0].addr))
- self.__logger.info("output:\n%s", stdout.read())
- self.__logger.info("error:\n%s", stderr.read())
+ self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.info("error:\n%s", stderr.read().decode("utf-8"))
return stdout.channel.recv_exit_status()
def clean(self):
diff --git a/functest/opnfv_tests/vnf/epc/juju_epc.py b/functest/opnfv_tests/vnf/epc/juju_epc.py
index e08e7eb..5049bd0 100644
--- a/functest/opnfv_tests/vnf/epc/juju_epc.py
+++ b/functest/opnfv_tests/vnf/epc/juju_epc.py
@@ -14,11 +14,11 @@ import os
import time
import json
import re
-import subprocess
import sys
from copy import deepcopy
import pkg_resources
+import scp
from functest.core import singlevm
from functest.utils import config
@@ -58,7 +58,7 @@ CREDS_TEMPLATE = """credentials:
username: {user_n}"""
-class JujuEpc(singlevm.VmReady2):
+class JujuEpc(singlevm.SingleVm2):
# pylint:disable=too-many-instance-attributes
"""Abot EPC deployed with JUJU Orchestrator Case"""
@@ -74,11 +74,10 @@ class JujuEpc(singlevm.VmReady2):
flavor_ram = 2048
flavor_vcpus = 1
flavor_disk = 10
-
flavor_alt_ram = 4096
flavor_alt_vcpus = 1
flavor_alt_disk = 10
-
+ username = 'ubuntu'
juju_timeout = '4800'
def __init__(self, **kwargs):
@@ -146,18 +145,20 @@ class JujuEpc(singlevm.VmReady2):
self.image_alt = None
self.flavor_alt = None
- def check_requirements(self):
- if not os.path.exists("/src/epc-requirements/go/bin/juju"):
- self.__logger.warning(
- "Juju cannot be cross-compiled (arm and arm64) from the time "
- "being")
- self.is_skipped = True
- self.project.clean()
- if env.get('NEW_USER_ROLE').lower() == "admin":
- self.__logger.warning(
- "Defining NEW_USER_ROLE=admin will easily break the testcase "
- "because Juju doesn't manage tenancy (e.g. subnet "
- "overlapping)")
+ def _install_juju(self):
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'sudo snap install juju --channel=2.3/stable --classic')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
+
+ def _install_juju_wait(self):
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'sudo apt-get update && sudo apt-get install python3-pip -y && '
+ 'sudo pip3 install juju_wait===2.6.4')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def _register_cloud(self):
assert self.public_auth_url
@@ -169,9 +170,13 @@ class JujuEpc(singlevm.VmReady2):
'RegionOne')}
with open(clouds_yaml, 'w') as yfile:
yfile.write(CLOUD_TEMPLATE.format(**cloud_data))
- cmd = ['juju', 'add-cloud', 'abot-epc', '-f', clouds_yaml, '--replace']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(clouds_yaml, remote_path='~/')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju add-cloud abot-epc -f clouds.yaml --replace')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def _register_credentials(self):
self.__logger.info("Creating Credentials for Abot-epc .....")
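
From this hunk on, juju_epc no longer shells out to a local juju binary: files such as clouds.yaml and credentials.yaml are copied to the test VM with scp.SCPClient over the existing paramiko transport, the juju commands run remotely via exec_command, and each helper returns the negated exit status. A minimal sketch of that transfer-then-execute pattern, with placeholder connection details:

    # Sketch of the scp + exec_command pattern; host, key and file are placeholders.
    import paramiko
    import scp

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect("192.0.2.10", username="ubuntu", key_filename="/tmp/id_rsa")

    # Reuse the SSH transport for the file transfer instead of a local scp call.
    scpc = scp.SCPClient(ssh.get_transport())
    scpc.put("clouds.yaml", remote_path="~/")

    (_, stdout, stderr) = ssh.exec_command(
        "/snap/bin/juju add-cloud abot-epc -f clouds.yaml --replace")
    print(stdout.read().decode("utf-8"))
    print(stderr.read().decode("utf-8"))
    success = not stdout.channel.recv_exit_status()   # True when juju exited 0
    ssh.close()
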
@@ -186,46 +191,36 @@ class JujuEpc(singlevm.VmReady2):
"user_domain_name", "Default")}
with open(credentials_yaml, 'w') as yfile:
yfile.write(CREDS_TEMPLATE.format(**creds_data))
- cmd = ['juju', 'add-credential', 'abot-epc', '-f', credentials_yaml,
- '--replace', '--debug']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
- def prepare(self):
- """Prepare testcase (Additional pre-configuration steps)."""
- assert self.public_auth_url
- self.__logger.info("Additional pre-configuration steps")
- try:
- os.makedirs(self.res_dir)
- except OSError as ex:
- if ex.errno != errno.EEXIST:
- self.__logger.exception("Cannot create %s", self.res_dir)
- raise Exception
-
- self.__logger.info("ENV:\n%s", env.string())
- self._register_cloud()
- self._register_credentials()
-
- def publish_image(self, name=None):
- image = super(JujuEpc, self).publish_image(name)
- cmd = ['juju', 'metadata', 'generate-image', '-d', '/root',
- '-i', image.id, '-s', 'xenial',
- '-r', self.cloud.region_name if self.cloud.region_name else (
- 'RegionOne'),
- '-u', self.public_auth_url]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- return image
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(credentials_yaml, remote_path='~/')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju add-credential abot-epc -f credentials.yaml '
+ ' --replace --debug')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
+
+ def _publish_image(self):
+ region_name = self.cloud.region_name if self.cloud.region_name else (
+ 'RegionOne')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju metadata generate-image -d /home/ubuntu '
+ '-i {} -s xenial -r {} -u {}'.format(
+ self.image.id, region_name, self.public_auth_url))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def publish_image_alt(self, name=None):
image_alt = super(JujuEpc, self).publish_image_alt(name)
- cmd = ['juju', 'metadata', 'generate-image', '-d', '/root',
- '-i', image_alt.id, '-s', 'trusty',
- '-r', self.cloud.region_name if self.cloud.region_name else (
- 'RegionOne'),
- '-u', self.public_auth_url]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ region_name = self.cloud.region_name if self.cloud.region_name else (
+ 'RegionOne')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju metadata generate-image -d /home/ubuntu '
+ '-i {} -s trusty -r {} -u {}'.format(
+ image_alt.id, region_name, self.public_auth_url))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
return image_alt
def deploy_orchestrator(self): # pylint: disable=too-many-locals
@@ -234,47 +229,41 @@ class JujuEpc(singlevm.VmReady2):
Bootstrap juju
"""
+ self._publish_image()
self.image_alt = self.publish_image_alt()
self.flavor_alt = self.create_flavor_alt()
self.__logger.info("Starting Juju Bootstrap process...")
- try:
- cmd = ['timeout', JujuEpc.juju_timeout,
- 'juju', 'bootstrap',
- 'abot-epc/{}'.format(
- self.cloud.region_name if self.cloud.region_name else (
- 'RegionOne')),
- 'abot-controller',
- '--agent-version', '2.3.9',
- '--metadata-source', '/root',
- '--constraints', 'mem=2G',
- '--bootstrap-series', 'xenial',
- '--config', 'network={}'.format(self.network.id),
- '--config', 'ssl-hostname-verification=false',
- '--config', 'external-network={}'.format(self.ext_net.id),
- '--config', 'use-floating-ip=true',
- '--config', 'use-default-secgroup=true',
- '--debug']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- except subprocess.CalledProcessError as cpe:
- self.__logger.error(
- "Exception with Juju Bootstrap: %s\n%s",
- cpe.cmd, cpe.output.decode("utf-8"))
- return False
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("Some issue with Juju Bootstrap ...")
- return False
-
- return True
+ region_name = self.cloud.region_name if self.cloud.region_name else (
+ 'RegionOne')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'timeout {} '
+ '/snap/bin/juju bootstrap abot-epc/{} abot-controller '
+ '--agent-version 2.3.9 --metadata-source /home/ubuntu '
+ '--constraints mem=2G --bootstrap-series xenial '
+ '--config network={} '
+ '--config ssl-hostname-verification=false '
+ '--config external-network={} '
+ '--config use-floating-ip=true '
+ '--config use-default-secgroup=true '
+ '--debug'.format(
+ JujuEpc.juju_timeout, region_name, self.network.id,
+ self.ext_net.id))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def check_app(self, name='abot-epc-basic', status='active'):
"""Check application status."""
- cmd = ['juju', 'status', '--format', 'short', name]
for i in range(10):
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju status --format short {}'.format(name))
+ output = stdout.read().decode("utf-8")
+ self.__logger.debug("stdout:\n%s", output)
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ continue
ret = re.search(
- r'(?=workload:({})\))'.format(status), output.decode("utf-8"))
+ r'(?=workload:({})\))'.format(status), output)
if ret:
self.__logger.info("%s workload is %s", name, status)
break
@@ -289,65 +278,80 @@ class JujuEpc(singlevm.VmReady2):
def deploy_vnf(self):
"""Deploy ABOT-OAI-EPC."""
self.__logger.info("Upload VNFD")
- descriptor = self.vnf['descriptor']
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(
+ '/src/epc-requirements/abot_charm', remote_path='~/',
+ recursive=True)
self.__logger.info("Deploying Abot-epc bundle file ...")
- cmd = ['juju', 'deploy', '{}'.format(descriptor.get('file_name'))]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- self.__logger.info("Waiting for instances .....")
- try:
- cmd = ['timeout', JujuEpc.juju_timeout, 'juju-wait']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- self.__logger.info("Deployed Abot-epc on Openstack")
- except subprocess.CalledProcessError as cpe:
- self.__logger.error(
- "Exception with Juju VNF Deployment: %s\n%s",
- cpe.cmd, cpe.output.decode("utf-8"))
- return False
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("Some issue with the VNF Deployment ..")
- return False
-
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'sudo mkdir -p /src/epc-requirements && '
+ 'sudo mv abot_charm /src/epc-requirements/abot_charm && '
+ '/snap/bin/juju deploy '
+ '/src/epc-requirements/abot_charm/functest-abot-epc-bundle/'
+ 'bundle.yaml')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'PATH=/snap/bin/:$PATH '
+ 'timeout {} juju-wait'.format(JujuEpc.juju_timeout))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
self.__logger.info("Checking status of ABot and EPC units ...")
- cmd = ['juju', 'status']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.debug("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command('/snap/bin/juju status')
+ output = stdout.read().decode("utf-8")
+ self.__logger.debug("stdout:\n%s", output)
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
for app in ['abot-epc-basic', 'oai-epc', 'oai-hss']:
if not self.check_app(app):
return False
-
- self.__logger.info("Transferring the feature files to Abot_node ...")
- cmd = ['timeout', '60', 'juju', 'scp', '--', '-r', '-v',
- '{}/featureFiles'.format(self.case_dir),
- 'abot-epc-basic/0:/etc/rebaca-test-suite/']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
- return True
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(
+ '{}/featureFiles'.format(self.case_dir), remote_path='~/',
+ recursive=True)
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'timeout {} /snap/bin/juju scp -- -r -v ~/featureFiles '
+ 'abot-epc-basic/0:/etc/rebaca-test-suite/'.format(
+ JujuEpc.juju_timeout))
+ output = stdout.read().decode("utf-8")
+ self.__logger.debug("stdout:\n%s", output)
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ return not stdout.channel.recv_exit_status()
def test_vnf(self):
"""Run test on ABoT."""
start_time = time.time()
- self.__logger.info("Running VNF Test cases....")
- cmd = ['juju', 'run-action', 'abot-epc-basic/0', 'run',
- 'tagnames={}'.format(self.details['test_vnf']['tag_name'])]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
- cmd = ['timeout', JujuEpc.juju_timeout, 'juju-wait']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
-
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju run-action abot-epc-basic/0 '
+ 'run tagnames={}'.format(self.details['test_vnf']['tag_name']))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'PATH=/snap/bin/:$PATH '
+ 'timeout {} juju-wait'.format(JujuEpc.juju_timeout))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
duration = time.time() - start_time
self.__logger.info("Getting results from Abot node....")
- cmd = ['timeout', JujuEpc.juju_timeout,
- 'juju', 'scp', '--', '-v',
- 'abot-epc-basic/0:'
- '/var/lib/abot-epc-basic/artifacts/TestResults.json',
- '{}/.'.format(self.res_dir)]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'timeout {} /snap/bin/juju scp -- -v abot-epc-basic/0:'
+ '/var/lib/abot-epc-basic/artifacts/TestResults.json .'.format(
+ JujuEpc.juju_timeout))
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ if stdout.channel.recv_exit_status():
+ return not stdout.channel.recv_exit_status()
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.get('TestResults.json', self.res_dir)
self.__logger.info("Parsing the Test results...")
res = (process_abot_test_result('{}/TestResults.json'.format(
self.res_dir)))
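
Artefacts now travel in two hops: scp.SCPClient pushes local files to the controller VM over the already-open paramiko transport, "juju scp" forwards them to the unit, and the test results come back the same way before being parsed from self.res_dir. A hedged sketch of the SCPClient round trip; the host, user, key and local paths are placeholders, not values from the patch:

    import paramiko
    import scp  # the python "scp" package used by the patch

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Placeholder endpoint and credentials.
    ssh.connect('192.0.2.10', username='ubuntu', key_filename='/tmp/key.pem')

    scpc = scp.SCPClient(ssh.get_transport())
    # Recursive upload of a local directory to the remote home directory,
    # mirroring how abot_charm and featureFiles are staged.
    scpc.put('featureFiles', remote_path='~/', recursive=True)
    # Download of a single file, mirroring how TestResults.json is retrieved.
    scpc.get('TestResults.json', '/tmp/results')
    ssh.close()
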
@@ -362,45 +366,41 @@ class JujuEpc(singlevm.VmReady2):
short_result['failures'], short_result['skipped'])
return True
- def run(self, **kwargs):
- self.start_time = time.time()
+ def execute(self):
+ """Prepare testcase (Additional pre-configuration steps)."""
+ assert self.public_auth_url
+ self.__logger.info("Additional pre-configuration steps")
+ try:
+ os.makedirs(self.res_dir)
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ self.__logger.exception("Cannot create %s", self.res_dir)
+ raise Exception
+ self.__logger.info("ENV:\n%s", env.string())
try:
- assert super(JujuEpc, self).run(**kwargs) == self.EX_OK
- self.prepare()
- if (self.deploy_orchestrator() and
- self.deploy_vnf() and
- self.test_vnf()):
- self.stop_time = time.time()
- self.result = 100
- return self.EX_OK
- self.result = 0
- self.stop_time = time.time()
- return self.EX_TESTCASE_FAILED
+ assert self._install_juju()
+ assert self._install_juju_wait()
+ assert self._register_cloud()
+ assert self._register_credentials()
+ assert self.deploy_orchestrator()
+ assert self.deploy_vnf()
+ assert self.test_vnf()
except Exception: # pylint: disable=broad-except
- self.result = 0
- self.stop_time = time.time()
- self.__logger.exception("Exception on VNF testing")
- return self.EX_TESTCASE_FAILED
+ self.__logger.exception("juju_epc failed")
+ return 1
+ return 0
def clean(self):
"""Clean created objects/functions."""
- try:
- cmd = ['juju', 'debug-log', '--replay', '--no-tail']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.debug(
- "%s\n%s", " ".join(cmd), output.decode("utf-8"))
- self.__logger.info("Destroying Orchestrator...")
- cmd = ['timeout', JujuEpc.juju_timeout,
- 'juju', 'destroy-controller', '-y', 'abot-controller',
- '--destroy-all-models']
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- self.__logger.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
- except subprocess.CalledProcessError as cpe:
- self.__logger.error(
- "Exception with Juju Cleanup: %s\n%s",
- cpe.cmd, cpe.output.decode("utf-8"))
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("General issue during the undeployment ..")
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju debug-log --replay --no-tail')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
+ (_, stdout, stderr) = self.ssh.exec_command(
+ '/snap/bin/juju destroy-controller -y abot-controller '
+ '--destroy-all-models')
+ self.__logger.debug("stdout:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("stderr:\n%s", stderr.read().decode("utf-8"))
for fip in self.cloud.list_floating_ips():
self.cloud.delete_floating_ip(fip.id)
if self.image_alt:
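
Cleanup is reworked along the same lines: clean() replays the juju debug log and destroys the controller over SSH, then releases the floating IPs allocated during the test through the openstacksdk cloud layer exposed as self.cloud by the functest/xtesting base classes. A minimal sketch of that floating-IP sweep, assuming a standalone openstacksdk connection ("envvars" is an illustrative cloud name):

    import openstack

    # Credentials are read from the environment / clouds.yaml.
    cloud = openstack.connect(cloud='envvars')

    # Same call sequence as clean(): enumerate and delete floating IPs so
    # the test tenant is not left with leaked resources.
    for fip in cloud.list_floating_ips():
        cloud.delete_floating_ip(fip.id)
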
diff --git a/functest/opnfv_tests/vnf/ims/heat_ims.py b/functest/opnfv_tests/vnf/ims/heat_ims.py
index 4a57a74..4edb2dd 100644
--- a/functest/opnfv_tests/vnf/ims/heat_ims.py
+++ b/functest/opnfv_tests/vnf/ims/heat_ims.py
@@ -162,7 +162,7 @@ class HeatIms(singlevm.VmReady2):
server.public_v4, username=username,
key_filename=self.key_filename, timeout=timeout)
(_, stdout, _) = ssh.exec_command('sudo monit summary')
- self.__logger.info("output:\n%s", stdout.read())
+ self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
ssh.close()
def deploy_vnf(self):
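
The one-line heat_ims change matters because paramiko's stdout.read() returns bytes under Python 3: logging the raw value prints a b'...' literal with escaped newlines, while decoding first restores readable multi-line output. A small illustration (the byte string stands in for "sudo monit summary" output and is not taken from the patch):

    raw = b"Process 'sprout'   Running\nProcess 'homestead'   Running\n"
    print("output:\n%s" % raw)                  # b"Process 'sprout'..." with literal \n
    print("output:\n%s" % raw.decode("utf-8"))  # readable, multi-line text
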
diff --git a/setup.cfg b/setup.cfg
index da7bb2f..d6bdfd1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -34,7 +34,6 @@ xtesting.testcase =
barbican = functest.opnfv_tests.openstack.barbican.barbican:Barbican
vmtp = functest.opnfv_tests.openstack.vmtp.vmtp:Vmtp
shaker = functest.opnfv_tests.openstack.shaker.shaker:Shaker
- vgpu = functest.opnfv_tests.openstack.vgpu.vgpu:VGPU
rally_full = functest.opnfv_tests.openstack.rally.rally:RallyFull
cloudify = functest.core.cloudify:Cloudify
cloudify_ims = functest.opnfv_tests.vnf.ims.cloudify_ims:CloudifyIms
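
Removing the vgpu line from the xtesting.testcase entry points unregisters the test case: case names in testcases.yaml are resolved to classes through this group, so a name without an entry point can no longer be scheduled. A quick way to check what is registered (pkg_resources is used here purely for illustration; xtesting's own loader may differ):

    import pkg_resources

    names = [ep.name for ep in pkg_resources.iter_entry_points('xtesting.testcase')]
    print('vgpu registered:', 'vgpu' in names)  # False once this change is installed
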
diff --git a/upper-constraints.txt b/upper-constraints.txt
index 8772bab..420d93b 100644
--- a/upper-constraints.txt
+++ b/upper-constraints.txt
@@ -15,7 +15,7 @@ robotframework-httplibrary===0.4.2
robotframework-requests===0.5.0
robotframework-sshlibrary===3.3.0
ansible===2.9.2
-xtesting===0.87.0
+xtesting===0.91.0
git+https://github.com/PyCQA/bandit@3d0824676974e7e2e9635c10bc4f12e261f1dbdf#egg=bandit
bandit===1.1.0
ruamel.yaml.jinja2==0.2.2