-rwxr-xr-x  jjb/3rd_party_ci/download-netvirt-artifact.sh    |  13
-rw-r--r--  jjb/3rd_party_ci/odl-netvirt.yml                 |   9
-rwxr-xr-x  jjb/daisy4nfv/daisy4nfv-download-artifact.sh     |  10
-rw-r--r--  jjb/dovetail/dovetail-ci-jobs.yml                |  25
-rw-r--r--  jjb/fuel/fuel-daily-jobs.yml                     | 102
-rwxr-xr-x  jjb/functest/functest-loop.sh                    |  12
-rwxr-xr-x  jjb/functest/set-functest-env.sh                 |  10
-rwxr-xr-x  jjb/securedlab/check-jinja2.sh                   |   2
-rwxr-xr-x  jjb/xci/bifrost-provision.sh                     |   4
-rwxr-xr-x  jjb/xci/bifrost-verify.sh                        |   4
-rw-r--r--  prototypes/bifrost/playbooks/opnfv-virtual.yaml  |   7
-rw-r--r--  prototypes/xci/file/install-ansible.sh           | 136
-rwxr-xr-x  prototypes/xci/xci-deploy.sh                     |   2
-rw-r--r--  utils/create_pod_file.py                         |  26
-rwxr-xr-x  utils/fetch_os_creds.sh                          |   2
15 files changed, 318 insertions(+), 46 deletions(-)
diff --git a/jjb/3rd_party_ci/download-netvirt-artifact.sh b/jjb/3rd_party_ci/download-netvirt-artifact.sh
index 6aea01d2a..7ecf8d78d 100755
--- a/jjb/3rd_party_ci/download-netvirt-artifact.sh
+++ b/jjb/3rd_party_ci/download-netvirt-artifact.sh
@@ -6,11 +6,18 @@ set -o pipefail
ODL_ZIP=distribution-karaf-0.6.0-SNAPSHOT.zip
echo "Attempting to fetch the artifact location from ODL Jenkins"
-CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~master~$GERRIT_CHANGE_ID/detail"
+if [ "$ODL_BRANCH" != 'master' ]; then
+ DIST=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\2#p')
+ ODL_BRANCH=$(echo ${ODL_BRANCH} | sed -rn 's#([a-zA-Z]+)/([a-zA-Z]+)#\1%2F\2#p')
+else
+ DIST='nitrogen'
+fi
+CHANGE_DETAILS_URL="https://git.opendaylight.org/gerrit/changes/netvirt~${ODL_BRANCH}~${GERRIT_CHANGE_ID}/detail"
# due to a limitation of the Jenkins Gerrit Trigger, we need to use the Gerrit REST API to get the change details
-ODL_BUILD_JOB_NUM=$(curl -s $CHANGE_DETAILS_URL | grep -Eo 'netvirt-distribution-check-carbon/[0-9]+' | tail -1 | grep -Eo [0-9]+)
+ODL_BUILD_JOB_NUM=$(curl --fail -s ${CHANGE_DETAILS_URL} | grep -Eo "netvirt-distribution-check-${DIST}/[0-9]+" | tail -1 | grep -Eo [0-9]+)
+DISTRO_CHECK_CONSOLE_LOG="https://logs.opendaylight.org/releng/jenkins092/netvirt-distribution-check-${DIST}/${ODL_BUILD_JOB_NUM}/console.log.gz"
+NETVIRT_ARTIFACT_URL=$(curl --fail -s --compressed ${DISTRO_CHECK_CONSOLE_LOG} | grep 'BUNDLE_URL' | cut -d = -f 2)
-NETVIRT_ARTIFACT_URL="https://jenkins.opendaylight.org/releng/job/netvirt-distribution-check-carbon/${ODL_BUILD_JOB_NUM}/artifact/${ODL_ZIP}"
echo -e "URL to artifact is\n\t$NETVIRT_ARTIFACT_URL"
echo "Downloading the artifact. This could take time..."
diff --git a/jjb/3rd_party_ci/odl-netvirt.yml b/jjb/3rd_party_ci/odl-netvirt.yml
index 470e4335e..a937acbed 100644
--- a/jjb/3rd_party_ci/odl-netvirt.yml
+++ b/jjb/3rd_party_ci/odl-netvirt.yml
@@ -12,6 +12,10 @@
branch: '{stream}'
gs-pathname: ''
disabled: false
+ - carbon:
+ branch: 'stable/carbon'
+ gs-pathname: ''
+ disabled: false
#####################################
# patch verification phases
#####################################
@@ -111,6 +115,7 @@
- name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
current-parameters: false
predefined-parameters: |
+ ODL_BRANCH={branch}
BRANCH=$BRANCH
GERRIT_REFSPEC=$GERRIT_REFSPEC
GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
@@ -125,10 +130,10 @@
name: functest
condition: SUCCESSFUL
projects:
- - name: 'functest-netvirt-virtual-suite-{stream}'
+ - name: 'functest-netvirt-virtual-suite-master'
predefined-parameters: |
DEPLOY_SCENARIO=os-odl_l3-nofeature-ha
- FUNCTEST_SUITE_NAME=tempest_smoke_serial
+ FUNCTEST_SUITE_NAME=odl_netvirt
RC_FILE_PATH=$HOME/cloner-info/overcloudrc
node-parameters: true
kill-phase-on: FAILURE
diff --git a/jjb/daisy4nfv/daisy4nfv-download-artifact.sh b/jjb/daisy4nfv/daisy4nfv-download-artifact.sh
index 1cc0443ad..a64c80e5c 100755
--- a/jjb/daisy4nfv/daisy4nfv-download-artifact.sh
+++ b/jjb/daisy4nfv/daisy4nfv-download-artifact.sh
@@ -57,12 +57,18 @@ fi
# log info to console
echo "Downloading the $INSTALLER_TYPE artifact using URL http://$OPNFV_ARTIFACT_URL"
-echo "This could take some time..."
+echo "This could take some time... Now the time is $(date -u)"
echo "--------------------------------------------------------"
echo
# download the file
-curl -L -s -o $WORKSPACE/opnfv.bin http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
+if [[ "$NODE_NAME" =~ (zte) ]] && [ -x "$(command -v aria2c)" ]; then
+ DOWNLOAD_CMD="aria2c -x 3 --allow-overwrite=true -d $WORKSPACE -o opnfv.bin"
+else
+ DOWNLOAD_CMD="curl -L -s -o $WORKSPACE/opnfv.bin"
+fi
+
+$DOWNLOAD_CMD http://$OPNFV_ARTIFACT_URL > gsutil.bin.log 2>&1
# list the file
ls -al $WORKSPACE/opnfv.bin
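The command selection above prefers aria2c on zte nodes when it is installed (-x 3 lets aria2c open up to three connections per download) and falls back to curl everywhere else. A quick local check of the selection logic, with NODE_NAME stubbed since Jenkins normally provides it:

    NODE_NAME=${NODE_NAME:-zte-pod1}   # stub for local testing; Jenkins sets the real value
    if [[ "$NODE_NAME" =~ (zte) ]] && [ -x "$(command -v aria2c)" ]; then
        echo "would download with: aria2c -x 3 --allow-overwrite=true"
    else
        echo "would download with: curl -L -s -o"
    fi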
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
index 4998278c8..682948d8b 100644
--- a/jjb/dovetail/dovetail-ci-jobs.yml
+++ b/jjb/dovetail/dovetail-ci-jobs.yml
@@ -142,6 +142,31 @@
SUT: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *master
+ - zte-pod1:
+ slave-label: zte-pod1
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - zte-pod2:
+ slave-label: zte-pod2
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - zte-pod3:
+ slave-label: zte-pod3
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - zte-pod1:
+ slave-label: zte-pod1
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *danube
+ - zte-pod3:
+ slave-label: zte-pod3
+ SUT: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *danube
#--------------------------------
testsuite:
- 'debug'
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index 32abad624..2fa868779 100644
--- a/jjb/fuel/fuel-daily-jobs.yml
+++ b/jjb/fuel/fuel-daily-jobs.yml
@@ -73,8 +73,8 @@
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-odl_l2-sfc-ha':
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- - 'os-odl_l2-bgpvpn-ha':
- auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+ # - 'os-odl_l2-bgpvpn-ha':
+ # auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-nosdn-kvm-ha':
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-nosdn-ovs-ha':
@@ -112,6 +112,7 @@
jobs:
- 'fuel-{scenario}-{pod}-daily-{stream}'
- 'fuel-deploy-{pod}-daily-{stream}'
+ - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
########################
# job templates
@@ -195,6 +196,103 @@
recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
- job-template:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}'
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 4
+ max-per-node: 1
+ option: 'project'
+ - build-blocker:
+ use-build-blocker: true
+ blocking-jobs:
+ - 'fuel-os-.*?-{pod}-daily-.*'
+ - 'fuel-os-.*?-{pod}-weekly-.*'
+ block-level: 'NODE'
+
+ wrappers:
+ - build-name:
+ name: '$BUILD_NUMBER - Scenario: os-odl_l2-bgpvpn-ha'
+
+ triggers:
+ - 'fuel-os-odl_l2-bgpvpn-ha-{pod}-daily-{stream}-trigger'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - '{installer}-defaults'
+ - '{slave-label}-defaults':
+ installer: '{installer}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: "os-odl_l2-bgpvpn-ha"
+ - fuel-ci-parameter:
+ gs-pathname: '{gs-pathname}'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - trigger-builds:
+ - project: 'fuel-deploy-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ same-node: true
+ block: true
+ - trigger-builds:
+ - project: 'functest-fuel-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ same-node: true
+ block: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-fuel-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+      # 1. dovetail only supports the master branch for now; it is not synced
+      #    with the A/B/C branches
+      # 2. here {stream} refers to the SUT stream; the dovetail stream is
+      #    defined in dovetail's own job
+      # 3. only the debug testsuite runs here (basic test cases, i.e. refstack,
+      #    ipv6 and vpn test cases from functest, plus the HA test case from
+      #    yardstick)
+      # 4. not used for release criteria or compliance; only for debugging
+      #    dovetail tool bugs with the fuel bgpvpn scenario
+ - trigger-builds:
+ - project: 'dovetail-fuel-{pod}-proposed_tests-{stream}'
+ current-parameters: false
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l2-bgpvpn-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+
+ publishers:
+ - email:
+ recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com matthew.lijun@huawei.com
+
+
+- job-template:
name: 'fuel-deploy-{pod}-daily-{stream}'
disabled: '{obj:disabled}'
diff --git a/jjb/functest/functest-loop.sh b/jjb/functest/functest-loop.sh
index 893c428a2..869c3956c 100755
--- a/jjb/functest/functest-loop.sh
+++ b/jjb/functest/functest-loop.sh
@@ -1,15 +1,9 @@
#!/bin/bash
set +e
-branch=${GIT_BRANCH##*/}
-[[ "$PUSH_RESULTS_TO_DB" == "true" ]] && flags+="-r"
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
- cmd="${FUNCTEST_REPO_DIR}/docker/run_tests.sh -s ${flags}"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
- cmd="python ${FUNCTEST_REPO_DIR}/ci/run_tests.py -t all ${flags}"
-else
- cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/run_tests.py -t all ${flags}"
+
container_id=$(docker ps -a | grep opnfv/functest | awk '{print $1}' | head -1)
docker exec $container_id $cmd
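The container lookup above greps the default docker ps table; a sketch of an equivalent lookup via docker's --format output, assuming the image name still contains opnfv/functest, which avoids depending on the table's column layout:

    container_id=$(docker ps -a --format '{{.ID}} {{.Image}}' \
        | awk '/opnfv\/functest/ {print $1; exit}')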
diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh
index 569f371c4..1acf0a2ad 100755
--- a/jjb/functest/set-functest-env.sh
+++ b/jjb/functest/set-functest-env.sh
@@ -112,12 +112,8 @@ if [ $(docker ps | grep "${FUNCTEST_IMAGE}:${DOCKER_TAG}" | wc -l) == 0 ]; then
echo "The container ${FUNCTEST_IMAGE} with ID=${container_id} has not been properly started. Exiting..."
exit 1
fi
-if [[ "$BRANCH" =~ 'brahmaputra' ]]; then
- cmd="${FUNCTEST_REPO_DIR}/docker/prepare_env.sh"
-elif [[ "$BRANCH" =~ 'colorado' ]]; then
- cmd="python ${FUNCTEST_REPO_DIR}/ci/prepare_env.py start"
-else
- cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
-fi
+
+cmd="python ${FUNCTEST_REPO_DIR}/functest/ci/prepare_env.py start"
+
echo "Executing command inside the docker: ${cmd}"
docker exec ${container_id} ${cmd}
diff --git a/jjb/securedlab/check-jinja2.sh b/jjb/securedlab/check-jinja2.sh
index 84907e5eb..57650ec28 100755
--- a/jjb/securedlab/check-jinja2.sh
+++ b/jjb/securedlab/check-jinja2.sh
@@ -5,5 +5,5 @@ for lab_configs in $(find labs/ -name 'pod.yaml'); do
while IFS= read -r jinja_templates; do
echo "./utils/generate_config.py -y $lab_configs -j $jinja_templates"
./utils/generate_config.py -y $lab_configs -j $jinja_templates
- done < <(find installers/ -name 'pod_config.yaml.j2')
+ done < <(find installers/ -name '*.j2')
done
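With the wildcard, every Jinja2 template under installers/ is rendered against every lab's pod.yaml instead of only pod_config.yaml.j2 files. For a hypothetical layout containing labs/lab1/pod.yaml, installers/fuel/pod_config.yaml.j2 and installers/apex/network.yaml.j2, the loop now expands to:

    ./utils/generate_config.py -y labs/lab1/pod.yaml -j installers/fuel/pod_config.yaml.j2
    ./utils/generate_config.py -y labs/lab1/pod.yaml -j installers/apex/network.yaml.j2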
diff --git a/jjb/xci/bifrost-provision.sh b/jjb/xci/bifrost-provision.sh
index 4724c2ee5..b37da9059 100755
--- a/jjb/xci/bifrost-provision.sh
+++ b/jjb/xci/bifrost-provision.sh
@@ -82,13 +82,13 @@ sudo -E ./scripts/destroy-env.sh
# provision VMs for the flavor
cd /opt/bifrost
-sudo -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
# list the provisioned VMs
cd /opt/bifrost
source env-vars
ironic node-list
-virsh list
+sudo -H -E virsh list
echo "OpenStack nodes are provisioned!"
# here we have to do something in order to capture what was the working sha1
diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index 29af7ca3b..18019a7cb 100755
--- a/jjb/xci/bifrost-verify.sh
+++ b/jjb/xci/bifrost-verify.sh
@@ -117,10 +117,10 @@ sudo -H -E ./scripts/destroy-env.sh
# provision 3 VMs; xcimaster, controller, and compute
cd /opt/bifrost
-sudo -H -E ./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh
# list the provisioned VMs
cd /opt/bifrost
source env-vars
ironic node-list
-virsh list
+sudo -H -E virsh list
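Both xci scripts now run bifrost-provision.sh unprivileged but keep sudo for virsh: without root, virsh defaults to the qemu:///session URI and will not list guests created on the system instance. A sketch, assuming the provisioned VMs live on the system libvirt instance:

    virsh list                               # session URI; typically empty for a regular user
    sudo -H -E virsh -c qemu:///system list  # system URI; shows the provisioned nodes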
diff --git a/prototypes/bifrost/playbooks/opnfv-virtual.yaml b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
index 699c96698..94de628a6 100644
--- a/prototypes/bifrost/playbooks/opnfv-virtual.yaml
+++ b/prototypes/bifrost/playbooks/opnfv-virtual.yaml
@@ -59,12 +59,7 @@
dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
when: create_image_via_dib | bool == true and transform_boot_image | bool == false
- role: bifrost-keystone-client-config
- # NOTE(hwoarang): This should be ansible_env.SUDO_USER like in the
- # upstream playbook. However, we run ansible as root (ie with sudo)
- # so clouds.yaml will be placed in the user's home directory (see
- # the bifrost-keystone-client-config role) and then ansible will look
- # for one in /root and fail. As such we hardcode the user to be 'root'.
- user: "root"
+ user: "{{ ansible_env.SUDO_USER }}"
clouds:
bifrost:
config_username: "{{ ironic.keystone.default_username }}"
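This restores the upstream behaviour: when the playbook runs under sudo, clouds.yaml is placed in the invoking user's home rather than /root. sudo itself exports the invoking user's name, which is what ansible_env.SUDO_USER picks up; a quick illustration:

    sudo -H bash -c 'echo "invoked by: ${SUDO_USER}"'   # prints the non-root invoking user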
diff --git a/prototypes/xci/file/install-ansible.sh b/prototypes/xci/file/install-ansible.sh
new file mode 100644
index 000000000..daa7f516d
--- /dev/null
+++ b/prototypes/xci/file/install-ansible.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+# NOTE(hwoarang): Most parts of this file were taken from the
+# bifrost repository (scripts/install-deps.sh). This script contains all
+# the necessary distro-specific code to install ansible and its dependencies.
+
+set -eu
+
+declare -A PKG_MAP
+
+CHECK_CMD_PKGS=(
+ libffi
+ libopenssl
+ net-tools
+ python-devel
+)
+
+# Check zypper before apt-get in case zypper-aptitude
+# is installed
+if [ -x '/usr/bin/zypper' ]; then
+ OS_FAMILY="Suse"
+ INSTALLER_CMD="sudo -H -E zypper install -y"
+ CHECK_CMD="zypper search --match-exact --installed"
+ PKG_MAP=(
+ [gcc]=gcc
+ [git]=git
+ [libffi]=libffi-devel
+ [libopenssl]=libopenssl-devel
+ [net-tools]=net-tools
+ [python]=python
+ [python-devel]=python-devel
+ [venv]=python-virtualenv
+ [wget]=wget
+ )
+ EXTRA_PKG_DEPS=( python-xml )
+ # NOTE (cinerama): we can't install python without removing this package
+ # if it exists
+ if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
+ sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
+ fi
+elif [ -x '/usr/bin/apt-get' ]; then
+ OS_FAMILY="Debian"
+ INSTALLER_CMD="sudo -H -E apt-get -y install"
+ CHECK_CMD="dpkg -l"
+ PKG_MAP=( [gcc]=gcc
+ [git]=git
+ [libffi]=libffi-dev
+ [libopenssl]=libssl-dev
+ [net-tools]=net-tools
+ [python]=python-minimal
+ [python-devel]=libpython-dev
+ [venv]=python-virtualenv
+ [wget]=wget
+ )
+ EXTRA_PKG_DEPS=()
+elif [ -x '/usr/bin/dnf' ] || [ -x '/usr/bin/yum' ]; then
+ OS_FAMILY="RedHat"
+ PKG_MANAGER=$(which dnf || which yum)
+ INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -y install"
+ CHECK_CMD="rpm -q"
+ PKG_MAP=(
+ [gcc]=gcc
+ [git]=git
+ [libffi]=libffi-devel
+ [libopenssl]=openssl-devel
+ [net-tools]=net-tools
+ [python]=python
+ [python-devel]=python-devel
+ [venv]=python-virtualenv
+ [wget]=wget
+ )
+ EXTRA_PKG_DEPS=()
+else
+    echo "ERROR: Supported package manager not found. Supported: apt, yum, zypper"
+fi
+
+if ! $(python --version &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[python]}
+fi
+if ! $(gcc -v &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[gcc]}
+fi
+if ! $(git --version &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[git]}
+fi
+if ! $(wget --version &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[wget]}
+fi
+
+for pkg in ${CHECK_CMD_PKGS[@]}; do
+ if ! $(${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null); then
+ ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
+ fi
+done
+
+if [ -n "${EXTRA_PKG_DEPS-}" ]; then
+ for pkg in ${EXTRA_PKG_DEPS}; do
+ if ! $(${CHECK_CMD} ${pkg} &>/dev/null); then
+ ${INSTALLER_CMD} ${pkg}
+ fi
+ done
+fi
+
+# If we're using a venv, we need to work around sudo not
+# keeping the path even with -E.
+PYTHON=$(which python)
+
+# To install python packages, we need pip.
+#
+# We can't use the apt packaged version of pip since
+# older versions of pip are incompatible with
+# requests, one of our indirect dependencies (bug 1459947).
+#
+# Note(cinerama): We use pip to install an updated pip plus our
+# other python requirements. pip breakages can seriously impact us,
+# so we've chosen to install/upgrade pip here rather than in
+# requirements (which are synced automatically from the global ones)
+# so we can quickly and easily adjust version parameters.
+# See bug 1536627.
+#
+# Note(cinerama): If pip is linked to pip3, the rest of the install
+# won't work. Remove the alternatives. This is due to ansible's
+# python 2.x requirement.
+if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
+ sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
+fi
+
+if ! which pip; then
+ wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
+ sudo -H -E ${PYTHON} /tmp/get-pip.py
+fi
+
+PIP=$(which pip)
+
+sudo -H -E ${PIP} install "pip>6.0"
+
+pip install ansible==$XCI_ANSIBLE_PIP_VERSION
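xci-deploy.sh sources this script (next diff) so that PYTHON, PIP and any PATH adjustments persist in the deploying shell. A hypothetical standalone invocation, with the version pin the script expects from its caller:

    export XCI_ANSIBLE_PIP_VERSION=2.3.0.0   # hypothetical pin; xci-deploy.sh supplies the real value
    source prototypes/xci/file/install-ansible.sh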
diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh
index 2fd9be022..718ed73c2 100755
--- a/prototypes/xci/xci-deploy.sh
+++ b/prototypes/xci/xci-deploy.sh
@@ -50,7 +50,7 @@ echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------
# Install ansible on localhost
#-------------------------------------------------------------------------------
-pip install ansible==$XCI_ANSIBLE_PIP_VERSION
+source file/install-ansible.sh
# TODO: The xci playbooks can be put into a playbook which will be done later.
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
index 22943fc97..7e30cc639 100644
--- a/utils/create_pod_file.py
+++ b/utils/create_pod_file.py
@@ -63,17 +63,27 @@ def create_file(handler):
node_list = []
index = 1
for node in nodes:
- if node.roles[0].lower() == "controller":
- node_info = {'name': "node%s" % index, 'role': node.roles[0],
- 'ip': node.ip, 'user': 'root'}
+ try:
+ if node.roles[0].lower() == "controller":
+ node_info = {'name': "node%s" % index, 'role': node.roles[0],
+ 'ip': node.ip, 'user': 'root'}
+ node_list.append(node_info)
+ index += 1
+ except Exception:
+ node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
+ 'user': 'root'}
node_list.append(node_info)
- index += 1
for node in nodes:
- if node.roles[0].lower() == "compute":
- node_info = {'name': "node%s" % index, 'role': node.roles[0],
- 'ip': node.ip, 'user': 'root'}
+ try:
+ if node.roles[0].lower() == "compute":
+ node_info = {'name': "node%s" % index, 'role': node.roles[0],
+ 'ip': node.ip, 'user': 'root'}
+ node_list.append(node_info)
+ index += 1
+ except Exception:
+ node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
+ 'user': 'root'}
node_list.append(node_info)
- index += 1
if args.INSTALLER_TYPE == 'compass':
for item in node_list:
item['password'] = 'root'
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index 3ec2d1ee8..6a382a56c 100755
--- a/utils/fetch_os_creds.sh
+++ b/utils/fetch_os_creds.sh
@@ -138,7 +138,7 @@ elif [ "$installer_type" == "apex" ]; then
if [ -f /root/.ssh/id_rsa ]; then
chmod 600 /root/.ssh/id_rsa
fi
- sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc_v3 $dest_path
+ sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc.v3 $dest_path
elif [ "$installer_type" == "compass" ]; then
verify_connectivity $installer_ip