author     Fatih Degirmenci <fatih.degirmenci@ericsson.com>    2017-03-21 22:16:18 +0100
committer  Fatih Degirmenci <fatih.degirmenci@ericsson.com>    2017-03-22 10:20:02 +0100
commit     02dedae8348cd88f80b0ccae867572171e52f3bc (patch)
tree       a20f6089c36ec3271ffd532fbf1f8cff61a3e386
parent     aab72dd7c774290cc7529a2d5f823d89529945d3 (diff)
xci: Updates to releng/bifrost to make it work on Jenkins
- get rid of BAREMETAL_DATA_FILE and use BIFROST_INVENTORY_SOURCE so osa-bifrost works for both master and ocata.
- set the BIFROST_INVENTORY_SOURCE file according to the branch bifrost is executed from for osa-bifrost.
- explicitly set which SSH public key file to use, as ansible copies the wrong public key to the nodes if bifrost is executed on Jenkins using sudo.
- set the branches if they are not already set so the scripts can also be used manually.
- rename jumphost to xcimaster so people do not mix it up with the actual jumphost located in Pharos PODs.

Change-Id: Iff7631fa99816ad75316b62c5ac20714f67cd86a
Signed-off-by: Fatih Degirmenci <fatih.degirmenci@ericsson.com>
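For reference, the branch-dependent defaulting that the osa-bifrost script now relies on looks roughly like the sketch below. This is an illustrative summary only (shown here with the variable quoted); the authoritative lines are in prototypes/bifrost/scripts/osa-bifrost-deployment.sh in the diff further down.

    # ensure the branch is set, then pick the inventory format per branch
    # unless the caller has already exported BIFROST_INVENTORY_SOURCE
    export OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
    if [ "$OPENSTACK_BRANCH" = "master" ]; then
        export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.json'}
    else
        export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
    fi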
-rwxr-xr-x  jjb/xci/bifrost-verify.sh                                    |  2
-rwxr-xr-x  jjb/xci/xci-provision.sh                                     |  8
-rw-r--r--  prototypes/bifrost/README.md                                 |  2
-rw-r--r--  prototypes/bifrost/playbooks/inventory/group_vars/baremetal  | 53
-rwxr-xr-x  prototypes/bifrost/scripts/osa-bifrost-deployment.sh         | 17
-rwxr-xr-x  prototypes/bifrost/scripts/test-bifrost-deployment.sh        |  5
6 files changed, 73 insertions, 14 deletions
diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index e0c50907a..762466825 100755
--- a/jjb/xci/bifrost-verify.sh
+++ b/jjb/xci/bifrost-verify.sh
@@ -115,7 +115,7 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
cd /opt/bifrost
sudo -E ./scripts/destroy-env.sh
-# provision 3 VMs; jumphost, controller, and compute
+# provision 3 VMs; xcimaster, controller, and compute
cd /opt/bifrost
sudo -E ./scripts/test-bifrost-deployment.sh
diff --git a/jjb/xci/xci-provision.sh b/jjb/xci/xci-provision.sh
index 4308c7ef8..e474093b8 100755
--- a/jjb/xci/xci-provision.sh
+++ b/jjb/xci/xci-provision.sh
@@ -43,9 +43,9 @@ sudo /bin/rm -rf /opt/bifrost /opt/openstack-ansible /opt/stack /opt/releng /opt
# Fix up permissions
fix_ownership
-# clone all the repos first and checkout the patch afterwards
-OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
-OPNFV_BRANCH=${OPNFV_BRANCH:-master}
+# ensure the branches to use are set
+export OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
+export OPNFV_BRANCH=${OPNFV_BRANCH:-master}
sudo git clone -b $OPENSTACK_BRANCH https://git.openstack.org/openstack/bifrost /opt/bifrost
sudo git clone -b $OPNFV_BRANCH https://gerrit.opnfv.org/gerrit/releng /opt/releng
@@ -63,7 +63,7 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
cd /opt/bifrost
sudo -E ./scripts/destroy-env.sh
-# provision 6 VMs; jumphost, controller00, controller01, controller02, compute00, and compute01
+# provision 6 VMs; xcimaster, controller00, controller01, controller02, compute00, and compute01
cd /opt/bifrost
sudo -E ./scripts/osa-bifrost-deployment.sh
diff --git a/prototypes/bifrost/README.md b/prototypes/bifrost/README.md
index 0ba49d46d..dc1417a86 100644
--- a/prototypes/bifrost/README.md
+++ b/prototypes/bifrost/README.md
@@ -31,7 +31,7 @@ Please follow that steps:
cd /opt/bifrost
sudo ./scripts/destroy-env.sh
-8. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
+8. Run deployment script to spin up 3 vms with bifrost: xcimaster, controller and compute::
cd /opt/bifrost
sudo ./scripts/test-bifrost-deployment.sh
diff --git a/prototypes/bifrost/playbooks/inventory/group_vars/baremetal b/prototypes/bifrost/playbooks/inventory/group_vars/baremetal
new file mode 100644
index 000000000..008b04d11
--- /dev/null
+++ b/prototypes/bifrost/playbooks/inventory/group_vars/baremetal
@@ -0,0 +1,53 @@
+---
+# The ironic API URL for bifrost operations. Defaults to localhost.
+# ironic_url: "http://localhost:6385/"
+
+# The network interface that bifrost will be operating on. Defaults
+# to virbr0 in roles, can be overridden here.
+# network_interface: "virbr0"
+
+# The path to the SSH key to be utilized for testing and burn-in
+# to configuration drives. When set, it should be set in both baremetal
+# and localhost groups, however this is only an override to the default.
+
+# workaround for opnfv ci until we can fix non-root use
+ssh_public_key_path: "/root/.ssh/id_rsa.pub"
+
+# Normally this user should be root, however if cirros is used,
+# a user may wish to define a specific user for testing VM
+# connectivity during a test sequence
+testing_user: root
+
+# The default port to download files via. Required for IPA URL generation.
+# Presently the defaults are located in the roles, however if changed both
+# the localhost and baremetal group files must be updated.
+# file_url_port: 8080
+
+# IPA Image parameters. If these are changed, they must be changed in
+# both localhost and baremetal groups. Presently the defaults
+# in each role should be sufficient for proper operation.
+# ipa_kernel: "{{http_boot_folder}}/coreos_production_pxe.vmlinuz"
+# ipa_ramdisk: "{{http_boot_folder}}/coreos_production_pxe_image-oem.cpio.gz"
+# ipa_kernel_url: "http://{{ hostvars[inventory_hostname]['ansible_' + network_interface]['ipv4']['address'] }}:{{file_url_port}}/coreos_production_pxe.vmlinuz"
+# ipa_ramdisk_url: "http://{{ hostvars[inventory_hostname]['ansible_' + network_interface]['ipv4']['address'] }}:{{file_url_port}}/coreos_production_pxe_image-oem.cpio.gz"
+
+# The http_boot_folder defines the root folder for the webserver.
+# If this setting is changed, it must be applied to both the baremetal
+# and localhost groups. Presently the role defaults are set to the value
+# below.
+# http_boot_folder: /httpboot
+
+# The settings for the name of the image to be deployed along with the
+# on disk location are below. If changed, these settings must be applied
+# to both the baremetal and localhost groups. If the file is already on
+# disk, then the image generation will not take place, otherwise an image
+# will be generated using diskimage-builder.
+# deploy_image_filename: "deployment_image.qcow2"
+# deploy_image: "{{http_boot_folder}}/{{deploy_image_filename}}"
+
+# Under normal circumstances, the os_ironic_node module does not wait for
+# the node to reach active state before continuing with the deployment
+# process. This means we may have to timeout, to figure out a deployment
+# failed. Change wait_for_node_deploy to true to cause bifrost to wait for
+# Ironic to show the instance in Active state.
+wait_for_node_deploy: false
diff --git a/prototypes/bifrost/scripts/osa-bifrost-deployment.sh b/prototypes/bifrost/scripts/osa-bifrost-deployment.sh
index c92bd9d4e..33ad10887 100755
--- a/prototypes/bifrost/scripts/osa-bifrost-deployment.sh
+++ b/prototypes/bifrost/scripts/osa-bifrost-deployment.sh
@@ -18,10 +18,18 @@ ENABLE_VENV="false"
USE_DHCP="false"
USE_VENV="false"
BUILD_IMAGE=true
-export BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
-export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
+# ensure the branch is set
+export OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
+
+# ensure the right inventory file is used based on the branch
+if [ $OPENSTACK_BRANCH = "master" ]; then
+ export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.json'}
+else
+ export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
+fi
+
# Set defaults for ansible command-line options to drive the different
# tests.
@@ -34,7 +42,7 @@ PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
# use cirros.
TEST_VM_NUM_NODES=6
-export TEST_VM_NODE_NAMES="jumphost controller00 controller01 controller02 compute00 compute01"
+export TEST_VM_NODE_NAMES="xcimaster controller00 controller01 controller02 compute00 compute01"
export VM_DOMAIN_TYPE="kvm"
# 8 vCPU, 60 GB HDD are minimum equipment
export VM_CPU=${VM_CPU:-8}
@@ -107,8 +115,7 @@ ${ANSIBLE} \
-e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
-e test_vm_memory_size=${VM_MEMORY_SIZE} \
-e enable_venv=${ENABLE_VENV} \
- -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
- -e baremetal_json_file=${BAREMETAL_DATA_FILE}
+ -e test_vm_domain_type=${VM_DOMAIN_TYPE}
# Execute the installation and VM startup test.
${ANSIBLE} \
diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh
index 2e33bc164..83cf1cc1b 100755
--- a/prototypes/bifrost/scripts/test-bifrost-deployment.sh
+++ b/prototypes/bifrost/scripts/test-bifrost-deployment.sh
@@ -18,9 +18,8 @@ ENABLE_VENV="false"
USE_DHCP="false"
USE_VENV="false"
BUILD_IMAGE=true
-export BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
-export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
+BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
# Set defaults for ansible command-line options to drive the different
# tests.
@@ -34,7 +33,7 @@ PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
# use cirros.
TEST_VM_NUM_NODES=3
-export TEST_VM_NODE_NAMES="jumphost.opnfvlocal controller00.opnfvlocal compute00.opnfvlocal"
+export TEST_VM_NODE_NAMES="xcimaster controller00 compute00"
export VM_DOMAIN_TYPE="kvm"
export VM_CPU=${VM_CPU:-4}
export VM_DISK=${VM_DISK:-100}