-rwxr-xr-x  jjb/apex/apex-deploy.sh                               2
-rwxr-xr-x  jjb/apex/apex-upload-artifact.sh                      4
-rw-r--r--  jjb/armband/armband-ci-jobs.yml                      70
-rw-r--r--  jjb/daisy4nfv/daisy4nfv-verify-jobs.yml              12
-rw-r--r--  jjb/functest/functest-ci-jobs.yml                     8
-rw-r--r--  jjb/opnfv/slave-params.yml                           18
-rw-r--r--  jjb/yardstick/yardstick-ci-jobs.yml                  18
-rwxr-xr-x  prototypes/bifrost/scripts/destroy-env.sh             2
-rw-r--r--  prototypes/puppet-infracloud/deploy_on_baremetal.md  57
9 files changed, 180 insertions(+), 11 deletions(-)
diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index 72fa6f6f0..e21387ac6 100755
--- a/jjb/apex/apex-deploy.sh
+++ b/jjb/apex/apex-deploy.sh
@@ -3,7 +3,7 @@ set -o errexit
set -o nounset
set -o pipefail
-APEX_PKGS="common undercloud opendaylight-sfc onos"
+APEX_PKGS="common undercloud onos"
IPV6_FLAG=False
# log info to console
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index 0dd112bc8..f54e4c55a 100755
--- a/jjb/apex/apex-upload-artifact.sh
+++ b/jjb/apex/apex-upload-artifact.sh
@@ -49,13 +49,13 @@ echo "ISO Upload Complete!"
RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
done
SRPM_INSTALL_PATH=$BUILD_DIRECTORY
SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
done
}
diff --git a/jjb/armband/armband-ci-jobs.yml b/jjb/armband/armband-ci-jobs.yml
index d4fa5da94..2122959a9 100644
--- a/jjb/armband/armband-ci-jobs.yml
+++ b/jjb/armband/armband-ci-jobs.yml
@@ -50,6 +50,10 @@
slave-label: arm-pod2
installer: fuel
<<: *colorado
+ - arm-pod3:
+ slave-label: arm-pod3
+ installer: fuel
+ <<: *colorado
#--------------------------------
# master
#--------------------------------
@@ -57,6 +61,10 @@
slave-label: arm-pod2
installer: fuel
<<: *master
+ - arm-pod3:
+ slave-label: arm-pod3
+ installer: fuel
+ <<: *master
#--------------------------------
# scenarios
#--------------------------------
@@ -413,3 +421,65 @@
name: 'fuel-os-odl_l2-sfc-noha-arm-pod2-colorado-trigger'
triggers:
- timed: ''
+#----------------------------------------------------------
+# Enea Armband POD 3 Triggers running against master branch
+#----------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+#---------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against colorado branch
+#---------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
diff --git a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
index e81e300c9..7c47d9f69 100644
--- a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
+++ b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
@@ -1,9 +1,7 @@
- project:
name: 'daisy4nfv-verify-jobs'
- project: 'daisy4nfv'
-
- installer: 'daisy4nfv'
+ project: 'daisy'
#####################################
# branch definitions
#####################################
@@ -184,25 +182,25 @@
# builder macros
#####################################
- builder:
- name: 'daisy4nfv-verify-basic-macro'
+ name: 'daisy-verify-basic-macro'
builders:
- shell:
!include-raw: ./daisy4nfv-basic.sh
- builder:
- name: 'daisy4nfv-verify-build-macro'
+ name: 'daisy-verify-build-macro'
builders:
- shell:
!include-raw: ./daisy4nfv-build.sh
- builder:
- name: 'daisy4nfv-verify-deploy-virtual-macro'
+ name: 'daisy-verify-deploy-virtual-macro'
builders:
- shell:
!include-raw: ./daisy4nfv-virtual-deploy.sh
- builder:
- name: 'daisy4nfv-verify-smoke-test-macro'
+ name: 'daisy-verify-smoke-test-macro'
builders:
- shell: |
#!/bin/bash
diff --git a/jjb/functest/functest-ci-jobs.yml b/jjb/functest/functest-ci-jobs.yml
index 348779308..afeb1f92e 100644
--- a/jjb/functest/functest-ci-jobs.yml
+++ b/jjb/functest/functest-ci-jobs.yml
@@ -140,6 +140,10 @@
slave-label: '{pod}'
installer: fuel
<<: *master
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *master
- zte-pod1:
slave-label: '{pod}'
installer: fuel
@@ -164,6 +168,10 @@
slave-label: '{pod}'
installer: fuel
<<: *colorado
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *colorado
# PODs for verify jobs triggered by each patch upload
- ool-virtual1:
slave-label: '{pod}'
diff --git a/jjb/opnfv/slave-params.yml b/jjb/opnfv/slave-params.yml
index 7eca41a6d..b46960fa7 100644
--- a/jjb/opnfv/slave-params.yml
+++ b/jjb/opnfv/slave-params.yml
@@ -611,6 +611,24 @@
default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
description: 'Base URI to the configuration directory'
- parameter:
+ name: 'arm-pod3-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-pod3
+ default-slaves:
+ - arm-pod3
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
+- parameter:
name: 'intel-virtual6-defaults'
parameters:
- node:
diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml
index 962ea4743..c10daabe0 100644
--- a/jjb/yardstick/yardstick-ci-jobs.yml
+++ b/jjb/yardstick/yardstick-ci-jobs.yml
@@ -172,6 +172,16 @@
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *colorado
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
- orange-pod2:
slave-label: '{pod}'
installer: joid
@@ -423,6 +433,14 @@
description: 'Arguments to use in order to choose the backend DB'
- parameter:
+ name: 'yardstick-params-arm-pod3'
+ parameters:
+ - string:
+ name: YARDSTICK_DB_BACKEND
+ default: '-i 104.197.68.199:8086'
+ description: 'Arguments to use in order to choose the backend DB'
+
+- parameter:
name: 'yardstick-params-virtual'
parameters:
- string:
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
index 674645777..cdc55df1b 100755
--- a/prototypes/bifrost/scripts/destroy-env.sh
+++ b/prototypes/bifrost/scripts/destroy-env.sh
@@ -46,7 +46,7 @@ fi
rm -rf /var/lib/libvirt/images/*.qcow2
echo "restarting services"
-service dnsmasq restart
+service dnsmasq restart || true
service libvirtd restart
service ironic-api restart
service ironic-conductor start
diff --git a/prototypes/puppet-infracloud/deploy_on_baremetal.md b/prototypes/puppet-infracloud/deploy_on_baremetal.md
new file mode 100644
index 000000000..334dff4d2
--- /dev/null
+++ b/prototypes/puppet-infracloud/deploy_on_baremetal.md
@@ -0,0 +1,57 @@
+How to deploy Infra Cloud on baremetal
+==================================
+
+Install bifrost controller
+--------------------------
+The first step in deploying Infra Cloud is to install the bifrost controller. This can be virtualized; it does not need to run on baremetal.
+To achieve that, first create a virtual machine with libvirt, with the proper network setup. This VM needs to share one physical interface (the PXE boot one) with the servers for the controller and compute nodes.
+Please follow the documentation at [https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md](https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md) to get sample templates and instructions for creating the bifrost VM.
+
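+As a rough illustration only (the bifrost templates linked above are the authoritative source), such a VM could be created with virt-install; the VM name, sizes and bridge name here are placeholders, and br-pxe is assumed to be a bridge on the physical PXE interface:
+
+    # Hypothetical sketch; adjust name, sizes and bridge to your environment.
+    virt-install --name baremetal --memory 8192 --vcpus 4 \
+        --disk size=100 --network bridge=br-pxe \
+        --cdrom /tmp/install.iso
+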
+Once the **baremetal** VM is finished, you can log in by ssh and start installing bifrost there. To proceed, follow these steps:
+
+ 1. Change to the root user and install git
+ 2. Clone the releng project (cd /opt; git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. cd /opt/releng/prototypes/puppet-infracloud
+ 4. Copy the hiera data to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 5. Ensure the hostname is properly set (hostnamectl set-hostname baremetal.opnfvlocal, then verify with hostname -f)
+ 6. Install puppet and the required modules (./install_puppet.sh, ./install_modules.sh)
+ 7. Apply puppet to install bifrost (puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules)
+
+ With these steps you will have a bifrost controller up and running; the same commands are collected into a single sketch below.
+
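+The steps above, collected into one shell session (the package-manager line is an assumption, since the distribution is not specified):
+
+    sudo -i                                  # step 1: become root
+    apt-get install -y git                   # assumption: Debian/Ubuntu; use yum/dnf elsewhere
+    cd /opt && git clone https://gerrit.opnfv.org/gerrit/releng
+    cd /opt/releng/prototypes/puppet-infracloud
+    cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml
+    hostnamectl set-hostname baremetal.opnfvlocal && hostname -f
+    ./install_puppet.sh && ./install_modules.sh
+    puppet apply manifests/site.pp \
+        --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+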
+Deploy baremetal servers
+--------------------------
+Once the bifrost controller is ready, use it to start the deployment of the baremetal servers.
+On the same bifrost VM, follow these steps (collected into a single session below):
+
+ 1. Source bifrost env vars: source /opt/stack/bifrost/env-vars
+ 2. Export the baremetal servers inventory: export BIFROST_INVENTORY_SOURCE=/opt/stack/baremetal.json
+ 3. Enroll the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 4. Deploy the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 5. Wait until they reach the **active** state; check it with: ironic node-list
+
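+The same sequence in one session (note the underscore in BIFROST_INVENTORY_SOURCE: a hyphen would not be a valid shell variable name; the playbooks directory is an assumption about the bifrost checkout layout):
+
+    source /opt/stack/bifrost/env-vars
+    export BIFROST_INVENTORY_SOURCE=/opt/stack/baremetal.json
+    cd /opt/stack/bifrost/playbooks          # assumed location of the playbooks
+    ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml \
+        -e @/etc/bifrost/bifrost_global_vars
+    ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml \
+        -e @/etc/bifrost/bifrost_global_vars
+    ironic node-list                         # repeat until all nodes report "active"
+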
+If a server needs to be redeployed, you can reset it and deploy it again (a scripted version follows the list):
+
+ 1. ironic node-set-provision-state <name_of_server> deleted
+ 2. Wait and check with ironic node-list until the server is in the **available** state
+ 3. Redeploy again: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+
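+A scripted version of the reset-and-redeploy cycle; the node name and the grep over ironic's tabular output are assumptions about your environment:
+
+    NODE=controller00.opnfvlocal             # hypothetical node name from your inventory
+    ironic node-set-provision-state "$NODE" deleted
+    until ironic node-list | grep "$NODE" | grep -q available; do
+        sleep 30                             # poll until the node is "available" again
+    done
+    ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml \
+        -e @/etc/bifrost/bifrost_global_vars
+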
+Deploy InfraCloud on the servers
+--------------------------------
+Once all the servers are in the **active** state, they can be accessed by ssh and the InfraCloud manifests can be applied to deploy a controller and a compute node.
+On each of them, follow these steps (collected into a single session below):
+
+ 1. ssh from the bifrost controller to the node's external IP: ssh root@172.30.13.90
+ 2. cd /opt and clone the releng project (git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. cd /opt/releng/prototypes/puppet-infracloud and copy hiera to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 4. Install the modules: ./install_modules.sh
+ 5. Apply puppet: puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+
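+The per-node sequence in one session (the IP is the example address from step 1):
+
+    ssh root@172.30.13.90                    # from the bifrost controller
+    cd /opt && git clone https://gerrit.opnfv.org/gerrit/releng
+    cd /opt/releng/prototypes/puppet-infracloud
+    cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml
+    ./install_modules.sh
+    puppet apply manifests/site.pp \
+        --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+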
+Once this has been done on the controller and the compute node, you will have a working cloud. To start working with it, follow these steps (see the example after the list):
+
+ 1. Ensure that controller00.opnfvlocal resolves properly to the external IP (this is already done in the bifrost controller)
+ 2. Copy releng/prototypes/puppet-infracloud/creds/clouds.yaml to $HOME/.config/openstack/clouds.yaml
+ 3. Install python-openstackclient
+ 4. Specify the cloud you want to use: export OS_CLOUD=opnfvlocal
+ 5. Now you can start operating your cloud with the openstack client: openstack flavor list
+
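+A minimal session from the machine holding clouds.yaml, for example:
+
+    pip install python-openstackclient
+    export OS_CLOUD=opnfvlocal
+    openstack flavor list
+    openstack image list                     # any other openstack subcommand works the same way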