summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xci/00-maasdeploy.sh3
-rwxr-xr-xci/01-bootstrap.sh2
-rwxr-xr-xci/02-deploybundle.sh15
-rwxr-xr-xci/03-maasdeploy.sh387
-rwxr-xr-xci/clean.sh2
-rwxr-xr-xci/deploy.sh6
6 files changed, 402 insertions, 13 deletions
diff --git a/ci/00-maasdeploy.sh b/ci/00-maasdeploy.sh
index 6bd22583..7d9e0070 100755
--- a/ci/00-maasdeploy.sh
+++ b/ci/00-maasdeploy.sh
@@ -321,6 +321,7 @@ elif [ -e ~/.juju/deployconfig.yaml ]; then
cp ~/.juju/deployconfig.yaml ./deployconfig.yaml
fi
+if [ -e ./deployconfig.yaml ]; then
enableiflist=`grep "interface-enable" deployconfig.yaml | cut -d ' ' -f 4 `
datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
stornet=`grep "storageNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
@@ -361,7 +362,7 @@ fi
# Add the cloud and controller credentials for MAAS for that lab.
jujuver=`juju --version`
-if [ "$jujuver" > "2" ]; then
+if [[ "$jujuver" > "2" ]]; then
addcloud
addcredential
fi
diff --git a/ci/01-bootstrap.sh b/ci/01-bootstrap.sh
index 5ec5e68e..f09c8670 100755
--- a/ci/01-bootstrap.sh
+++ b/ci/01-bootstrap.sh
@@ -6,7 +6,7 @@ set -ex
jujuver=`juju --version`
-if [ "$jujuver" < "2" ]; then
+if [[ "$jujuver" < "2" ]]; then
juju bootstrap --debug --to bootstrap.maas
sleep 5
#disable juju gui until xenial charms are in charm store.
diff --git a/ci/02-deploybundle.sh b/ci/02-deploybundle.sh
index d08dae6c..ad7c6ff1 100755
--- a/ci/02-deploybundle.sh
+++ b/ci/02-deploybundle.sh
@@ -103,18 +103,15 @@ if [ "$osdomname" != "None" ]; then
var=$var"_"publicapi
fi
-if [ "$jujuver" < "2" ]; then
- #lets generate the bundle for all target using genBundle.py
- python genBundle.py -j 1 -l deployconfig.yaml -s $var > bundles.yaml
- #keep the back in cloud for later debugging.
- pastebinit bundles.yaml || true
+#lets generate the bundle for all target using genBundle.py
+python genBundle.py -l deployconfig.yaml -s $var > bundles.yaml
+#keep the backup in cloud for later debugging.
+pastebinit bundles.yaml || true
+
+if [[ "$jujuver" < "2" ]]; then
echo "... Deployment Started ...."
juju-deployer -vW -d -t 7200 -r 5 -c bundles.yaml $opnfvdistro-"$openstack"
else
- #lets generate the bundle for all target using genBundle.py
- python genBundle.py -j 2 -l deployconfig.yaml -s $var > bundles.yaml
- #keep the back in cloud for later debugging.
- pastebinit bundles.yaml || true
# with JUJU 2.0 bundles has to be deployed only once.
juju deploy bundles.yaml --debug
sleep 120
diff --git a/ci/03-maasdeploy.sh b/ci/03-maasdeploy.sh
new file mode 100755
index 00000000..f50775c9
--- /dev/null
+++ b/ci/03-maasdeploy.sh
@@ -0,0 +1,387 @@
+#!/bin/bash
+#placeholder for deployment script.
+set -ex
+
+virtinstall=0
+labname=$1
+
+#install the packages needed
+sudo apt-add-repository ppa:opnfv-team/proposed -y
+sudo apt-add-repository ppa:maas-deployers/stable -y
+sudo apt-add-repository ppa:juju/stable -y
+sudo apt-add-repository ppa:maas/stable -y
+sudo apt-add-repository cloud-archive:mitaka -y
+sudo apt-get update -y
+sudo apt-get dist-upgrade -y
+sudo apt-get install openssh-server bzr git maas-deployer juju juju-deployer \
+ maas-cli python-pip python-psutil python-openstackclient \
+ python-congressclient gsutil charm-tools pastebinit juju-core -y
+
+#first parameter should be custom and second should be either
+# absolute location of file (including file name) or url of the
+# file to download.
+
+labname=$1
+labfile=$2
+
+#
+# Config preparation
+#
+
+# Get labconfig and generate deployment.yaml for MAAS and deployconfig.yaml
+case "$labname" in
+ intelpod[569]|orangepod[12]|cengnpod[12] )
+ array=(${labname//pod/ })
+ cp ../labconfig/${array[0]}/pod${array[1]}/labconfig.yaml .
+ python genMAASConfig.py -l labconfig.yaml > deployment.yaml
+ python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
+ ;;
+ 'attvirpod1' )
+ cp ../labconfig/att/virpod1/labconfig.yaml .
+ python genMAASConfig.py -l labconfig.yaml > deployment.yaml
+ python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
+ ;;
+ 'juniperpod1' )
+ cp maas/juniper/pod1/deployment.yaml ./deployment.yaml
+ ;;
+ 'custom')
+ if [ -e $labfile ]; then
+ cp $labfile ./labconfig.yaml || true
+ else
+            wget $labfile -t 3 -T 10 -O ./labconfig.yaml || true
+ count=`wc -l labconfig.yaml | cut -d " " -f 1`
+ if [ $count -lt 10 ]; then
+ rm -rf labconfig.yaml
+ fi
+ fi
+ if [ ! -e ./labconfig.yaml ]; then
+ virtinstall=1
+ else
+ python genMAASConfig.py -l labconfig.yaml > deployment.yaml
+ python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
+ labname=`grep "maas_name" deployment.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
+ fi
+ ;;
+ * )
+ virtinstall=1
+ ;;
+esac
+
+# In the case of a virtual deployment get deployment.yaml and deployconfig.yaml
+if [ "$virtinstall" -eq 1 ]; then
+ labname="default"
+ ./cleanvm.sh || true
+ cp ../labconfig/default/deployment.yaml ./
+ cp ../labconfig/default/labconfig.yaml ./
+ cp ../labconfig/default/deployconfig.yaml ./
+fi
+
+#
+# Prepare local environment to avoid password asking
+#
+
+# make sure no password asked during the deployment.
+echo "$USER ALL=(ALL) NOPASSWD:ALL" > 90-joid-init
+
+if [ -e /etc/sudoers.d/90-joid-init ]; then
+ sudo cp /etc/sudoers.d/90-joid-init 91-joid-init
+ sudo chown $USER:$USER 91-joid-init
+ sudo chmod 660 91-joid-init
+ sudo cat 90-joid-init >> 91-joid-init
+ sudo chown root:root 91-joid-init
+ sudo mv 91-joid-init /etc/sudoers.d/
+else
+ sudo chown root:root 90-joid-init
+ sudo mv 90-joid-init /etc/sudoers.d/
+fi
+
+if [ ! -e $HOME/.ssh/id_rsa ]; then
+ ssh-keygen -N '' -f $HOME/.ssh/id_rsa
+fi
+
+echo "... Deployment of maas Started ...."
+
+#
+# Virsh preparation
+#
+
+# define the pool and try to start even though it may already exist.
+# For fresh install this may or may not there.
+sudo apt-get install libvirt-bin -y
+sudo adduser $USER libvirtd
+sudo virsh pool-define-as default --type dir --target /var/lib/libvirt/images/ || true
+sudo virsh pool-start default || true
+sudo virsh pool-autostart default || true
+
+# In case of virtual install set network
+if [ "$virtinstall" -eq 1 ]; then
+ sudo virsh net-dumpxml default > default-net-org.xml
+ sudo sed -i '/dhcp/d' default-net-org.xml
+ sudo sed -i '/range/d' default-net-org.xml
+ sudo virsh net-define default-net-org.xml
+ sudo virsh net-destroy default
+ sudo virsh net-start default
+fi
+
+# Ensure virsh can connect without ssh auth
+cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
+
+
+#
+# Cleanup, juju init and config backup
+#
+
+# To avoid problem between apiclient/maas_client and apiclient from google
+# we remove the package google-api-python-client from yardstick installer
+if [ $(pip list |grep google-api-python-client |wc -l) == 1 ]; then
+    sudo pip uninstall -y google-api-python-client
+fi
+
+#create backup directory
+mkdir ~/joid_config/ || true
+mkdir ~/.juju/ || true
+
+# Init Juju
+juju init -f || true
+
+#
+# MAAS deploy
+#
+
+installmaas(){
+ sudo apt-get install maas -y
+
+
+}
+
+configuremaas(){
+ sudo maas createadmin --username=ubuntu --email=ubuntu@ubuntu.com
+}
+
+addnodes(){
+    # placeholder — bash rejects an empty compound body, so keep a ':' no-op
+    :
+}
+
+sudo maas-deployer -c deployment.yaml -d --force
+
+sudo chown $USER:$USER environments.yaml
+
+echo "... Deployment of maas finish ...."
+
+# Backup deployment.yaml and deployconfig.yaml in .juju folder
+
+cp ./environments.yaml ~/.juju/
+cp ./environments.yaml ~/joid_config/
+
+if [ -e ./deployconfig.yaml ]; then
+ cp ./deployconfig.yaml ~/.juju/
+ cp ./labconfig.yaml ~/.juju/
+ cp ./deployconfig.yaml ~/joid_config/
+ cp ./labconfig.yaml ~/joid_config/
+fi
+
+if [ -e ./deployment.yaml ]; then
+ cp ./deployment.yaml ~/.juju/
+ cp ./deployment.yaml ~/joid_config/
+fi
+
+#
+# MAAS Customization
+#
+
+maas_ip=`grep " ip_address" deployment.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
+apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2`
+maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}
+maas maas sshkeys new key="`cat $HOME/.ssh/id_rsa.pub`"
+
+#Added the Qtip public key to run the Qtip test after install on bare metal nodes.
+#maas maas sshkeys new key="`cat ./maas/sshkeys/QtipKey.pub`"
+#maas maas sshkeys new key="`cat ./maas/sshkeys/DominoKey.pub`"
+
+#adding compute and control nodes VM to MAAS for virtual deployment purpose.
+if [ "$virtinstall" -eq 1 ]; then
+ # create two more VMs to do the deployment.
+ sudo virt-install --connect qemu:///system --name node1-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node1-control
+
+ sudo virt-install --connect qemu:///system --name node2-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node2-compute
+
+ sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute
+
+ node1controlmac=`grep "mac address" node1-control | head -1 | cut -d "'" -f 2`
+ node2computemac=`grep "mac address" node2-compute | head -1 | cut -d "'" -f 2`
+ node5computemac=`grep "mac address" node5-compute | head -1 | cut -d "'" -f 2`
+
+ sudo virsh -c qemu:///system define --file node1-control
+ sudo virsh -c qemu:///system define --file node2-compute
+ sudo virsh -c qemu:///system define --file node5-compute
+
+ maas maas tags new name='control'
+ maas maas tags new name='compute'
+
+ controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node1-control' tags='control' hostname='node1-control' power_type='virsh' mac_addresses=$node1controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node1-control' | grep system_id | cut -d '"' -f 4 `
+
+ maas maas tag update-nodes control add=$controlnodeid
+
+ computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node2-compute' tags='compute' hostname='node2-compute' power_type='virsh' mac_addresses=$node2computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node2-compute' | grep system_id | cut -d '"' -f 4 `
+
+ maas maas tag update-nodes compute add=$computenodeid
+
+ computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4 `
+
+ maas maas tag update-nodes compute add=$computenodeid
+fi
+
+#
+# Functions for MAAS network customization
+#
+
+#Below function will mark the interfaces in Auto mode to be enabled by MAAS
+enableautomode() {
+ listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
+ for nodes in $listofnodes
+ do
+ maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
+ done
+}
+
+#Below function will mark the interfaces in Auto mode to be enabled by MAAS
+# using hostname of the node added into MAAS
+enableautomodebyname() {
+ if [ ! -z "$4" ]; then
+ for i in `seq 1 7`;
+ do
+ nodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
+ if [ ! -z "$nodes" ]; then
+ maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
+ fi
+ done
+ fi
+}
+
+#Below function will create vlan and update interface with the new vlan
+# will return the vlan id created
+crvlanupdsubnet() {
+ newvlanid=`maas maas vlans create $2 name=$3 vid=$4 | grep resource | cut -d '/' -f 6 `
+ maas maas subnet update $5 vlan=$newvlanid
+ eval "$1"="'$newvlanid'"
+}
+
+#Below function will create interface with new vlan and bind to physical interface
+crnodevlanint() {
+ listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
+
+ for nodes in $listofnodes
+ do
+ parentid=`maas maas interface read $nodes $2 | grep interfaces | cut -d '/' -f 8`
+ maas maas interfaces create-vlan $nodes vlan=$1 parent=$parentid
+ done
+ }
+
+#function for JUJU environment
+
+addcredential() {
+ controllername=`awk 'NR==1{print $2}' environments.yaml`
+ cloudname=`awk 'NR==1{print $2}' environments.yaml`
+
+ echo "credentials:" > credential.yaml
+ echo " $controllername:" >> credential.yaml
+ echo " opnfv-credentials:" >> credential.yaml
+ echo " auth-type: oauth1" >> credential.yaml
+ echo " maas-oauth: $apikey" >> credential.yaml
+
+ juju add-credential $controllername -f credential.yaml --replace
+}
+
+addcloud() {
+ controllername=`awk 'NR==1{print $2}' environments.yaml`
+ cloudname=`awk 'NR==1{print $2}' environments.yaml`
+
+ echo "clouds:" > maas-cloud.yaml
+ echo " $cloudname:" >> maas-cloud.yaml
+ echo " type: maas" >> maas-cloud.yaml
+ echo " auth-types: [oauth1]" >> maas-cloud.yaml
+ echo " endpoint: http://$maas_ip/MAAS" >> maas-cloud.yaml
+
+ juju add-cloud $cloudname maas-cloud.yaml --replace
+}
+
+
+#
+# VLAN customization
+#
+
+case "$labname" in
+ 'intelpod9' )
+ maas refresh
+ crvlanupdsubnet vlan904 fabric-1 "MgmtNetwork" 904 2 || true
+ crvlanupdsubnet vlan905 fabric-2 "PublicNetwork" 905 3 || true
+ crnodevlanint $vlan905 eth1 || true
+ crnodevlanint $vlan905 eth3 || true
+ enableautomodebyname eth1.905 AUTO "10.9.15.0/24" || true
+ enableautomodebyname eth3.905 AUTO "10.9.15.0/24" || true
+ enableautomodebyname eth0 AUTO "10.9.12.0/24" || true
+ enableautomodebyname eth2 AUTO "10.9.12.0/24" || true
+ ;;
+esac
+
+#
+# Enable MAAS nodes interfaces
+#
+
+#read interface needed in Auto mode and enable it. Will be removed once auto enablement is implemented in the maas-deployer.
+if [ -e ~/joid_config/deployconfig.yaml ]; then
+ cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
+elif [ -e ~/.juju/deployconfig.yaml ]; then
+ cp ~/.juju/deployconfig.yaml ./deployconfig.yaml
+fi
+
+ enableiflist=`grep "interface-enable" deployconfig.yaml | cut -d ' ' -f 4 `
+ datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
+ stornet=`grep "storageNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
+ pubnet=`grep "publicNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
+
+ # split EXTERNAL_NETWORK=first ip;last ip; gateway;network
+
+ if [ "$datanet" != "''" ]; then
+ EXTNET=(${enableiflist//,/ })
+ i="0"
+ while [ ! -z "${EXTNET[i]}" ];
+ do
+ enableautomode ${EXTNET[i]} AUTO $datanet || true
+ i=$[$i+1]
+ done
+ fi
+ if [ "$stornet" != "''" ]; then
+ EXTNET=(${enableiflist//,/ })
+ i="0"
+ while [ ! -z "${EXTNET[i]}" ];
+ do
+ enableautomode ${EXTNET[i]} AUTO $stornet || true
+ i=$[$i+1]
+ done
+ fi
+ if [ "$pubnet" != "''" ]; then
+ EXTNET=(${enableiflist//,/ })
+ i="0"
+ while [ ! -z "${EXTNET[i]}" ];
+ do
+ enableautomode ${EXTNET[i]} AUTO $pubnet || true
+ i=$[$i+1]
+ done
+ fi
+fi
+
+
+# Add the cloud and controller credentials for MAAS for that lab.
+jujuver=`juju --version`
+
+if [[ "$jujuver" > "2" ]]; then
+    addcloud
+    addcredential
+fi
+
+#
+# End of scripts
+#
+echo " .... MAAS deployment finished successfully ...."
diff --git a/ci/clean.sh b/ci/clean.sh
index 3c0a71e4..4b308fd0 100755
--- a/ci/clean.sh
+++ b/ci/clean.sh
@@ -9,7 +9,7 @@ fi
jujuver=`juju --version`
-if [ "$jujuver" > "2" ]; then
+if [[ "$jujuver" > "2" ]]; then
controllername=`awk 'NR==1{print $2}' environments.yaml`
cloudname=`awk 'NR==1{print $2}' environments.yaml`
juju kill-controller $controllername --timeout 10s -y || true
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 66a9e8f8..09427476 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -137,6 +137,10 @@ deploy() {
createresource
fi
+ if [[ "$jujuver" > "2" ]]; then
+ juju model-config default-series=$opnfvdistro enable-os-refresh-update=false enable-os-upgrade=false
+ fi
+
#bootstrap the node
./01-bootstrap.sh
@@ -179,7 +183,7 @@ echo "...... deployment finished ......."
# creating heat domain after puching the public API into /etc/hosts
-if [ "$jujuver" > "2" ]; then
+if [[ "$jujuver" > "2" ]]; then
status=`juju run-action do heat/0 domain-setup`
echo $status
else