summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xci/deploy/deploy.sh152
-rwxr-xr-xci/kolla-build.sh82
-rw-r--r--deploy/config/vm_environment/zte-virtual2/network.yml8
-rw-r--r--deploy/daisy.conf2
-rw-r--r--deploy/deploy.py78
-rw-r--r--deploy/tempest.py20
-rw-r--r--docs/development/design/ci.rst40
-rw-r--r--docs/development/design/index.rst1
-rw-r--r--docs/development/design/multicast.rst7
-rw-r--r--docs/release/configguide/index.rst2
-rw-r--r--docs/release/configguide/installerconfig.rst6
-rw-r--r--docs/release/configguide/kollaconfig.rst56
-rw-r--r--docs/release/installation/bmdeploy.rst35
-rw-r--r--docs/release/installation/index.rst3
-rw-r--r--docs/release/installation/recovery.rst80
-rw-r--r--docs/release/installation/upgrade.rst92
-rw-r--r--docs/release/installation/vmdeploy.rst41
-rw-r--r--templates/virtual_environment/vms/all_in_one.xml2
-rw-r--r--tools/daisy-post-fip.sh63
19 files changed, 631 insertions, 139 deletions
diff --git a/ci/deploy/deploy.sh b/ci/deploy/deploy.sh
index 2b6843c6..a104230d 100755
--- a/ci/deploy/deploy.sh
+++ b/ci/deploy/deploy.sh
@@ -32,6 +32,7 @@ OPTIONS:
-w Workdir for temporary usage, optional
-h Print this message and exit
-s Deployment scenario
+ -S Skip recreate Daisy VM during deployment
Description:
Deploys the Daisy4NFV on the indicated lab resource
@@ -62,6 +63,7 @@ POD_NAME=''
TARGET_HOSTS_NUM=0
DRY_RUN=0
IS_BARE=1
+SKIP_DEPLOY_DAISY=0
VM_MULTINODE=("computer01" "computer02" "controller02" "controller03" "controller01")
VALID_DEPLOY_SCENARIO=("os-nosdn-nofeature-noha" "os-nosdn-nofeature-ha" "os-odl_l3-nofeature-noha"
"os-odl_l2-nofeature-noha" "os-odl_l3-nofeature-ha" "os-odl_l2-nofeature-ha"
@@ -74,7 +76,7 @@ VALID_DEPLOY_SCENARIO=("os-nosdn-nofeature-noha" "os-nosdn-nofeature-ha" "os-odl
############################################################################
# BEGIN of main
#
-while getopts "b:B:Dd:n:l:p:r:w:s:h" OPTION
+while getopts "b:B:Dd:n:l:p:r:w:s:Sh" OPTION
do
case $OPTION in
b)
@@ -104,6 +106,9 @@ do
s)
DEPLOY_SCENARIO=${OPTARG}
;;
+ S)
+ SKIP_DEPLOY_DAISY=1
+ ;;
h)
usage
exit 0
@@ -272,6 +277,7 @@ function create_node
virsh net-define $net_template
virsh net-autostart $net_name
virsh net-start $net_name
+
virsh define $vms_template
virsh start $vms_name
}
@@ -310,20 +316,34 @@ function update_config
fi
}
-function clean_up_virtual_env()
+function clean_up_target_vms()
{
local vms=$(virsh list --all | tail -n +3 | awk '{print $2}')
local active_vms=$(virsh list | tail -n +3 | awk '{print $2}')
- for vm_name in ${VM_MULTINODE[@]} all_in_one daisy; do
+ for vm_name in ${VM_MULTINODE[@]} all_in_one; do
if [[ $(echo $vms | tr " " "\n" | grep ^$vm_name$) ]]; then
[[ $(echo $active_vms | tr " " "\n" | grep ^$vm_name$) ]] && virsh destroy $vm_name
virsh undefine $vm_name
fi
done
+}
+
+function clean_up_daisy_vm()
+{
+ local vms=$(virsh list --all | tail -n +3 | awk '{print $2}')
+ local active_vms=$(virsh list | tail -n +3 | awk '{print $2}')
+ vm_name=daisy
+ if [[ $(echo $vms | tr " " "\n" | grep ^$vm_name$) ]]; then
+ [[ $(echo $active_vms | tr " " "\n" | grep ^$vm_name$) ]] && virsh destroy $vm_name
+ virsh undefine $vm_name
+ fi
+}
+function clean_up_daisy_vnetworks()
+{
local nets=$(virsh net-list --all | tail -n +3 |awk '{print $1}')
local active_nets=$(virsh net-list | tail -n +3 |awk '{print $1}')
- for net_template in ${VMDELOY_DAISY_SERVER_NET} ${VMDEPLOY_TARGET_NODE_NET} ${VMDEPLOY_TARGET_KEEPALIVED_NET}; do
+ for net_template in ${VMDELOY_DAISY_SERVER_NET}; do
network_name=$(grep "<name>" $net_template | awk -F "<|>" '{print $3}')
if [[ $(echo $nets | tr " " "\n" | grep ^$network_name$) ]]; then
[[ $(echo $active_nets | tr " " "\n" | grep ^$network_name$) ]] && virsh net-destroy $network_name
@@ -332,49 +352,93 @@ function clean_up_virtual_env()
done
}
-echo "====== clean up all node and network ======"
-if [ $IS_BARE == 0 ];then
- clean_up_virtual_env
-else
- virsh destroy daisy
- virsh undefine daisy
-fi
+function clean_up_target_vnetworks()
+{
+ local nets=$(virsh net-list --all | tail -n +3 |awk '{print $1}')
+ local active_nets=$(virsh net-list | tail -n +3 |awk '{print $1}')
+ for net_template in ${VMDEPLOY_TARGET_NODE_NET} ${VMDEPLOY_TARGET_KEEPALIVED_NET}; do
+ network_name=$(grep "<name>" $net_template | awk -F "<|>" '{print $3}')
+ if [[ $(echo $nets | tr " " "\n" | grep ^$network_name$) ]]; then
+ [[ $(echo $active_nets | tr " " "\n" | grep ^$network_name$) ]] && virsh net-destroy $network_name
+ virsh net-undefine $network_name
+ fi
+ done
+}
-echo "====== create daisy node ======"
-$CREATE_QCOW2_PATH/daisy-img-modify.sh -c $CREATE_QCOW2_PATH/centos-img-modify.sh -w $WORKDIR -a $DAISY_IP $PARAS_IMAGE
-if [ $IS_BARE == 0 ];then
- create_node $VMDELOY_DAISY_SERVER_NET daisy1 $VMDEPLOY_DAISY_SERVER_VM daisy
-else
- virsh define $BMDEPLOY_DAISY_SERVER_VM
- virsh start daisy
-fi
-#wait for the daisy1 network start finished for execute trustme.sh
-#here sleep 40 just needed in Dell blade server
-#for E9000 blade server we only have to sleep 20
-sleep 40
-
-echo "====== install daisy ======"
-$DEPLOY_PATH/trustme.sh $DAISY_IP $DAISY_PASSWD
-ssh $SSH_PARAS $DAISY_IP "if [[ -f ${REMOTE_SPACE} || -d ${REMOTE_SPACE} ]]; then rm -fr ${REMOTE_SPACE}; fi"
-scp -r $WORKSPACE root@$DAISY_IP:${REMOTE_SPACE}
-ssh $SSH_PARAS $DAISY_IP "mkdir -p /home/daisy_install"
-update_config $WORKSPACE/deploy/daisy.conf daisy_management_ip $DAISY_IP
-scp $WORKSPACE/deploy/daisy.conf root@$DAISY_IP:/home/daisy_install
-ssh $SSH_PARAS $DAISY_IP "${REMOTE_SPACE}/opnfv.bin install"
-rc=$?
-if [ $rc -ne 0 ]; then
- echo "daisy install failed"
- exit 1
-else
- echo "daisy install successfully"
-fi
+function create_daisy_vm_and_networks()
+{
+ echo "====== Create Daisy VM ======"
+ $CREATE_QCOW2_PATH/daisy-img-modify.sh -c $CREATE_QCOW2_PATH/centos-img-modify.sh -w $WORKDIR -a $DAISY_IP $PARAS_IMAGE
+ if [ $IS_BARE == 0 ];then
+ create_node $VMDELOY_DAISY_SERVER_NET daisy1 $VMDEPLOY_DAISY_SERVER_VM daisy
+ else
+ virsh define $BMDEPLOY_DAISY_SERVER_VM
+ virsh start daisy
+ fi
+
+ #wait for the daisy1 network start finished for execute trustme.sh
+ #here sleep 40 just needed in Dell blade server
+ #for E9000 blade server we only have to sleep 20
+ sleep 40
+}
-echo "====== generate known_hosts file in daisy vm ======"
-touch $WORKSPACE/known_hosts
-scp $WORKSPACE/known_hosts root@$DAISY_IP:/root/.ssh/
+function clean_up_daisy_vm_and_networks()
+{
+ echo "====== Clean up Daisy VM and networks ======"
+ clean_up_daisy_vm
+ if [ $IS_BARE == 0 ];then
+ clean_up_daisy_vnetworks
+ fi
+}
+
+function clean_up_target_vms_and_networks()
+{
+ echo "====== Clean up all target VMs and networks ======"
+ if [ $IS_BARE == 0 ];then
+ clean_up_target_vms
+ clean_up_target_vnetworks
+ fi
+}
+
+function install_daisy()
+{
+ echo "====== install daisy ======"
+ $DEPLOY_PATH/trustme.sh $DAISY_IP $DAISY_PASSWD
+ ssh $SSH_PARAS $DAISY_IP "if [[ -f ${REMOTE_SPACE} || -d ${REMOTE_SPACE} ]]; then rm -fr ${REMOTE_SPACE}; fi"
+ scp -r $WORKSPACE root@$DAISY_IP:${REMOTE_SPACE}
+ ssh $SSH_PARAS $DAISY_IP "mkdir -p /home/daisy_install"
+ update_config $WORKSPACE/deploy/daisy.conf daisy_management_ip $DAISY_IP
+ scp $WORKSPACE/deploy/daisy.conf root@$DAISY_IP:/home/daisy_install
+ ssh $SSH_PARAS $DAISY_IP "${REMOTE_SPACE}/opnfv.bin install"
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ echo "daisy install failed"
+ exit 1
+ else
+ echo "daisy install successfully"
+ fi
+
+ #TODO: Why need this?
+ echo "====== generate known_hosts file in daisy vm ======"
+ touch $WORKSPACE/known_hosts
+ scp $WORKSPACE/known_hosts root@$DAISY_IP:/root/.ssh/
+}
+
+function config_daisy()
+{
+ echo "====== add relate config for Daisy and Kolla ======"
+ ssh $SSH_PARAS $DAISY_IP "bash $REMOTE_SPACE/deploy/prepare.sh -n $NETWORK -b $IS_BARE"
+}
+
+clean_up_target_vms_and_networks
+
+if [ ! $SKIP_DEPLOY_DAISY -eq 1 ]; then
+ clean_up_daisy_vm_and_networks
+ create_daisy_vm_and_networks
+ install_daisy
+ config_daisy
+fi
-echo "====== add relate config of kolla ======"
-ssh $SSH_PARAS $DAISY_IP "bash $REMOTE_SPACE/deploy/prepare.sh -n $NETWORK -b $IS_BARE"
echo "====== prepare cluster and pxe ======"
ssh $SSH_PARAS $DAISY_IP "python ${REMOTE_SPACE}/deploy/tempest.py --dha $DHA --network $NETWORK --cluster 'yes'"
diff --git a/ci/kolla-build.sh b/ci/kolla-build.sh
index c7ea8884..5784a17e 100755
--- a/ci/kolla-build.sh
+++ b/ci/kolla-build.sh
@@ -9,19 +9,14 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Build OpenStack container images as well as extension images.
-# Parameters: $1 kolla git url, for example,
-# https://git.openstack.org/openstack/kolla
-# $2 kolla branch, for example, stable/newton
-# $3 kolla tag, for example, 3.0.2
-
set -o errexit
set -o nounset
set -o pipefail
-KOLLA_GIT=$1
-KOLLA_BRANCH=$2
-KOLLA_TAG=$3
+KOLLA_GIT="https://github.com/huzhijiang/kolla.git"
+KOLLA_BRANCH="stable/ocata"
+KOLLA_TAG=
+EXT_TAG=
KOLLA_GIT_VERSION=
KOLLA_IMAGE_VERSION=
KOLLA_GIT_DIR=/tmp/kolla-git
@@ -29,6 +24,56 @@ REGISTRY_VOLUME_DIR=/tmp/registry
BUILD_OUTPUT_DIR=/tmp/kolla-build-output
REGISTRY_SERVER_NAME=daisy-registry
+function usage
+{
+cat << EOF
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+`basename $0`: Build Daisy4NFV's Kolla image package
+
+usage: `basename $0` [options]
+
+OPTIONS:
+ -l Kolla git repo location
+ -b Kolla git repo branch
+ -t Kolla git repo code tag(base version of image)
+ -e user defined tag extension(extended version)
+
+Examples:
+sudo `basename $0` -l https://git.openstack.org/openstack/kolla
+ -b stable/ocata
+ -t 4.0.2
+ -e 1
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+EOF
+}
+
+while getopts "l:b:t:e:h" OPTION
+do
+ case $OPTION in
+ l)
+ KOLLA_GIT=${OPTARG}
+ ;;
+ b)
+ KOLLA_BRANCH=${OPTARG}
+ ;;
+ t)
+ KOLLA_TAG=${OPTARG}
+ ;;
+ e)
+ EXT_TAG=${OPTARG}
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "${OPTION} is not a valid argument"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
function pre_check {
echo "Pre setup"
if [ $KOLLA_BRANCH == "stable/mitaka" ] ; then
@@ -183,9 +228,14 @@ function update_kolla_code {
popd
}
+function config_kolla {
+ rm -rf /etc/kolla/kolla-build.conf
+ KOLLA_IMAGE_VERSION="${KOLLA_IMAGE_VERSION}.${EXT_TAG}"
+}
+
function start_build {
echo "Start to build Kolla image"
- REGISTRY_PARAM="--registry 127.0.0.1:5000 --push"
+ REGISTRY_PARAM="--registry 127.0.0.1:5000 --push --tag $KOLLA_IMAGE_VERSION"
pushd $KOLLA_GIT_DIR/kolla
# Some of the images may be failed to built out but is OK
@@ -196,17 +246,6 @@ function start_build {
popd
}
-function usage {
- echo "Usage: $0 https://git.openstack.org/openstack/kolla stable/ocata"
-}
-
-if [ "$1" == "" -o "$2" == "" ] ; then
- usage
- exit 1
-fi
-
-
-
exitcode=""
error_trap()
{
@@ -230,6 +269,7 @@ pre_check
# Try to cleanup images of the last failed run, if any.
cleanup_kolla_image
update_kolla_code
+config_kolla
cleanup_kolla_image
# Make sure there is no garbage in the registry server.
diff --git a/deploy/config/vm_environment/zte-virtual2/network.yml b/deploy/config/vm_environment/zte-virtual2/network.yml
index 7b4a501c..0bbbae80 100644
--- a/deploy/config/vm_environment/zte-virtual2/network.yml
+++ b/deploy/config/vm_environment/zte-virtual2/network.yml
@@ -46,12 +46,6 @@ networks:
- start: '10.20.11.3'
end: '10.20.11.10'
name: 'TENANT'
- - cidr: '100.20.11.0/24'
- gateway: '100.20.11.1'
- ip_ranges:
- - start: '100.20.11.3'
- end: '100.20.11.10'
- name: 'HEARTBEAT'
interfaces:
- name: 'EXTERNAL'
interface: 'ens8'
@@ -63,7 +57,5 @@ interfaces:
interface: 'ens3'
- name: 'TENANT'
interface: 'ens3'
- - name: 'HEARTBEAT'
- interface: 'ens9'
internal_vip: '10.20.11.11'
public_vip: '10.20.11.11'
diff --git a/deploy/daisy.conf b/deploy/daisy.conf
index fe48925f..1a41c516 100644
--- a/deploy/daisy.conf
+++ b/deploy/daisy.conf
@@ -16,7 +16,7 @@ os_install_type=pxe
[PXE]
#Set to 'yes' if you want to build a PXE server, otherwise to 'no'.
-build_pxe=yes
+build_pxe=no
#the nic name, to build a PXE server on this nic.
eth_name=ens3
diff --git a/deploy/deploy.py b/deploy/deploy.py
index e8c9434b..71c39742 100644
--- a/deploy/deploy.py
+++ b/deploy/deploy.py
@@ -52,34 +52,31 @@ from environment import (
class DaisyDeployment(object):
- def __init__(self, lab_name, pod_name, deploy_file, net_file, bin_file,
- daisy_only, cleanup_only, remote_dir, work_dir, storage_dir,
- pxe_bridge, deploy_log, scenario):
- self.lab_name = lab_name
- self.pod_name = pod_name
-
- self.src_deploy_file = deploy_file
- self.scenario = scenario
- self.deploy_struct = self._construct_final_deploy_conf(scenario)
- self.deploy_file, self.deploy_file_name = self._construct_final_deploy_file(self.deploy_struct, work_dir)
-
- if not cleanup_only:
- self.net_file = net_file
- self.net_file_name = os.path.basename(net_file)
- with open(net_file) as yaml_file:
+ def __init__(self, **kwargs):
+ self.lab_name = kwargs['lab_name']
+ self.pod_name = kwargs['pod_name']
+ self.src_deploy_file = kwargs['deploy_file']
+ self.net_file = kwargs['net_file']
+ self.bin_file = kwargs['bin_file']
+ self.daisy_only = kwargs['daisy_only']
+ self.cleanup_only = kwargs['cleanup_only']
+ self.remote_dir = kwargs['remote_dir']
+ self.work_dir = kwargs['work_dir']
+ self.storage_dir = kwargs['storage_dir']
+ self.pxe_bridge = kwargs['pxe_bridge']
+ self.deploy_log = kwargs['deploy_log']
+ self.scenario = kwargs['scenario']
+
+ self.deploy_struct = self._construct_final_deploy_conf(self.scenario)
+ self.deploy_file, self.deploy_file_name = self._construct_final_deploy_file(self.deploy_struct, self.work_dir)
+
+ if not self.cleanup_only:
+ self.net_file_name = os.path.basename(self.net_file)
+ with open(self.net_file) as yaml_file:
self.net_struct = yaml.safe_load(yaml_file)
else:
self.net_struct = None
- self.bin_file = bin_file
- self.daisy_only = daisy_only
- self.cleanup_only = cleanup_only
- self.remote_dir = remote_dir
- self.work_dir = work_dir
- self.storage_dir = storage_dir
- self.pxe_bridge = pxe_bridge
- self.deploy_log = deploy_log
-
result = deploy_schema_validate(self.deploy_struct)
if result:
LE(result)
@@ -209,16 +206,17 @@ class DaisyDeployment(object):
def config_arg_parser():
- parser = argparse.ArgumentParser()
+ parser = argparse.ArgumentParser(prog='python %s' % __file__,
+ description='NOTE: You need ROOT privilege to run this script.')
- parser.add_argument('-lab', dest='lab_name', action='store', nargs='?',
- default=None,
+ parser.add_argument('-lab', dest='lab_name', action='store',
+ default=None, required=True,
help='Lab Name')
- parser.add_argument('-pod', dest='pod_name', action='store', nargs='?',
- default=None,
+ parser.add_argument('-pod', dest='pod_name', action='store',
+ default=None, required=True,
help='Pod Name')
- parser.add_argument('-bin', dest='bin_file', action='store', nargs='?',
+ parser.add_argument('-bin', dest='bin_file', action='store',
default=path_join(WORKSPACE, 'opnfv.bin'),
help='OPNFV Daisy BIN File')
@@ -228,28 +226,25 @@ def config_arg_parser():
parser.add_argument('-co', dest='cleanup_only', action='store_true',
default=False,
help='Cleanup VMs and Virtual Networks')
- # parser.add_argument('-nd', dest='no_daisy', action='store_true',
- # default=False,
- # help='Do not install Daisy Server when it exists')
- parser.add_argument('-rdir', dest='remote_dir', action='store', nargs='?',
+ parser.add_argument('-rdir', dest='remote_dir', action='store',
default='/home/daisy',
help='Code directory on Daisy Server')
- parser.add_argument('-wdir', dest='work_dir', action='store', nargs='?',
+ parser.add_argument('-wdir', dest='work_dir', action='store',
default='/tmp/workdir',
help='Temporary working directory')
- parser.add_argument('-sdir', dest='storage_dir', action='store', nargs='?',
+ parser.add_argument('-sdir', dest='storage_dir', action='store',
default='/home/qemu/vms',
help='Storage directory for VM images')
- parser.add_argument('-B', dest='pxe_bridge', action='store', nargs='?',
+ parser.add_argument('-B', dest='pxe_bridge', action='store',
default='pxebr',
help='Linux Bridge for booting up the Daisy Server VM '
'[default: pxebr]')
- parser.add_argument('-log', dest='deploy_log', action='store', nargs='?',
+ parser.add_argument('-log', dest='deploy_log', action='store',
default=path_join(WORKSPACE, 'deploy.log'),
- help='Path and name of the deployment log file')
- parser.add_argument('-s', dest='scenario', action='store', nargs='?',
+ help='Deployment log file')
+ parser.add_argument('-s', dest='scenario', action='store',
default='os-nosdn-nofeature-noha',
help='Deployment scenario')
return parser
@@ -259,6 +254,8 @@ def parse_arguments():
parser = config_arg_parser()
args = parser.parse_args()
+ check_sudo_privilege()
+
save_log_to_file(args.deploy_log)
LI(args)
@@ -294,7 +291,6 @@ def parse_arguments():
def main():
- check_sudo_privilege()
kwargs = parse_arguments()
deploy = DaisyDeployment(**kwargs)
deploy.run()
diff --git a/deploy/tempest.py b/deploy/tempest.py
index 9117a187..1cc00c19 100644
--- a/deploy/tempest.py
+++ b/deploy/tempest.py
@@ -15,10 +15,13 @@ import get_conf
import traceback
import time
import os
+import ConfigParser
daisy_version = 1.0
daisyrc_path = "/root/daisyrc_admin"
+daisy_conf_path = "/home/daisy_install/daisy.conf"
iso_path = "/var/lib/daisy/kolla/"
+deployment_interface = "ens3"
cluster_name = "clustertest"
_CLI_OPTS = [
@@ -51,6 +54,13 @@ def print_bar(msg):
print ("--------------------------------------------")
+def get_configure_from_daisyconf(section, key):
+ config = ConfigParser.ConfigParser()
+ config.read(daisy_conf_path)
+ option_value = config.get(section, key)
+ return option_value
+
+
def get_endpoint(file_path):
for line in open(file_path):
if 'OS_ENDPOINT' in line:
@@ -64,6 +74,7 @@ client = daisy_client.Client(version=daisy_version, endpoint=daisy_endpoint)
def prepare_install():
+ global deployment_interface
try:
print("get config...")
conf = cfg.ConfigOpts()
@@ -79,6 +90,9 @@ def prepare_install():
print("cluster_id=%s." % cluster_id)
print("update network...")
update_network(cluster_id, network_map)
+ print("build pxe server to install os...")
+ deployment_interface = get_configure_from_daisyconf("PXE", "eth_name")
+ build_pxe_for_discover(cluster_id)
elif conf['host'] and conf['host'] == 'yes':
isbare = False if 'isbare' in conf and conf['isbare'] == 0 else True
print("discover host...")
@@ -125,6 +139,12 @@ def prepare_install():
print_bar("Everything is done!")
+def build_pxe_for_discover(cluster_id):
+ cluster_meta = {'cluster_id': cluster_id,
+ 'deployment_interface': deployment_interface}
+ client.install.install(**cluster_meta)
+
+
def install_os_for_vm_step1(cluster_id):
cluster_meta = {'cluster_id': cluster_id,
'pxe_only': "true"}
diff --git a/docs/development/design/ci.rst b/docs/development/design/ci.rst
new file mode 100644
index 00000000..39a8a63f
--- /dev/null
+++ b/docs/development/design/ci.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+CI Job Introduction
+===================
+
+CI Base Architecture
+--------------------
+
+https://wiki.opnfv.org/display/INF/CI+Evolution
+
+Project Gating And Daily Deployment Test
+----------------------------------------
+
+To save time, currently, Daisy4NFV does not run deployment test in gate job which simply builds and
+uploads artifacts to low confidence level repo. The project deployment test is triggered on a daily
+basis. If the artifact passes the test, then it will be promoted to the high confidence level repo.
+
+The low confidence level artifacts are bin files in http://artifacts.opnfv.org/daisy.html named like
+"daisy/opnfv-Gerrit-39495.bin", while the high confidence level artifacts are named like
+"daisy/opnfv-2017-08-20_08-00-04.bin".
+
+The daily project deployment status can be found at
+
+https://build.opnfv.org/ci/job/daisy-daily-master/
+
+Production CI
+-------------
+
+The status of Daisy4NFV's CI/CD, which is running on OPNFV production CI environments (both B/M and VM),
+can be found at
+
+https://build.opnfv.org/ci/job/daisy-os-nosdn-nofeature-ha-baremetal-daily-master/
+https://build.opnfv.org/ci/job/daisy-os-odl-nofeature-ha-baremetal-daily-master/
+https://build.opnfv.org/ci/job/daisy-os-nosdn-nofeature-ha-virtual-daily-master/
+https://build.opnfv.org/ci/job/daisy-os-odl-nofeature-ha-virtual-daily-master/
+
+A dashboard for taking a glance at the CI health status in a more intuitive way can be found at
+
+http://testresults.opnfv.org/reporting/functest/release/master/index-status-daisy.html
diff --git a/docs/development/design/index.rst b/docs/development/design/index.rst
index bc5e9f40..5d9af98e 100644
--- a/docs/development/design/index.rst
+++ b/docs/development/design/index.rst
@@ -12,5 +12,6 @@ Design Docs for Daisy4nfv
:numbered:
:maxdepth: 2
+ ci.rst
multicast.rst
diff --git a/docs/development/design/multicast.rst b/docs/development/design/multicast.rst
index 89422fe6..f865edab 100644
--- a/docs/development/design/multicast.rst
+++ b/docs/development/design/multicast.rst
@@ -1,5 +1,8 @@
-Detailed Design
-===============
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+Kolla Image Multicast Design
+============================
Protocol Design
---------------
diff --git a/docs/release/configguide/index.rst b/docs/release/configguide/index.rst
index 7b531f45..a4829422 100644
--- a/docs/release/configguide/index.rst
+++ b/docs/release/configguide/index.rst
@@ -13,4 +13,4 @@ Release notes for Daisy4nfv
:maxdepth: 2
installerconfig.rst
-
+ kollaconfig.rst
diff --git a/docs/release/configguide/installerconfig.rst b/docs/release/configguide/installerconfig.rst
index f6a01b71..a8ef8144 100644
--- a/docs/release/configguide/installerconfig.rst
+++ b/docs/release/configguide/installerconfig.rst
@@ -10,7 +10,7 @@
Abstract
========
-This document compiles the release notes for the D 2.0 release of
+This document compiles the release notes for the E 1.0 release of
OPNFV when using Daisy as a deployment tool.
@@ -28,9 +28,7 @@ daisy.conf file.Then put the right configured daisy.conf file in the
3. "os_install_type" field just support "pxe" for now.
-4. Daisy now use pxe server to install the os, so "build_pxe" must set to "yes".
- If the value in daisy.conf in your env of /home/daisy_install/ dir is "no",
- you must change this field to "yes" manually before installing Daisy.
+4. Daisy now uses a pxe server to install the os, so the "build_pxe" item must be set to "no".
5. "eth_name" field is the pxe server interface, and this field is required when
the "build_pxe" field set to "yes".This should be set to the interface
diff --git a/docs/release/configguide/kollaconfig.rst b/docs/release/configguide/kollaconfig.rst
new file mode 100644
index 00000000..6da50ed3
--- /dev/null
+++ b/docs/release/configguide/kollaconfig.rst
@@ -0,0 +1,56 @@
+
+.. This document is protected/licensed under the following conditions
+.. (c) Sun Jing (ZTE corporation)
+.. Licensed under a Creative Commons Attribution 4.0 International License.
+.. You should have received a copy of the license along with this work.
+.. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+
+OpenStack Configuration Guide
+=============================
+
+Before The First Deployment
+---------------------------
+
+When executing deploy.sh, before doing real deployment, Daisy utilizes
+Kolla's service configuration functionality [1] to specify the following
+changes to the default OpenStack configuration which comes from Kolla as
+default.
+
+a) If it is a VM deployment, set virt_type=qemu and cpu_mode=none for
+nova-compute.conf.
+
+b) In nova-api.conf set default_floating_pool to the name of the external
+network which will be created by Daisy after deployment for nova-api.conf.
+
+c) In heat-api.conf and heat-engine.conf, set deferred_auth_method to
+trusts and unset trusts_delegated_roles.
+
+Those above changes are requirements of OPNFV or environment's
+constraints. So it is not recommended to change them. But if the user
+wants to add more specific configurations to OpenStack services before
+doing real deployment, we suggest to do it in the same way as deploy.sh
+do. Currently, this means hacking into deploy/prepare.sh or
+deploy/prepare/execute.py then add config file as described in [1].
+
+Notes:
+It is suggested to make the first deployment pass first, then reconfigure
+and deploy again.
+
+
+After The First Deployment
+--------------------------
+
+After the first time of deployment of OpenStack, its configurations can
+also be changed and applied by using Kolla's service configuration
+functionality [1]. But user has to issue Kolla's command to do it in this
+release:
+
+
+.. code-block:: console
+ cd /home/kolla_install/kolla-ansible/
+ ./tools/kolla-ansible reconfigure -i /home/kolla_install/kolla-ansible/ansible/inventory/multinode
+
+
+
+[1] https://docs.openstack.org/kolla-ansible/latest/advanced-configuration.html#openstack-service-configuration-in-kolla
diff --git a/docs/release/installation/bmdeploy.rst b/docs/release/installation/bmdeploy.rst
index 47a8e121..ddb30f22 100644
--- a/docs/release/installation/bmdeploy.rst
+++ b/docs/release/installation/bmdeploy.rst
@@ -130,13 +130,33 @@ Start Deployment (Bare Metal Deployment)
(1) Git clone the latest daisy4nfv code from opnfv: "git clone https://gerrit.opnfv.org/gerrit/daisy"
-(2) Download latest bin file(such as opnfv-2017-06-06_23-00-04.bin) of daisy from http://artifacts.opnfv.org/daisy.html and change the bin file name(such as opnfv-2017-06-06_23-00-04.bin) to opnfv.bin
+(2) Download latest bin file(such as opnfv-2017-06-06_23-00-04.bin) of daisy from
+http://artifacts.opnfv.org/daisy.html and change the bin file name(such as opnfv-2017-06-06_23-00-04.bin)
+to opnfv.bin. Check the https://build.opnfv.org/ci/job/daisy-os-odl-nofeature-ha-baremetal-daily-master/,
+and if the 'snaps_health_check' of functest result is 'PASS',
+you can use this verify-passed bin to deploy the openstack in your own environment
+
+(3) Assumed cloned dir is $workdir, which laid out like below:
+[root@daisyserver daisy]# ls
+ci deploy docker INFO LICENSE requirements.txt templates tests tox.ini
+code deploy.log docs known_hosts setup.py test-requirements.txt tools
+Make sure the opnfv.bin file is in $workdir
+
+(4) Enter into the $workdir, which laid out like below:
+[root@daisyserver daisy]# ls
+ci code deploy docker docs INFO LICENSE requirements.txt setup.py templates test-requirements.txt tests tools tox.ini
+Create folder of labs/zte/pod2/daisy/config in $workdir
+
+(5) Move the ./deploy/config/bm_environment/zte-baremetal1/deploy.yml and
+./deploy/config/bm_environment/zte-baremetal1/network.yml
+to labs/zte/pod2/daisy/config dir.
-(3) Make sure the opnfv.bin file is in daisy4nfv code dir
-
-(4) Create folder of labs/zte/pod2/daisy/config in daisy4nfv code dir
-
-(5) Move the ./deploy/config/bm_environment/zte-baremetal1/deploy.yml and ./deploy/config/bm_environment/zte-baremetal1/network.yml to labs/zte/pod2/daisy/config dir.
+Note:
+If selinux is disabled on the host, please delete all xml files section of below lines in dir templates/physical_environment/vms/
+ <seclabel type='dynamic' model='selinux' relabel='yes'>
+ <label>system_u:system_r:svirt_t:s0:c182,c195</label>
+ <imagelabel>system_u:object_r:svirt_image_t:s0:c182,c195</imagelabel>
+ </seclabel>
(6) Config the bridge in jumperserver,make sure the daisy vm can connect to the targetnode,use the command below:
brctl addbr br7
@@ -147,4 +167,5 @@ service network restart
(7) Run the script deploy.sh in daisy/ci/deploy/ with command:
sudo ./ci/deploy/deploy.sh -b ../daisy -l zte -p pod2 -s os-nosdn-nofeature-noha
-(8) When deploy successfully,the floating ip of openstack is 10.20.7.11,the login account is "admin" and the password is "keystone"
+(8) When deploy successfully,the floating ip of openstack is 10.20.7.11,
+the login account is "admin" and the password is "keystone"
diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst
index 8c5a3da7..20d1e3b7 100644
--- a/docs/release/installation/index.rst
+++ b/docs/release/installation/index.rst
@@ -15,4 +15,5 @@ OPNFV Daisy4nfv Installation Guide
installation_guide.rst
bmdeploy.rst
vmdeploy.rst
-
+ recovery.rst
+ upgrade.rst
diff --git a/docs/release/installation/recovery.rst b/docs/release/installation/recovery.rst
new file mode 100644
index 00000000..7a49e693
--- /dev/null
+++ b/docs/release/installation/recovery.rst
@@ -0,0 +1,80 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+
+Deployment Error Recovery Guide
+===============================
+
+Deployment may fail due to different kinds of reasons, such as Daisy VM creation
+error, target nodes failure during OS installation, or Kolla deploy command
+error. Different errors can be grouped into several error levels. We define
+Recovery Levels below to fulfill recover requirements in different error levels.
+
+1. Recovery Level 0
+-------------------
+
+This level restarts the whole deployment again. It is mainly used to retry to
+solve errors such as Daisy VM creation failure. For example, we use the
+following command to do a virtual deployment (in the jump host):
+
+
+.. code-block:: console
+
+ sudo ./ci/deploy/deploy.sh -b ./ -l zte -p virtual1 -s os-nosdn-nofeature-ha
+
+
+
+If command failed because of Daisy VM creation error, then redo above command
+will restart whole deployment which includes rebuild the daisy VM image and
+restart Daisy VM.
+
+
+2. Recovery Level 1
+-------------------
+
+If the Daisy VM was created successfully, but bugs were encountered in Daisy code
+or software of target OS which prevent deployment from being done, in this case,
+the user or the developer does not want to recreate the Daisy VM again during
+next deployment process but just to modify some pieces of code in it. To achieve
+this, he/she can redo deployment by deleting all clusters and hosts first(in the
+Daisy VM):
+
+
+.. code-block:: console
+
+ source /root/daisyrc_admin
+ for i in `daisy cluster-list | awk -F "|" '{print $2}' | sed -n '4p' | tr -d " "`;do daisy cluster-delete $i;done
+ for i in `daisy host-list | awk -F "|" '{print $2}'| grep -o "[^ ]\+\( \+[^ ]\+\)*"|tail -n +2`;do daisy host-delete $i;done
+
+
+
+Then, adjust deployment command as below and run it again(in the jump host):
+
+
+.. code-block:: console
+
+ sudo ./ci/deploy/deploy.sh -S -b ./ -l zte -p virtual1 -s os-nosdn-nofeature-ha
+
+
+
+Pay attention to the "-S" argument above; it lets the deployment process
+skip re-creating the Daisy VM and use the existing one.
+
+
+3. Recovery Level 2
+-------------------
+
+If both the Daisy VM and the target node's OS are OK, but an error occurred when doing
+OpenStack deployment, then there is even no need to re-install target OS for
+the deployment retrying. In this level, all we need to do is just retry the
+Daisy deployment command as follows(in the Daisy VM):
+
+
+.. code-block:: console
+
+ source /root/daisyrc_admin
+ daisy uninstall <cluster-id>
+ daisy install <cluster-id>
+
+
+
+This basically does kolla-ansible destroy and kolla-ansible deploy.
diff --git a/docs/release/installation/upgrade.rst b/docs/release/installation/upgrade.rst
new file mode 100644
index 00000000..23b53e21
--- /dev/null
+++ b/docs/release/installation/upgrade.rst
@@ -0,0 +1,92 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+
+OpenStack Minor Version Update Guide
+====================================
+
+Thanks to Kolla's kolla-ansible upgrade function, Daisy is able to
+update the OpenStack minor version as follows:
+
+1. Get the new version file only from the Daisy team.
+Since Daisy's Kolla images are built to meet the OPNFV requirements
+and have their own file packaging layout, Daisy requires the user to
+always use Kolla image files built by the Daisy team. Currently, they can
+be obtained from http://120.24.17.215/.
+
+2. Put new version file into /var/lib/daisy/versionfile/kolla/, for
+example:
+/var/lib/daisy/versionfile/kolla/kolla-image-ocata-170811155446.tgz
+
+3. Add the version file to Daisy's version management database, then get the
+version ID.
+
+
+.. code-block:: console
+
+ [root@daisy ~]# source /root/daisyrc_admin
+ [root@daisy ~]# daisy version-add kolla-image-ocata-170811155446.tgz kolla
+ +-------------+--------------------------------------+
+ | Property | Value |
+ +-------------+--------------------------------------+
+ | checksum | None |
+ | created_at | 2017-08-28T06:45:25.000000 |
+ | description | None |
+ | id | 8be92587-34d7-43e8-9862-a5288c651079 |
+ | name | kolla-image-ocata-170811155446.tgz |
+ | owner | None |
+ | size | 0 |
+ | status | unused |
+ | target_id | None |
+ | type | kolla |
+ | updated_at | 2017-08-28T06:45:25.000000 |
+ | version | None |
+ +-------------+--------------------------------------+
+
+
+
+4. Get cluster ID
+
+
+.. code-block:: console
+
+ [root@daisy ~]# daisy cluster-list
+ +--------------------------------------+-------------+...
+ | ID | Name |...
+ +--------------------------------------+-------------+...
+ | d4c1e0d3-c4b8-4745-aab0-0510e62f0ebb | clustertest |...
+ +--------------------------------------+-------------+...
+
+
+
+5. Issue the update command, passing the cluster ID and version ID
+
+
+
+.. code-block:: console
+
+ [root@daisy ~]# daisy update d4c1e0d3-c4b8-4745-aab0-0510e62f0ebb --update-object kolla --version-id 8be92587-34d7-43e8-9862-a5288c651079
+ +----------+--------------+
+ | Property | Value |
+ +----------+--------------+
+ | status | begin update |
+ +----------+--------------+
+
+
+6. Since step 5's command is non-blocking, the user needs to run the
+following command to get the updating progress.
+
+
+
+.. code-block:: console
+
+ [root@daisy ~]# daisy host-list --cluster-id d4c1e0d3-c4b8-4745-aab0-0510e62f0ebb
+ ...+---------------+-------------+-------------------------+
+ ...| Role_progress | Role_status | Role_messages |
+ ...+---------------+-------------+-------------------------+
+ ...| 0 | updating | prechecking envirnoment |
+ ...+---------------+-------------+-------------------------+
+
+
+
+Note: The above command returns many fields. The user only has to pay
+attention to the Role_xxx fields in this case.
diff --git a/docs/release/installation/vmdeploy.rst b/docs/release/installation/vmdeploy.rst
index 5da3949b..64d16a96 100644
--- a/docs/release/installation/vmdeploy.rst
+++ b/docs/release/installation/vmdeploy.rst
@@ -134,20 +134,45 @@ HeartBeat network is selected,and if it is configured in network.yml,the keepali
Start Deployment (Virtual Deployment)
-------------------------------------
-(1) Git clone the latest daisy4nfv code from opnfv: "git clone https://gerrit.opnfv.org/gerrit/daisy"
+(1) Git clone the latest daisy4nfv code from opnfv: "git clone https://gerrit.opnfv.org/gerrit/daisy",
+make sure the current branch is master
-(2) Download latest bin file(such as opnfv-2017-06-06_23-00-04.bin) of daisy from http://artifacts.opnfv.org/daisy.html and change the bin file name(such as opnfv-2017-06-06_23-00-04.bin) to opnfv.bin
+(2) Download latest bin file(such as opnfv-2017-06-06_23-00-04.bin) of daisy from
+http://artifacts.opnfv.org/daisy.html and change the bin file name(such as opnfv-2017-06-06_23-00-04.bin)
+to opnfv.bin. Check https://build.opnfv.org/ci/job/daisy-os-odl-nofeature-ha-baremetal-daily-master/,
+and if the 'snaps_health_check' of the functest result is 'PASS',
+you can use this verified bin to deploy OpenStack in your own environment
-(3) Make sure the opnfv.bin file is in daisy4nfv code dir
+(3) Assume the cloned dir is $workdir, which is laid out as below:
+[root@daisyserver daisy]# ls
+ci code deploy docker docs INFO LICENSE requirements.txt setup.py templates test-requirements.txt tests tools tox.ini
+Make sure the opnfv.bin file is in $workdir
-(4) Create folder of labs/zte/virtual1/daisy/config in daisy4nfv code dir
+(4) Enter $workdir, then create the folder labs/zte/virtual1/daisy/config in $workdir
-(5) Move the daisy/deploy/config/vm_environment/zte-virtual1/deploy.yml and daisy/deploy/config/vm_environment/zte-virtual1/network.yml to labs/zte/virtual1/daisy/config dir.
+(5) Move the deploy/config/vm_environment/zte-virtual1/deploy.yml and
+deploy/config/vm_environment/zte-virtual1/network.yml to
+labs/zte/virtual1/daisy/config dir.
Note:
-zte-virtual1 config file is just for all-in-one deployment,if you want to deploy openstack with five node(1 lb node and 4 computer nodes),change the zte-virtual1 to zte-virtual2
+zte-virtual1 config file deploys OpenStack with five nodes (3 lb nodes and 2 computer nodes);
+if you want to deploy an all-in-one OpenStack, change zte-virtual1 to zte-virtual2
+
+Note:
+If selinux is disabled on the host, please delete the section consisting of the below lines from all xml files in the dir templates/virtual_environment/vms/
+ <seclabel type='dynamic' model='selinux' relabel='yes'>
+ <label>system_u:system_r:svirt_t:s0:c182,c195</label>
+ <imagelabel>system_u:object_r:svirt_image_t:s0:c182,c195</imagelabel>
+ </seclabel>
(6) Run the script deploy.sh in daisy/ci/deploy/ with command:
-sudo ./ci/deploy/deploy.sh -b ../daisy -l zte -p virtual1 -s os-nosdn-nofeature-noha
+sudo ./ci/deploy/deploy.sh -b ./ -l zte -p virtual1 -s os-nosdn-nofeature-ha
+
+Note:
+The value after the -p parameter (virtual1) is taken from labs/zte/virtual1/daisy/config/
+The value after the -l parameter (zte) is taken from labs/
+The value "os-nosdn-nofeature-ha" after -s is used to deploy a multi-node OpenStack
+The value "os-nosdn-nofeature-noha" after -s is used to deploy an all-in-one OpenStack
-(7) When deploy successfully,the floating ip of openstack is 10.20.11.11,the login account is "admin" and the password is "keystone"
+(7) When deployed successfully, the floating IP of OpenStack is 10.20.11.11,
+the login account is "admin" and the password is "keystone"
diff --git a/templates/virtual_environment/vms/all_in_one.xml b/templates/virtual_environment/vms/all_in_one.xml
index 15f0305c..99725958 100644
--- a/templates/virtual_environment/vms/all_in_one.xml
+++ b/templates/virtual_environment/vms/all_in_one.xml
@@ -2,7 +2,7 @@
<name>all_in_one</name>
<memory unit='KiB'>16777216</memory>
<currentMemory unit='KiB'>16777216</currentMemory>
- <vcpu placement='static'>4</vcpu>
+ <vcpu placement='static'>8</vcpu>
<resource>
<partition>/machine</partition>
</resource>
diff --git a/tools/daisy-post-fip.sh b/tools/daisy-post-fip.sh
new file mode 100644
index 00000000..80d79e38
--- /dev/null
+++ b/tools/daisy-post-fip.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2016 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+IMAGE_NAME=TestVM
+
+# Sanitize language settings to avoid commands bailing out
+# with "unsupported locale setting" errors.
+unset LANG
+unset LANGUAGE
+LC_ALL=C
+export LC_ALL
+for i in curl openstack; do
+ if [[ ! $(type ${i} 2>/dev/null) ]]; then
+ if [ "${i}" == 'curl' ]; then
+ echo "Please install ${i} before proceeding"
+ else
+ echo "Please install python-${i}client before proceeding"
+ fi
+ exit
+ fi
+done
+
+# Move to top level directory
+REAL_PATH=$(python -c "import os,sys;print os.path.realpath('$0')")
+cd "$(dirname "$REAL_PATH")/.."
+
+# Test for credentials set
+if [[ "${OS_USERNAME}" == "" ]]; then
+ echo "No Keystone credentials specified. Try running source openrc"
+ exit
+fi
+
+echo "Configuring tenant network."
+
+openstack network create --provider-network-type vxlan demo-net
+openstack subnet create --subnet-range 10.0.0.0/24 --network demo-net \
+ --gateway 10.0.0.1 --dns-nameserver 8.8.8.8 demo-subnet
+DEMO_NET_ID=$(openstack network list | awk '/ demo-net / {print $2}')
+
+openstack router create demo-router
+openstack router add subnet demo-router demo-subnet
+openstack router set --external-gateway admin_external demo-router
+
+openstack floating ip create admin_external
+DEMO_FIP=$(openstack floating ip list | awk '/ None / {print $4}')
+
+openstack server create --image ${IMAGE_NAME} --flavor m1.micro \
+ --nic net-id=${DEMO_NET_ID} demo1
+
+# Wait for the guest to be ready to accept the FIP; this seems to be needed.
+sleep 10
+
+openstack server add floating ip demo1 ${DEMO_FIP}
+
+echo "Now you can test ping ${DEMO_FIP} from external network"