author     Narinder Gupta <narinder.gupta@canonical.com>    2016-12-21 10:34:14 -0600
committer  Narinder Gupta <narinder.gupta@canonical.com>    2016-12-21 10:44:28 -0600
commit     735846bdb8e4623c24fc045a79ce82389fdff70c
tree       36e96d4ea1fe4676a4031e96ff026c2b6362ea73 /ci
parent     e1a69393af42bbe9faf77e318e3cd6c84b264a1b
Added kubernetes core bundle support.

Change-Id: Ic2c5c4e1b76ac1caa8a4d2373661ebeb83e8971b
Signed-off-by: Narinder Gupta <narinder.gupta@canonical.com>
Diffstat (limited to 'ci')
-rwxr-xr-x  ci/02-deploybundle.sh                               88
-rw-r--r--  ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml        22
-rw-r--r--  ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml        6
-rw-r--r--  ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml           6
-rw-r--r--  ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml    15
-rw-r--r--  ci/config_tpl/juju2/bundlek8_tpl/relations.yaml      9
-rw-r--r--  ci/config_tpl/juju2/bundlek8_tpl/subordinate.yaml    4
-rwxr-xr-x  ci/deploy.sh                                        41
-rw-r--r--  ci/genK8Bundle.py                                  200
-rwxr-xr-x  ci/kubernates/fetch-charms.sh                       17
10 files changed, 356 insertions(+), 52 deletions(-)
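
For illustration, a minimal invocation of the new deployment-model switch introduced here (the -m flag in ci/deploy.sh). Only -m comes from this change; the other flag values below are placeholders, and the model value is spelled "kubernates" to match the usage string and the new ci/kubernates/ directory:

    # Hypothetical example: deploy the Kubernetes core bundle instead of OpenStack.
    # All flag values except -m are placeholders; adjust for your lab and target distro.
    cd ci
    ./deploy.sh -m kubernates -s nosdn -d xenial -a amd64 -l default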
diff --git a/ci/02-deploybundle.sh b/ci/02-deploybundle.sh
index c96e45f3..8589c6ae 100755
--- a/ci/02-deploybundle.sh
+++ b/ci/02-deploybundle.sh
@@ -10,9 +10,14 @@ opnfvlab=$3
opnfvsdn=$4
opnfvfeature=$5
opnfvdistro=$6
+opnfvmodel=$7
-#copy and download charms
-cp $opnfvsdn/fetch-charms.sh ./fetch-charms.sh
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ #copy and download charms
+ cp $opnfvsdn/fetch-charms.sh ./fetch-charms.sh
+else
+ cp kubernates/fetch-charms.sh ./fetch-charms.sh
+fi
jujuver=`juju --version`
@@ -21,9 +26,10 @@ sed -i -- "s|distro=trusty|distro=$opnfvdistro|g" ./fetch-charms.sh
./fetch-charms.sh $opnfvdistro
-tar xvf common/scaleio.tar -C ./$opnfvdistro/ --strip=2 juju-scaleio/trusty/
-
-osdomname=''
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ tar xvf common/scaleio.tar -C ./$opnfvdistro/ --strip=2 juju-scaleio/trusty/
+ osdomname=''
+fi
#check whether charms are still executing the code even though juju-deployer says installed.
check_status() {
@@ -47,32 +53,34 @@ check_status() {
#read the value from deployment.yaml
-if [ -e ./deployment.yaml ]; then
- if [ -e ./deployconfig.yaml ]; then
- extport=`grep "ext-port" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //' | tr ',' ' '`
- datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
- admnet=`grep "admNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
- cephdisk=`grep "ceph-disk" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
- osdomname=`grep "os-domain-name" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
- fi
-
- workmutiple=`maas maas nodes list | grep "cpu_count" | cut -d ':' -f 2 | sed -e 's/ //' | tr ',' ' '`
- max=0
- for v in ${workmutiple[@]}; do
- if (( $v > $max )); then max=$v; fi;
- done
- echo $max
-
- if [ "$max" -lt 4 ];then
- workmutiple=1.1
- elif [ "$max" -lt 33 ]; then
- workmutiple=0.25
- elif [ "$max" -lt 73 ]; then
- workmutiple=0.1
- else
- workmutiple=0.05
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ if [ -e ./deployment.yaml ]; then
+ if [ -e ./deployconfig.yaml ]; then
+ extport=`grep "ext-port" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //' | tr ',' ' '`
+ datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
+ admnet=`grep "admNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
+ cephdisk=`grep "ceph-disk" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
+ osdomname=`grep "os-domain-name" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
+ fi
+
+ workmutiple=`maas maas nodes list | grep "cpu_count" | cut -d ':' -f 2 | sed -e 's/ //' | tr ',' ' '`
+ max=0
+ for v in ${workmutiple[@]}; do
+ if (( $v > $max )); then max=$v; fi;
+ done
+ echo $max
+
+ if [ "$max" -lt 4 ];then
+ workmutiple=1.1
+ elif [ "$max" -lt 33 ]; then
+ workmutiple=0.25
+ elif [ "$max" -lt 73 ]; then
+ workmutiple=0.1
+ else
+ workmutiple=0.05
+ fi
+ sed -i "s/worker_multiplier: 1.0/worker_multiplier: ${workmutiple}/g" default_deployment_config.yaml
fi
- sed -i "s/worker_multiplier: 1.0/worker_multiplier: ${workmutiple}/g" default_deployment_config.yaml
fi
case "$opnfvlab" in
@@ -96,15 +104,23 @@ for feature in $opnfvfeature; do
fi
done
-#update source if trusty is target distribution
-var=os-$opnfvsdn-$fea-$opnfvtype"-"$opnfvdistro"_"$openstack
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ #update source if trusty is target distribution
+ var=os-$opnfvsdn-$fea-$opnfvtype"-"$opnfvdistro"_"$openstack
+
+ if [ "$osdomname" != "None" ]; then
+ var=$var"_"publicapi
+ fi
+fi
-if [ "$osdomname" != "None" ]; then
- var=$var"_"publicapi
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ #lets generate the bundle for all target using genBundle.py
+ python genBundle.py -l deployconfig.yaml -s $var > bundles.yaml
+else
+ #lets generate the bundle for k8 target using genK8Bundle.py
+ python genK8Bundle.py -l deployconfig.yaml -s $var > bundles.yaml
fi
-#lets generate the bundle for all target using genBundle.py
-python genBundle.py -l deployconfig.yaml -s $var > bundles.yaml
#keep the backup in the cloud for later debugging.
pastebinit bundles.yaml || true
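
In short, 02-deploybundle.sh now keys its charm source and bundle generator on the new opnfvmodel argument; a condensed, non-verbatim sketch of the resulting flow:

    # Condensed sketch of the branching above (not verbatim).
    if [[ "$opnfvmodel" = "openstack" ]]; then
        cp $opnfvsdn/fetch-charms.sh ./fetch-charms.sh            # SDN-specific OpenStack charms
        python genBundle.py   -l deployconfig.yaml -s $var > bundles.yaml
    else
        cp kubernates/fetch-charms.sh ./fetch-charms.sh           # Kubernetes charms
        python genK8Bundle.py -l deployconfig.yaml -s $var > bundles.yaml
    fi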
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml b/ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml
new file mode 100644
index 00000000..7af74afa
--- /dev/null
+++ b/ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml
@@ -0,0 +1,22 @@
+# vim: set ts=2 et:
+# The machine constraints for each service in this bundle
+# have been commented out so you don't run into quota
+# problems on public clouds. Modify and uncomment the
+# constraints: lines for each service to reflect your
+# deployment before moving to production.
+#
+ series: {{ ubuntu.release }}
+ services:
+ nodes:
+ charm: "cs:{{ ubuntu.release }}/ubuntu"
+ num_units: {{ opnfv.units }}
+ ntp:
+ charm: "./{{ ubuntu.release }}/ntp"
+{% include 'kubernetes.yaml' %}
+{% include 'easyrsa.yaml' %}
+{% include 'etcd.yaml' %}
+{% include 'subordinate.yaml' %}
+
+ relations:
+ - [ 'ntp:juju-info', 'nodes:juju-info' ]
+{% include 'relations.yaml' %}
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml b/ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml
new file mode 100644
index 00000000..f5162ad7
--- /dev/null
+++ b/ci/config_tpl/juju2/bundlek8_tpl/easyrsa.yaml
@@ -0,0 +1,6 @@
+ easyrsa:
+ charm: "./{{ ubuntu.release }}/easyrsa"
+ num_units: 1
+ to:
+ - "lxd:nodes/0"
+
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml b/ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml
new file mode 100644
index 00000000..ba99d0e9
--- /dev/null
+++ b/ci/config_tpl/juju2/bundlek8_tpl/etcd.yaml
@@ -0,0 +1,6 @@
+ etcd:
+ charm: "./{{ ubuntu.release }}/etcd"
+ num_units: 1
+ to:
+ - "nodes/0"
+
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml b/ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml
new file mode 100644
index 00000000..3953734b
--- /dev/null
+++ b/ci/config_tpl/juju2/bundlek8_tpl/kubernetes.yaml
@@ -0,0 +1,15 @@
+ kubernetes-master:
+ charm: "./{{ ubuntu.release }}/kubernetes-master"
+ num_units: 1
+ expose: true
+ to:
+ - "nodes/0"
+
+ kubernetes-worker:
+ charm: "./{{ ubuntu.release }}/kubernetes-worker"
+ num_units: {{ opnfv.units - 1 }}
+ expose: true
+ to:
+{% for unit_id in range(1, opnfv.units) %}
+ - "nodes/{{ unit_id }}"
+{% endfor %}
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/relations.yaml b/ci/config_tpl/juju2/bundlek8_tpl/relations.yaml
new file mode 100644
index 00000000..fd56090c
--- /dev/null
+++ b/ci/config_tpl/juju2/bundlek8_tpl/relations.yaml
@@ -0,0 +1,9 @@
+ - [ "kubernetes-master:kube-api-endpoint", "kubernetes-worker:kube-api-endpoint" ]
+ - [ "kubernetes-master:cluster-dns", "kubernetes-worker:kube-dns" ]
+ - [ "kubernetes-master:certificates", "easyrsa:client" ]
+ - [ "kubernetes-master:etcd", "etcd:db" ]
+ - [ "kubernetes-worker:certificates", "easyrsa:client" ]
+ - [ "flannel:etcd", "etcd:db" ]
+ - [ "flannel:cni", "kubernetes-master:cni" ]
+ - [ "flannel:cni", "kubernetes-worker:cni" ]
+ - [ "etcd:certificates", "easyrsa:client" ]
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/subordinate.yaml b/ci/config_tpl/juju2/bundlek8_tpl/subordinate.yaml
new file mode 100644
index 00000000..b800d59b
--- /dev/null
+++ b/ci/config_tpl/juju2/bundlek8_tpl/subordinate.yaml
@@ -0,0 +1,4 @@
+
+ flannel:
+ charm: ./{{ ubuntu.release }}/flannel
+
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 4c24f069..d99c53e3 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -13,6 +13,7 @@ opnfvrel=c
opnfvfeature=none
opnfvdistro=xenial
opnfvarch=amd64
+opnfvmodel=openstack
jujuver=`juju --version`
@@ -31,9 +32,10 @@ usage() { echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
[-f <ipv6,dpdk,lxd,dvr>]
[-d <trusty|xenial>]
[-a <amd64>]
+ [-m <openstack|kubernates>]
[-r <a|b>]" 1>&2 exit 1; }
-while getopts ":s:t:o:l:h:r:f:d:a:" opt; do
+while getopts ":s:t:o:l:h:r:f:d:a:m:" opt; do
case "${opt}" in
s)
opnfvsdn=${OPTARG}
@@ -59,6 +61,9 @@ while getopts ":s:t:o:l:h:r:f:d:a:" opt; do
a)
opnfvarch=${OPTARG}
;;
+ m)
+ opnfvmodel=${OPTARG}
+ ;;
h)
usage
;;
@@ -145,7 +150,7 @@ deploy() {
fi
#case default deploy the opnfv platform:
- ./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro
+ ./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel
}
#check whether charms are still executing the code even though juju-deployer says installed.
@@ -166,9 +171,10 @@ check_status() {
fi
done
- juju expose ceph-radosgw
- #juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
-
+ if [[ "$opnfvmodel" = "openstack" ]]; then
+ juju expose ceph-radosgw || true
+ #juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
+ fi
echo "...... deployment finishing ......."
}
@@ -179,20 +185,23 @@ check_status
echo "...... deployment finished ......."
-./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
+if [[ "$opnfvmodel" = "openstack" ]]; then
+ ./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
-# creating heat domain after pushing the public API into /etc/hosts
+ # creating heat domain after pushing the public API into /etc/hosts
-if [[ "$jujuver" > "2" ]]; then
- status=`juju run-action heat/0 domain-setup`
- echo $status
-else
- status=`juju action do heat/0 domain-setup`
- echo $status
-fi
+ if [[ "$jujuver" > "2" ]]; then
+ status=`juju run-action heat/0 domain-setup`
+ echo $status
+ else
+ status=`juju action do heat/0 domain-setup`
+ echo $status
+ fi
-sudo ../juju/get-cloud-images || true
-../juju/joid-configure-openstack || true
+ sudo ../juju/get-cloud-images || true
+ ../juju/joid-configure-openstack || true
+
+fi
echo "...... finished ......."
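
The same model check in deploy.sh now gates every OpenStack-only post-deployment step; roughly:

    # Rough sketch: OpenStack post-deployment steps are skipped for other models
    # (e.g. the Kubernetes core bundle).
    if [[ "$opnfvmodel" = "openstack" ]]; then
        juju expose ceph-radosgw || true
        ./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
        sudo ../juju/get-cloud-images || true
        ../juju/joid-configure-openstack || true
    fi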
diff --git a/ci/genK8Bundle.py b/ci/genK8Bundle.py
new file mode 100644
index 00000000..439d52f9
--- /dev/null
+++ b/ci/genK8Bundle.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+This script generates a juju deployer bundle based on
+scenario name, and lab config file.
+
+Parameters:
+ -s, --scenario : scenario name
+ -l, --lab : lab config file
+"""
+
+from optparse import OptionParser
+from jinja2 import Environment, FileSystemLoader
+from distutils.version import LooseVersion, StrictVersion
+import os
+import subprocess
+import random
+import yaml
+import sys
+
+#
+# Parse parameters
+#
+
+parser = OptionParser()
+parser.add_option("-s", "--scenario", dest="scenario", help="scenario name")
+parser.add_option("-l", "--lab", dest="lab", help="lab config file")
+(options, args) = parser.parse_args()
+scenario = options.scenario
+labconfig_file = options.lab
+
+#
+# Set Path and configs path
+#
+
+scenarioconfig_file = 'default_deployment_config.yaml'
+# Capture the installed juju version
+jujuver = subprocess.check_output(["juju", "--version"])
+
+TPL_DIR = os.path.dirname(os.path.abspath(__file__))+'/config_tpl/juju2/bundlek8_tpl'
+
+#
+# Prepare variables
+#
+
+# Prepare a storage for passwords
+passwords_store = dict()
+
+#
+# Local Functions
+#
+
+
+def load_yaml(filepath):
+ """Load YAML file"""
+ with open(filepath, 'r') as stream:
+ try:
+ return yaml.load(stream)
+ except yaml.YAMLError as exc:
+ print(exc)
+
+#
+# Templates functions
+#
+
+
+def unit_qty():
+ """Return quantity of units to deploy"""
+ global config
+ if config['os']['ha']['mode'] == 'ha':
+ return config['os']['ha']['cluster_size']
+ else:
+ return 1
+
+
+def unit_ceph_qty():
+ """Return size of the ceph cluster"""
+ global config
+ if config['os']['ha']['mode'] == 'ha':
+ return config['os']['ha']['cluster_size']
+ else:
+ if config['opnfv']['units'] >= 3:
+ return config['os']['ha']['cluster_size']
+ else:
+ return 2
+
+def unit_scaleio_qty():
+ """Return size of the scaleio cluster"""
+ return 3
+
+def to_select(qty=False):
+ """Return a random list of machines numbers to deploy"""
+ global config
+ if not qty:
+ qty = config['os']['ha']['cluster_size'] if \
+ config['os']['ha']['mode'] == 'ha' else 1
+ if config['os']['hyperconverged']:
+ return random.sample(range(0, config['opnfv']['units']), qty)
+ else:
+ return random.sample(range(0, qty), qty)
+
+
+def get_password(key, length=16, special=False):
+ """Return a new random password or an already created one"""
+ global passwords_store
+ if key not in passwords_store.keys():
+ alphabet = "abcdefghijklmnopqrstuvwxyz"
+ upperalphabet = alphabet.upper()
+ char_list = alphabet + upperalphabet + '0123456789'
+ pwlist = []
+ if special:
+ char_list += "+-,;./:?!*"
+ for i in range(length):
+ pwlist.append(char_list[random.randrange(len(char_list))])
+ random.shuffle(pwlist)
+ passwords_store[key] = "".join(pwlist)
+ return passwords_store[key]
+
+#
+# Config import
+#
+
+# Load scenario Config
+config = load_yaml(scenarioconfig_file)
+# Load lab Config
+config.update(load_yaml(labconfig_file))
+
+# We transform array to hash for an easier work
+config['opnfv']['spaces_dict'] = dict()
+for space in config['opnfv']['spaces']:
+ config['opnfv']['spaces_dict'][space['type']] = space
+config['opnfv']['storage_dict'] = dict()
+for storage in config['opnfv']['storage']:
+ config['opnfv']['storage_dict'][storage['type']] = storage
+
+#
+# Parse scenario name
+#
+
+# Set default scenario name
+if not scenario:
+ scenario = "k8-nosdn-baremetal-core"
+
+# Parse scenario name
+try:
+ sc = scenario.split('-')
+ (sdn, features, hamode) = sc[1:4]
+ features = features.split('_')
+ if len(sc) > 4:
+ extra = sc[4].split('_')
+ else:
+ extra = []
+except ValueError as err:
+ print('Error: Bad scenario name syntax, use '
+ '"k8-nosdn-baremetal-core" format')
+ sys.exit(1)
+
+#
+# Update config with scenario name
+#
+
+if 'dpdk' in features:
+ config['os']['network']['dpdk'] = True
+
+# Set beta option from extra
+if 'hugepages' in extra:
+ config['os']['beta']['huge_pages'] = True
+if 'mitaka' in extra:
+ config['os']['release'] = 'mitaka'
+if 'xenial' in extra:
+ config['ubuntu']['release'] = 'xenial'
+
+#
+# Transform template to bundle.yaml according to config
+#
+
+# Create the jinja2 environment.
+env = Environment(loader=FileSystemLoader(TPL_DIR),
+ trim_blocks=True)
+template = env.get_template('bundle.yaml')
+
+# Add functions
+env.globals.update(get_password=get_password)
+env.globals.update(unit_qty=unit_qty)
+env.globals.update(unit_ceph_qty=unit_ceph_qty)
+env.globals.update(unit_scaleio_qty=unit_scaleio_qty)
+env.globals.update(to_select=to_select)
+
+# Render the template
+output = template.render(**config)
+
+# Check output syntax
+try:
+ yaml.load(output)
+except yaml.YAMLError as exc:
+ print(exc)
+
+# print output
+print(output)
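
genK8Bundle.py can also be run by hand, mirroring the call in 02-deploybundle.sh; this assumes deployconfig.yaml and default_deployment_config.yaml already exist in the ci directory (they are produced earlier in the JOID flow), and uses the script's default scenario name:

    # Generate and (optionally) share a Kubernetes bundle outside the CI run.
    python genK8Bundle.py -l deployconfig.yaml -s k8-nosdn-baremetal-core > bundles.yaml
    pastebinit bundles.yaml || true    # same debugging aid 02-deploybundle.sh uses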
diff --git a/ci/kubernates/fetch-charms.sh b/ci/kubernates/fetch-charms.sh
new file mode 100755
index 00000000..2a3d7536
--- /dev/null
+++ b/ci/kubernates/fetch-charms.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -ex
+
+distro=$1
+mkdir -p $distro
+
+function build {
+ sudo apt-get install charm-tools -y
+ (cd $distro/charm-$1; charm build -s $distro -obuild src)
+ mv $distro/charm-$1/build/$distro/$1 $distro
+}
+
+# kubernetes
+charm pull cs:~containers/kubernetes-master $distro/kubernetes-master
+charm pull cs:~containers/kubernetes-worker $distro/kubernetes-worker
+charm pull cs:~containers/flannel $distro/flannel
+charm pull cs:~containers/etcd $distro/etcd
+charm pull cs:~containers/easyrsa $distro/easyrsa
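
A usage sketch for the new charm fetcher, following what 02-deploybundle.sh does: copy it next to the bundle area and pass the target distro (xenial by default):

    # Pull the Kubernetes charms into ./xenial/ for the generated bundle to reference.
    cd ci
    cp kubernates/fetch-charms.sh ./fetch-charms.sh
    ./fetch-charms.sh xenial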