summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--INFO2
-rw-r--r--docs/release/userguide/clearwater-project.rst168
-rw-r--r--docs/release/userguide/img/blink01.pngbin0 -> 136470 bytes
-rw-r--r--docs/release/userguide/img/blink02.pngbin0 -> 181973 bytes
-rw-r--r--docs/release/userguide/img/blink03.pngbin0 -> 131215 bytes
-rw-r--r--docs/release/userguide/img/blink04.pngbin0 -> 73794 bytes
-rw-r--r--docs/release/userguide/img/call.pngbin0 -> 147388 bytes
-rw-r--r--docs/release/userguide/img/twinkle01.pngbin0 -> 184693 bytes
-rw-r--r--docs/release/userguide/img/twinkle02.pngbin0 -> 175079 bytes
-rw-r--r--docs/release/userguide/img/twinkle03.pngbin0 -> 184875 bytes
-rw-r--r--src/arm/kubernetes_sriov/README.rst16
-rwxr-xr-xsrc/arm/kubernetes_sriov/k8s-build.sh32
-rwxr-xr-xsrc/arm/kubernetes_sriov/k8s-deploy.sh33
-rwxr-xr-xsrc/arm/kubernetes_sriov/setup.sh7
-rw-r--r--src/helm-charts/clearwater/Chart.yaml11
-rw-r--r--src/helm-charts/clearwater/README.md19
-rw-r--r--src/helm-charts/clearwater/templates/NOTES.txt19
-rw-r--r--src/helm-charts/clearwater/templates/astaire-depl.yaml54
-rw-r--r--src/helm-charts/clearwater/templates/astaire-svc.yaml11
-rw-r--r--src/helm-charts/clearwater/templates/bono-depl.yaml66
-rw-r--r--src/helm-charts/clearwater/templates/bono-svc.yaml27
-rw-r--r--src/helm-charts/clearwater/templates/cassandra-depl.yaml38
-rw-r--r--src/helm-charts/clearwater/templates/cassandra-svc.yaml17
-rw-r--r--src/helm-charts/clearwater/templates/chronos-depl.yaml55
-rw-r--r--src/helm-charts/clearwater/templates/chronos-svc.yaml11
-rw-r--r--src/helm-charts/clearwater/templates/ellis-depl.yaml35
-rw-r--r--src/helm-charts/clearwater/templates/ellis-svc.yaml12
-rw-r--r--src/helm-charts/clearwater/templates/env-vars-cm.yaml6
-rw-r--r--src/helm-charts/clearwater/templates/etcd-depl.yaml59
-rw-r--r--src/helm-charts/clearwater/templates/etcd-svc.yaml17
-rw-r--r--src/helm-charts/clearwater/templates/homer-depl.yaml35
-rw-r--r--src/helm-charts/clearwater/templates/homer-svc.yaml11
-rw-r--r--src/helm-charts/clearwater/templates/homestead-depl.yaml51
-rw-r--r--src/helm-charts/clearwater/templates/homestead-prov-depl.yaml39
-rw-r--r--src/helm-charts/clearwater/templates/homestead-prov-svc.yaml11
-rw-r--r--src/helm-charts/clearwater/templates/homestead-svc.yaml11
-rw-r--r--src/helm-charts/clearwater/templates/ralf-depl.yaml51
-rw-r--r--src/helm-charts/clearwater/templates/ralf-svc.yaml11
-rw-r--r--src/helm-charts/clearwater/templates/sprout-depl.yaml51
-rw-r--r--src/helm-charts/clearwater/templates/sprout-svc.yaml13
-rw-r--r--src/helm-charts/clearwater/values.yaml7
-rw-r--r--src/vagrant/kubeadm_clearwater/Vagrantfile2
-rwxr-xr-xsrc/vagrant/kubeadm_clearwater/clearwater_setup.sh (renamed from src/vagrant/kubeadm_clearwater/examples/create_and_apply.sh)38
-rw-r--r--src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml25
-rwxr-xr-xsrc/vagrant/kubeadm_clearwater/deploy.sh5
-rw-r--r--src/vagrant/kubeadm_clearwater/host_setup.sh6
-rw-r--r--src/vagrant/kubeadm_clearwater/master_setup.sh9
-rwxr-xr-xsrc/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh46
-rw-r--r--src/vagrant/kubeadm_clearwater/worker_setup.sh2
-rwxr-xr-xsrc/vagrant/kubeadm_istio/istio/deploy.sh39
-rw-r--r--src/vagrant/kubeadm_istio/master_setup.sh23
-rw-r--r--src/vagrant/kubeadm_kata/host_setup.sh9
-rw-r--r--src/vagrant/kubeadm_kata/kata_setup.sh42
-rw-r--r--src/vagrant/kubeadm_kata/master_setup.sh2
-rw-r--r--src/vagrant/kubeadm_kata/worker_setup.sh4
-rwxr-xr-xsrc/vagrant/kubeadm_onap/onap_setup.sh1
-rwxr-xr-xsrc/vagrant/setup_vagrant.sh8
57 files changed, 1198 insertions, 69 deletions
diff --git a/INFO b/INFO
index 1b68162..739f15f 100644
--- a/INFO
+++ b/INFO
@@ -22,12 +22,14 @@ akapadia@aarnanetworks.com
srupanagunta@gmail.com
ruijing.guo@gmail.com
chenjiankun1@huawei.com
+trevor.tao@arm.com
Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-12-13-14.59.html
Link(s) to approval of additional committers:
http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-04-11-13.59.html
https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2017-June/016505.html
https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2017-August/017629.html
+https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2018-February/020156.html
Link to approval of renaming project:
http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.txt
diff --git a/docs/release/userguide/clearwater-project.rst b/docs/release/userguide/clearwater-project.rst
index 6a5ac60..38f1c7a 100644
--- a/docs/release/userguide/clearwater-project.rst
+++ b/docs/release/userguide/clearwater-project.rst
@@ -1,24 +1,25 @@
+***********************************
Clearwater implementation for OPNFV
-===================================
+***********************************
CONTAINER4NFV setup a Kubernetes cluster on VMs running with Vagrant and kubeadm.
kubeadm assumes you have a set of machines (virtual or bare metal) that are up and running. In this way we can get a cluster with one master node and 2 workers (default). If you want to increase the number of workers nodes, please check the Vagrantfile inside the project.
-Is Clearwater suitable for Network Functions Virtualization?
+*Is Clearwater suitable for Network Functions Virtualization?*
Network Functions Virtualization or NFV is, without any doubt, the hottest topic in the telco network space right now. It’s an approach to building telco networks that moves away from proprietary boxes wherever possible to use software components running on industry-standard virtualized IT infrastructures. Over time, many telcos expect to run all their network functions operating at Layer 2 and above in an NFV environment, including IMS. Since Clearwater was designed from the ground up to run in virtualized environments and take full advantage of the flexibility of the Cloud, it is extremely well suited for NFV. Almost all of the ongoing trials of Clearwater with major network operators are closely associated with NFV-related initiatives.
About Clearwater
-----------------
+################
-[Clearwater](http://www.projectclearwater.org/about-clearwater/) follows [IMS](https://en.wikipedia.org/wiki/IP_Multimedia_Subsystem) architectural principles and supports all of the key standardized interfaces expected of an IMS core network. But unlike traditional implementations of IMS, Clearwater was designed from the ground up for the Cloud. By incorporating design patterns and open source software components that have been proven in many global Web applications, Clearwater achieves an unprecedented combination of massive scalability and exceptional cost-effectiveness.
+`Clearwater <http://www.projectclearwater.org/about-clearwater/>`_ follows `IMS <https://en.wikipedia.org/wiki/IP_Multimedia_Subsystem>`_ architectural principles and supports all of the key standardized interfaces expected of an IMS core network. But unlike traditional implementations of IMS, Clearwater was designed from the ground up for the Cloud. By incorporating design patterns and open source software components that have been proven in many global Web applications, Clearwater achieves an unprecedented combination of massive scalability and exceptional cost-effectiveness.
Clearwater provides SIP-based call control for voice and video communications and for SIP-based messaging applications. You can use Clearwater as a standalone solution for mass-market VoIP services, relying on its built-in set of basic calling features and standalone susbscriber database, or you can deploy Clearwater as an IMS core in conjunction with other elements such as Telephony Application Servers and a Home Subscriber Server.
-Clearwater was designed from the ground up to be optimized for deployment in virtualized and cloud environments. It leans heavily on established design patterns for building and deploying massively scalable web applications, adapting these design patterns to fit the constraints of SIP and IMS. [The Clearwater architecture](http://www.projectclearwater.org/technical/clearwater-architecture/) therefore has some similarities to the traditional IMS architecture but is not identical.
+Clearwater was designed from the ground up to be optimized for deployment in virtualized and cloud environments. It leans heavily on established design patterns for building and deploying massively scalable web applications, adapting these design patterns to fit the constraints of SIP and IMS. `The Clearwater architecture <http://www.projectclearwater.org/technical/clearwater-architecture/>`_ therefore has some similarities to the traditional IMS architecture but is not identical.
- All components are horizontally scalable using simple, stateless load-balancing.
- All long lived state is stored on dedicated “Vellum” nodes which make use of cloud-optimized storage technologies such as Cassandra. No long lived state is stored on other production nodes, making it quick and easy to dynamically scale the clusters and minimizing the impact if a node is lost.
@@ -27,8 +28,163 @@ Clearwater was designed from the ground up to be optimized for deployment in vir
Clearwater Architecture
------------------------
+#######################
.. image:: img/clearwater_architecture.png
:width: 800px
:alt: Clearwater Architecture
+
+
+**********
+Quickstart
+**********
+
+This repository contains instructions and resources for deploying Metaswitch's Clearwater project with Kubernetes.
+
+
+If you need more information about the Clearwater project, please check out our
+`documentation <https://github.com/opnfv/container4nfv/blob/master/docs/release/userguide/clearwater-project.rst>`_
+or the `official repository <https://github.com/Metaswitch/clearwater-docker>`_.
+
+
+Exposed Services
+################
+
+
+The deployment exposes:
+
+ - the Ellis web UI on port 30080 for self-provisioning.
+ - STUN/TURN on port 3478 for media relay.
+ - SIP on port 5060 for service.
+ - SIP/WebSocket on port 5062 for service.
+
+SIP devices can register with bono.:5060 and the Ellis provisioning interface can be accessed at port 30080.
+
+
+Prerequisites
+##############
+
+Install Docker and Vagrant
+********************************************
+
+CONTAINER4NFV uses ``setup_vagrant.sh`` to install all resources used by this repository.
+
+::
+
+ container4nfv/src/vagrant# ./setup_vagrant.sh -b libvirt
+
+Installation
+##############
+
+Deploy Clearwater with kubeadm
+********************************************
+
+Check ``clearwater/clearwater_setup.sh`` for details about k8s deployment.
+
+
+::
+
+ container4nfv/src/vagrant/kubeadm_clearwater# ./deploy.sh
+
+
+Destroy
+##########
+
+::
+
+ container4nfv/src/vagrant# ./cleanup.sh
+
+
+Making calls through Clearwater
+###############################
+
+
+Connect to Ellis service
+********************************************
+It's important to connect to Ellis to generate the SIP username, password and domain we will use with the SIP client.
+Use your <master ip address> + port 30080 (k8s default port). If you are not sure which Ellis's URL is, please check inside your master node.
+
+::
+
+ kubeadm_clearwater# vagrant ssh master
+ master@vagrant# ifconfig eth0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1
+ 192.168.121.3
+
+In your browser connect to `<master_ip>:30080` (ex. 192.168.121.3:30080).
+
+
+After that, signup and generate two users. The signup key is **secret**. Ellis will automatically allocate you a new number and display
+its password to you. Remember this password as it will only be displayed once.
+From now on, we will use <username> to refer to the SIP username (e.g. 6505551234) and <password> to refer to the password.
+
+
+Config and install two SIP clients
+********************************************
+We'll use both the Twinkle and Blink SIP clients, since we are going to try this out inside a LAN network.
+This is, of course, only a local test inside a LAN network. Configuring the clients may be a little bit tricky, so we add some screenshots:
+
+
+Blink setup
+********************************************
+1. Add <username> and <password>.
+
+.. image:: img/blink01.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+2. Configure a proxy to k8s.
+
+
+.. image:: img/blink02.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+3. Configure the network to use TCP only.
+
+
+.. image:: img/blink03.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+.. image:: img/blink04.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+Twinkle setup
+********************************************
+
+1. Configure a proxy to k8s.
+
+
+.. image:: img/twinkle01.png
+ :width: 800px
+ :alt: Twinkle SIP client
+
+
+2. Add <username> and <password>.
+
+
+.. image:: img/twinkle02.png
+ :width: 800px
+ :alt: Twinkle SIP client
+
+
+3. Configure the network to use TCP only.
+
+
+.. image:: img/twinkle03.png
+ :width: 800px
+ :alt: Twinkle SIP client
+
+
+Make the call
+********************************************
+
+
+.. image:: img/call.png
+ :width: 800px
+ :alt: Call
diff --git a/docs/release/userguide/img/blink01.png b/docs/release/userguide/img/blink01.png
new file mode 100644
index 0000000..ac74788
--- /dev/null
+++ b/docs/release/userguide/img/blink01.png
Binary files differ
diff --git a/docs/release/userguide/img/blink02.png b/docs/release/userguide/img/blink02.png
new file mode 100644
index 0000000..7eb8d46
--- /dev/null
+++ b/docs/release/userguide/img/blink02.png
Binary files differ
diff --git a/docs/release/userguide/img/blink03.png b/docs/release/userguide/img/blink03.png
new file mode 100644
index 0000000..ae6220a
--- /dev/null
+++ b/docs/release/userguide/img/blink03.png
Binary files differ
diff --git a/docs/release/userguide/img/blink04.png b/docs/release/userguide/img/blink04.png
new file mode 100644
index 0000000..17511b5
--- /dev/null
+++ b/docs/release/userguide/img/blink04.png
Binary files differ
diff --git a/docs/release/userguide/img/call.png b/docs/release/userguide/img/call.png
new file mode 100644
index 0000000..ec4cdbf
--- /dev/null
+++ b/docs/release/userguide/img/call.png
Binary files differ
diff --git a/docs/release/userguide/img/twinkle01.png b/docs/release/userguide/img/twinkle01.png
new file mode 100644
index 0000000..e424d51
--- /dev/null
+++ b/docs/release/userguide/img/twinkle01.png
Binary files differ
diff --git a/docs/release/userguide/img/twinkle02.png b/docs/release/userguide/img/twinkle02.png
new file mode 100644
index 0000000..8d95bae
--- /dev/null
+++ b/docs/release/userguide/img/twinkle02.png
Binary files differ
diff --git a/docs/release/userguide/img/twinkle03.png b/docs/release/userguide/img/twinkle03.png
new file mode 100644
index 0000000..4b4b5c7
--- /dev/null
+++ b/docs/release/userguide/img/twinkle03.png
Binary files differ
diff --git a/src/arm/kubernetes_sriov/README.rst b/src/arm/kubernetes_sriov/README.rst
new file mode 100644
index 0000000..fde2f51
--- /dev/null
+++ b/src/arm/kubernetes_sriov/README.rst
@@ -0,0 +1,16 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, arm Limited.
+
+.. _Flannel: https://github.com/coreos/flannel
+.. _SRIOV: https://github.com/hustcat/sriov-cni
+
+===============================================
+Kubernetes Deployment with SRIOV CNI
+===============================================
+
+The scenario would deploy pods with SRIOV/Multus/Flannel CNI.
+In this case, "eth0" would be used as the default interface, and the 2nd interface named "net0" would
+be used as the data plane.
+
diff --git a/src/arm/kubernetes_sriov/k8s-build.sh b/src/arm/kubernetes_sriov/k8s-build.sh
new file mode 100755
index 0000000..bc99e30
--- /dev/null
+++ b/src/arm/kubernetes_sriov/k8s-build.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -e
+
+sudo apt-get install -y docker.io libvirt-bin virt-manager qemu qemu-efi
+
+WORKSPACE=`pwd`
+if [ ! -d "$WORKSPACE/compass4nfv" ]; then
+ git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+fi
+
+cd compass4nfv
+
+WORKSPACE=`pwd`
+
+COMPASS_WORK_DIR=$WORKSPACE/../compass-work
+mkdir -p $COMPASS_WORK_DIR
+if [ ! -d "$WORKSPACE/work" ]; then
+ ln -s $COMPASS_WORK_DIR work
+fi
+
+#TODO: remove workaround after patches merged
+if [ ! -f "$WORKSPACE/patched" ]; then
+
+ git checkout a360411cb8c775dffa24a4157cec2b566cbde6f3
+ curl http://people.linaro.org/~yibo.cai/compass/0001-deploy-cobbler-drop-tcp_tw_recycle-in-sysctl.conf.patch | git apply || true
+ curl http://people.linaro.org/~yibo.cai/compass/0002-docker-compose-support-aarch64.patch | git apply || true
+ curl http://people.linaro.org/~yibo.cai/compass/0004-add-a-multus-with-sriov-interfaces-installation.patch | git apply || true
+ touch "$WORKSPACE/patched"
+fi
+
+# build tarball
+COMPASS_ISO_REPO='http://people.linaro.org/~yibo.cai/compass' ./build.sh
diff --git a/src/arm/kubernetes_sriov/k8s-deploy.sh b/src/arm/kubernetes_sriov/k8s-deploy.sh
new file mode 100755
index 0000000..f625d22
--- /dev/null
+++ b/src/arm/kubernetes_sriov/k8s-deploy.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -e
+
+#sudo apt-get install -y docker.io libvirt-bin virt-manager qemu qemu-efi
+
+#!/bin/bash
+cd compass4nfv
+
+export ADAPTER_OS_PATTERN='(?i)CentOS-7.*arm.*'
+export OS_VERSION="centos7"
+export KUBERNETES_VERSION="v1.7.3"
+export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
+export NETWORK="deploy/conf/network_cfg_sriov.yaml"
+export VIRT_NUMBER=2 VIRT_CPUS=2 VIRT_MEM=4096 VIRT_DISK=50G
+
+# enable sriov cni deployment
+echo "Set sriov cni scenario"
+sed -i.bak 's/^kube_network_plugin:.*$/kube_network_plugin: sriov/' \
+ deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml
+
+./deploy.sh
+
+set -ex
+
+# basic test: ssh to master, check k8s node status
+sshpass -p root ssh root@10.1.0.50 kubectl get nodes 2>/dev/null | grep -i ready
+
+# scenario specific tests
+# show two nics in container
+sshpass -p root ssh root@10.1.0.50 \
+ kubectl create -f /etc/kubernetes/sriov-test-pod.yml && \
+ sleep 30 && \
+ kubectl exec multus-test1 -- sh -c "ping -c 3 192.168.123.31"
diff --git a/src/arm/kubernetes_sriov/setup.sh b/src/arm/kubernetes_sriov/setup.sh
new file mode 100755
index 0000000..b33e990
--- /dev/null
+++ b/src/arm/kubernetes_sriov/setup.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+./k8s-build.sh
+
+sleep 2
+
+./k8s-deploy.sh
diff --git a/src/helm-charts/clearwater/Chart.yaml b/src/helm-charts/clearwater/Chart.yaml
index e69de29..1482dd5 100644
--- a/src/helm-charts/clearwater/Chart.yaml
+++ b/src/helm-charts/clearwater/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+description: Helm chart for Clearwater
+name: clearwater-project
+version: 0.1.0
+source:
+ - https://github.com/Metaswitch/clearwater-docker/
+maintainers:
+ - name: Laura Sofia Enriquez
+ email: lsofia.enriquez@gmail.com
+ - name: Yujun Zhang
+ email: zhang.yujunz@zte.com.cn
diff --git a/src/helm-charts/clearwater/README.md b/src/helm-charts/clearwater/README.md
index e69de29..8a509de 100644
--- a/src/helm-charts/clearwater/README.md
+++ b/src/helm-charts/clearwater/README.md
@@ -0,0 +1,19 @@
+# Metaswitch Clearwater vIMS Chart
+
+Based on [Metaswitch's Clearwater](https://github.com/Metaswitch/clearwater-docker) k8s configuration.
+
+
+## Configuration
+
+The following tables lists the configurable parameters of the chart and their default values.
+
+
+Parameter | Description | Default
+--- | --- | ---
+`image.path` | dockerhub repository | `enriquetaso`
+`image.tag` | docker image tag | `latest`
+`config.configmaps` | Custom configmap | `env-vars`
+`config.zone` | Custom namespace | `default.svc.cluster.local`
+`config.ip` | MANDATORY: Should be replaced with external ip | `None`
+
+
diff --git a/src/helm-charts/clearwater/templates/NOTES.txt b/src/helm-charts/clearwater/templates/NOTES.txt
index e69de29..6756fa8 100644
--- a/src/helm-charts/clearwater/templates/NOTES.txt
+++ b/src/helm-charts/clearwater/templates/NOTES.txt
@@ -0,0 +1,19 @@
+Thank you for installing {{ .Chart.Name }}.
+
+Your release is named {{ .Release.Name }}.
+
+To learn more about the release, try:
+
+ $ helm status {{ .Release.Name }}
+ $ helm get {{ .Release.Name }}
+
+The deployment exposes:
+
+ - the Ellis web UI on port 30080 for self-provisioning.
+ - STUN/TURN on port 3478 for media relay.
+ - SIP on port 5060 for service.
+ - SIP/WebSocket on port 5062 for service.
+
+SIP devices can register with bono.:5060 and the Ellis provisioning interface can be accessed at port 30080.
+
+Make a call: http://clearwater.readthedocs.io/en/stable/Making_your_first_call.html
diff --git a/src/helm-charts/clearwater/templates/astaire-depl.yaml b/src/helm-charts/clearwater/templates/astaire-depl.yaml
new file mode 100644
index 0000000..94c4855
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/astaire-depl.yaml
@@ -0,0 +1,54 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: astaire
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: astaire
+ app: astaire
+ spec:
+ terminationGracePeriodSeconds: 120
+ containers:
+ - image: "{{ .Values.image.path }}/astaire:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: astaire
+ ports:
+ - containerPort: 22
+ - containerPort: 11211
+ - containerPort: 11311
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 11311
+ periodSeconds: 10
+ failureThreshold: 9
+ readinessProbe:
+ tcpSocket:
+ port: 11311
+ volumeMounts:
+ - name: astairelogs
+ mountPath: /var/log/astaire
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "/usr/bin/pre-stop"]
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/astaire/astaire_current.txt" ]
+ volumeMounts:
+ - name: astairelogs
+ mountPath: /var/log/astaire
+ volumes:
+ - name: astairelogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/astaire-svc.yaml b/src/helm-charts/clearwater/templates/astaire-svc.yaml
new file mode 100644
index 0000000..e82dcdd
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/astaire-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: astaire
+spec:
+ ports:
+ - name: "http-astaire"
+ port: 11311
+ selector:
+ service: astaire
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/bono-depl.yaml b/src/helm-charts/clearwater/templates/bono-depl.yaml
new file mode 100644
index 0000000..94020ed
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/bono-depl.yaml
@@ -0,0 +1,66 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: bono
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: bono
+ template:
+ metadata:
+ labels:
+ service: bono
+ snmp: enabled
+ app: bono
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/bono:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: bono
+ ports:
+ - containerPort: 22
+ - containerPort: 3478
+ - containerPort: 5060
+ - containerPort: 5062
+ - containerPort: 5060
+ protocol: "UDP"
+ - containerPort: 5062
+ protocol: "UDP"
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: PUBLIC_IP
+ value: {{ .Values.config.ip }}
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["nc", "-z", "-w", "5", "127.0.0.1", "5060"]
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 5060
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "3478 5060 5062"]
+ initialDelaySeconds: 30
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "3478 5060 5062"]
+ volumeMounts:
+ - name: bonologs
+ mountPath: /var/log/bono
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/bono/bono_current.txt" ]
+ volumeMounts:
+ - name: bonologs
+ mountPath: /var/log/bono
+ volumes:
+ - name: bonologs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/bono-svc.yaml b/src/helm-charts/clearwater/templates/bono-svc.yaml
new file mode 100644
index 0000000..3b3da5c
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/bono-svc.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ externalIPs:
+ - {{ .Values.config.ip }}
+ loadBalancerIP: {{ .Values.config.ip }}
+ ports:
+ - name: "tcp-3478"
+ port: 3478
+ protocol: TCP
+ targetPort: 3478
+ - name: "tcp-5060"
+ port: 5060
+ protocol: TCP
+ targetPort: 5060
+ - name: "tcp-5062"
+ port: 5062
+ protocol: TCP
+ targetPort: 5062
+ selector:
+ service: bono
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}
diff --git a/src/helm-charts/clearwater/templates/cassandra-depl.yaml b/src/helm-charts/clearwater/templates/cassandra-depl.yaml
new file mode 100644
index 0000000..4a7f6c8
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/cassandra-depl.yaml
@@ -0,0 +1,38 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: cassandra
+spec:
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ service: cassandra
+ app: cassandra
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/cassandra:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: cassandra
+ ports:
+ - containerPort: 22
+ - containerPort: 7001
+ - containerPort: 9042
+ - containerPort: 9160
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "7000 9042 9160"]
+ # Cassandra can take a very, very long time to start up
+ initialDelaySeconds: 600
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "7000 9042 9160"]
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/cassandra-svc.yaml b/src/helm-charts/clearwater/templates/cassandra-svc.yaml
new file mode 100644
index 0000000..7cb9892
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/cassandra-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: cassandra
+spec:
+ ports:
+ - name: "http-7001"
+ port: 7001
+ - name: "http-7000"
+ port: 7000
+ - name: "http-9042"
+ port: 9042
+ - name: "http-9160"
+ port: 9160
+ selector:
+ service: cassandra
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/chronos-depl.yaml b/src/helm-charts/clearwater/templates/chronos-depl.yaml
new file mode 100644
index 0000000..2f65ad8
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/chronos-depl.yaml
@@ -0,0 +1,55 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ service: chronos
+ name: chronos
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: chronos
+ app: chronos
+ spec:
+ terminationGracePeriodSeconds: 120
+ containers:
+ - image: "{{ .Values.image.path }}/chronos:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: chronos
+ ports:
+ - containerPort: 22
+ - containerPort: 7253
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 7253
+ periodSeconds: 10
+ failureThreshold: 9
+ readinessProbe:
+ tcpSocket:
+ port: 7253
+ volumeMounts:
+ - name: chronoslogs
+ mountPath: /var/log/chronos
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "/usr/bin/pre-stop"]
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/chronos/chronos_current.txt" ]
+ volumeMounts:
+ - name: chronoslogs
+ mountPath: /var/log/chronos
+ volumes:
+ - name: chronoslogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/chronos-svc.yaml b/src/helm-charts/clearwater/templates/chronos-svc.yaml
new file mode 100644
index 0000000..3815b28
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/chronos-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: chronos
+spec:
+ ports:
+ - name: "http-7253"
+ port: 7253
+ selector:
+ service: chronos
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/ellis-depl.yaml b/src/helm-charts/clearwater/templates/ellis-depl.yaml
new file mode 100644
index 0000000..e231bf1
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ellis-depl.yaml
@@ -0,0 +1,35 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: ellis
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: ellis
+ app: ellis
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/ellis:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: ellis
+ ports:
+ - containerPort: 22
+ - containerPort: 80
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/ellis-svc.yaml b/src/helm-charts/clearwater/templates/ellis-svc.yaml
new file mode 100644
index 0000000..60e24d5
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ellis-svc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: ellis
+spec:
+ type: NodePort
+ ports:
+ - name: "http-ellis"
+ port: 80
+ nodePort: 30080
+ selector:
+ service: ellis
diff --git a/src/helm-charts/clearwater/templates/env-vars-cm.yaml b/src/helm-charts/clearwater/templates/env-vars-cm.yaml
new file mode 100644
index 0000000..3f25432
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/env-vars-cm.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.config.configmaps }}
+data:
+ ZONE: {{ .Values.config.zone }}
diff --git a/src/helm-charts/clearwater/templates/etcd-depl.yaml b/src/helm-charts/clearwater/templates/etcd-depl.yaml
new file mode 100644
index 0000000..5d6e184
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/etcd-depl.yaml
@@ -0,0 +1,59 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: etcd
+spec:
+ replicas: 1
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ instance-type: etcd-pod
+ app: etcd-pod
+ spec:
+ containers:
+ - args:
+ - --name
+ - $(MY_POD_NAME)
+ - --advertise-client-urls
+ - http://$(MY_POD_IP):2379,http://$(MY_POD_IP):4001
+ - --listen-client-urls
+ - http://0.0.0.0:2379,http://0.0.0.0:4001
+ - --initial-advertise-peer-urls
+ - http://$(MY_POD_IP):2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ # By default use a single pod cluster
+ - --initial-cluster
+ - $(MY_POD_NAME)=http://$(MY_POD_IP):2380
+ # Alternatively multi-pod clusters can be supported using central discovery. Run e.g.
+ # curl https://discovery.etcd.io/new?size=3 | sed s/https/http/
+ # to get a discovery URL for a 3 pod cluster, substitute the returned value below, and
+ # set replicas: 3 above.
+ #- --discovery
+ #- <URL returned by command above>
+ - --initial-cluster-state
+ - new
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: quay.io/coreos/etcd:v2.2.5
+ name: etcd
+ ports:
+ - containerPort: 2379
+ - containerPort: 2380
+ - containerPort: 4001
+ livenessProbe:
+ tcpSocket:
+ port: 4001
+ initialDelaySeconds: 300
+ readinessProbe:
+ tcpSocket:
+ port: 4001
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/etcd-svc.yaml b/src/helm-charts/clearwater/templates/etcd-svc.yaml
new file mode 100644
index 0000000..0c02b62
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/etcd-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: etcd
+ labels:
+ instance-type: etcd-pod
+spec:
+ ports:
+ - name: "http-etcd-client"
+ port: 2379
+ - name: "http-etcd-server"
+ port: 2380
+ - name: "http-4001"
+ port: 4001
+ selector:
+ instance-type: etcd-pod
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/homer-depl.yaml b/src/helm-charts/clearwater/templates/homer-depl.yaml
new file mode 100644
index 0000000..c9a292e
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homer-depl.yaml
@@ -0,0 +1,35 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: homer
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: homer
+ app: homer
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/homer:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: homer
+ ports:
+ - containerPort: 22
+ - containerPort: 7888
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 7888
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 7888
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/homer-svc.yaml b/src/helm-charts/clearwater/templates/homer-svc.yaml
new file mode 100644
index 0000000..8acc0ed
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homer-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: homer
+spec:
+ ports:
+ - name: "http-7888"
+ port: 7888
+ selector:
+ service: homer
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/homestead-depl.yaml b/src/helm-charts/clearwater/templates/homestead-depl.yaml
new file mode 100644
index 0000000..590ea51
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-depl.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: homestead
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: homestead
+ template:
+ metadata:
+ labels:
+ service: homestead
+ snmp: enabled
+ app: homestead
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/homestead:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: homestead
+ ports:
+ - containerPort: 22
+ - containerPort: 8888
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "8888"]
+ initialDelaySeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "8888"]
+ volumeMounts:
+ - name: homesteadlogs
+ mountPath: /var/log/homestead
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/homestead/homestead_current.txt" ]
+ volumeMounts:
+ - name: homesteadlogs
+ mountPath: /var/log/homestead
+ volumes:
+ - name: homesteadlogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/homestead-prov-depl.yaml b/src/helm-charts/clearwater/templates/homestead-prov-depl.yaml
new file mode 100644
index 0000000..ecf9f8d
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-prov-depl.yaml
@@ -0,0 +1,39 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: homestead-prov
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: homestead-prov
+ template:
+ metadata:
+ labels:
+ service: homestead-prov
+ snmp: enabled
+ app: homestead-prov
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/homestead-prov:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: homestead-prov
+ ports:
+ - containerPort: 22
+ - containerPort: 8889
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/clearwater/bin/poll_homestead-prov.sh"]
+ initialDelaySeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/clearwater/bin/poll_homestead-prov.sh"]
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/homestead-prov-svc.yaml b/src/helm-charts/clearwater/templates/homestead-prov-svc.yaml
new file mode 100644
index 0000000..4ce2dd9
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-prov-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: homestead-prov
+spec:
+ ports:
+ - name: "http-8889"
+ port: 8889
+ selector:
+ service: homestead-prov
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/homestead-svc.yaml b/src/helm-charts/clearwater/templates/homestead-svc.yaml
new file mode 100644
index 0000000..7684d2e
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: homestead
+spec:
+ ports:
+ - name: "http-8888"
+ port: 8888
+ selector:
+ service: homestead
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/ralf-depl.yaml b/src/helm-charts/clearwater/templates/ralf-depl.yaml
new file mode 100644
index 0000000..8efcc5e
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ralf-depl.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: ralf
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: ralf
+ template:
+ metadata:
+ labels:
+ service: ralf
+ snmp: enabled
+ app: ralf
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/ralf:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: ralf
+ ports:
+ - containerPort: 22
+ - containerPort: 10888
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 10888
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 10888
+ volumeMounts:
+ - name: ralflogs
+ mountPath: /var/log/ralf
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/ralf/ralf_current.txt" ]
+ volumeMounts:
+ - name: ralflogs
+ mountPath: /var/log/ralf
+ volumes:
+ - name: ralflogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/ralf-svc.yaml b/src/helm-charts/clearwater/templates/ralf-svc.yaml
new file mode 100644
index 0000000..9fc44c3
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ralf-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: ralf
+spec:
+ ports:
+ - name: "http-10888"
+ port: 10888
+ selector:
+ service: ralf
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/sprout-depl.yaml b/src/helm-charts/clearwater/templates/sprout-depl.yaml
new file mode 100644
index 0000000..da2989c
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/sprout-depl.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: sprout
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: sprout
+ template:
+ metadata:
+ labels:
+ service: sprout
+ snmp: enabled
+ app: sprout
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/sprout:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: sprout
+ ports:
+ - containerPort: 22
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "5052 5054"]
+ initialDelaySeconds: 30
+ periodSeconds: 3
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "5052 5054"]
+ volumeMounts:
+ - name: sproutlogs
+ mountPath: /var/log/sprout
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/sprout/sprout_current.txt" ]
+ volumeMounts:
+ - name: sproutlogs
+ mountPath: /var/log/sprout
+ volumes:
+ - name: sproutlogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/sprout-svc.yaml b/src/helm-charts/clearwater/templates/sprout-svc.yaml
new file mode 100644
index 0000000..092a51c
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/sprout-svc.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: sprout
+spec:
+ ports:
+ - name: "http-5052"
+ port: 5052
+ - name: "http-5054"
+ port: 5054
+ selector:
+ service: sprout
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/values.yaml b/src/helm-charts/clearwater/values.yaml
index e69de29..ce789ee 100644
--- a/src/helm-charts/clearwater/values.yaml
+++ b/src/helm-charts/clearwater/values.yaml
@@ -0,0 +1,7 @@
+image:
+ path: enriquetaso
+ tag: latest
+config:
+ configmaps: env-vars
+ zone: default.svc.cluster.local
+ ip: None
diff --git a/src/vagrant/kubeadm_clearwater/Vagrantfile b/src/vagrant/kubeadm_clearwater/Vagrantfile
index 9320074..3ed02d5 100644
--- a/src/vagrant/kubeadm_clearwater/Vagrantfile
+++ b/src/vagrant/kubeadm_clearwater/Vagrantfile
@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
config.vm.box = "ceph/ubuntu-xenial"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
diff --git a/src/vagrant/kubeadm_clearwater/examples/create_and_apply.sh b/src/vagrant/kubeadm_clearwater/clearwater_setup.sh
index fdbb2b1..e579773 100755
--- a/src/vagrant/kubeadm_clearwater/examples/create_and_apply.sh
+++ b/src/vagrant/kubeadm_clearwater/clearwater_setup.sh
@@ -17,28 +17,50 @@
set -ex
+static_ip=$(ifconfig eth0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
+echo "STATIC_IP is $static_ip."
+
git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
# Set the configmaps
-kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local --from-literal=ADDITIONAL_SHARED_CONFIG=hss_hostname=hss.example.com\\nhss_realm=example.com
+kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local
-# Genereta the yamls
+# Generate the yamls
cd clearwater-docker/kubernetes/
-#./k8s-gencfg --image_path=<path to your repo> --image_tag=<tag for the images you want to use>
./k8s-gencfg --image_path=enriquetaso --image_tag=latest
+# Expose Ellis
+# The Ellis provisioning interface can then be accessed on static_ip:30080
+cat ellis-svc.yaml | sed "s/clusterIP: None/type: NodePort/" > ellis-svc.yaml.new
+cat ellis-svc.yaml.new | sed "s/port: 80/port: 80\n nodePort: 30080/" > ellis-svc.yaml
+rm ellis-svc.yaml.new
+
+# Bono configuration
+# Have a static external IP address available that the load balancer can use
+cp /vagrant/custom-bono-svc/bono-svc.yaml .
+sed -ie "6s/$/\n - $static_ip/" bono-svc.yaml
+sed -ie "7s/$/\n loadBalancerIP: $static_ip/" bono-svc.yaml
-# Apply yamls
cd
kubectl apply -f clearwater-docker/kubernetes
kubectl get nodes
kubectl get services
kubectl get pods
kubectl get rc
+sleep 60
+
+r="1"
+while [ $r != "0" ]
+do
+ kubectl get pods
+ r=$( kubectl get pods | grep Pending | wc -l)
+ sleep 60
+done
-r="0"
-while [ $r != "13" ]
+q="1"
+while [ $q != "0" ]
do
- r=$(kubectl get pods | grep Running | wc -l)
- sleep 60
+ kubectl get pods
+ q=$( kubectl get pods | grep ContainerCreating | wc -l)
+ sleep 60
done
diff --git a/src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml b/src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml
new file mode 100644
index 0000000..9280b0f
--- /dev/null
+++ b/src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ externalIPs:
+ ports:
+ - name: "3478"
+ port: 3478
+ protocol: TCP
+ targetPort: 3478
+ - name: "5060"
+ port: 5060
+ protocol: TCP
+ targetPort: 5060
+ - name: "5062"
+ port: 5062
+ protocol: TCP
+ targetPort: 5062
+ selector:
+ service: bono
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}
diff --git a/src/vagrant/kubeadm_clearwater/deploy.sh b/src/vagrant/kubeadm_clearwater/deploy.sh
index 844a750..54644a3 100755
--- a/src/vagrant/kubeadm_clearwater/deploy.sh
+++ b/src/vagrant/kubeadm_clearwater/deploy.sh
@@ -6,4 +6,7 @@ DIR="$(dirname `readlink -f $0`)"
cd $DIR
../cleanup.sh
vagrant up
-vagrant ssh master -c "/vagrant/examples/create_and_apply.sh"
+vagrant ssh master -c "/vagrant/clearwater_setup.sh"
+
+# Run tests
+vagrant ssh master -c "/vagrant/tests/clearwater-live-test.sh"
diff --git a/src/vagrant/kubeadm_clearwater/host_setup.sh b/src/vagrant/kubeadm_clearwater/host_setup.sh
index b86a618..c1a23eb 100644
--- a/src/vagrant/kubeadm_clearwater/host_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/host_setup.sh
@@ -21,9 +21,9 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
-sudo rm -rf /var/lib/kubelet
-sudo systemctl stop kubelet
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_clearwater/master_setup.sh b/src/vagrant/kubeadm_clearwater/master_setup.sh
index 7fa2ad8..b181582 100644
--- a/src/vagrant/kubeadm_clearwater/master_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/master_setup.sh
@@ -3,11 +3,8 @@
set -ex
sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-sudo cp /etc/kubernetes/admin.conf $HOME/
-sudo chown $(id -u):$(id -g) $HOME/admin.conf
-export KUBECONFIG=$HOME/admin.conf
-echo "export KUBECONFIG=$HOME/admin.conf" >> $HOME/.bash_profile
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f http://git.io/weave-kube-1.6
-#kubectl apply -f http://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
-#kubectl apply -f http://docs.projectcalico.org/v2.1/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
diff --git a/src/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh b/src/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh
new file mode 100755
index 0000000..6e5238e
--- /dev/null
+++ b/src/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+# http://clearwater.readthedocs.io/en/latest/Running_the_live_tests.html
+sudo apt-get install build-essential bundler git --yes
+sudo apt install gnupg2 --yes
+gpg2 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+curl -L https://get.rvm.io | bash -s stable
+
+source ~/.rvm/scripts/rvm
+rvm autolibs enable
+rvm install 1.9.3
+rvm use 1.9.3
+
+
+# Setup ruby and gems
+git clone https://github.com/Metaswitch/clearwater-live-test.git
+cd clearwater-live-test/
+cd quaff/ && git clone https://github.com/Metaswitch/quaff.git
+cd ..
+bundle install
+
+# Get Ellis ip
+ellisip=$(kubectl get services ellis -o json | grep clusterIP | cut -f4 -d'"')
+
+# Get Bono ip
+bonoip=$(kubectl get services bono -o json | grep clusterIP | cut -f4 -d'"')
+
+# Run the tests
+rake test[default.svc.cluster.local] SIGNUP_CODE=secret PROXY=$bonoip ELLIS=$ellisip
diff --git a/src/vagrant/kubeadm_clearwater/worker_setup.sh b/src/vagrant/kubeadm_clearwater/worker_setup.sh
index b68d800..74e4178 100644
--- a/src/vagrant/kubeadm_clearwater/worker_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/worker_setup.sh
@@ -1,4 +1,4 @@
#!/bin/bash
set -ex
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
diff --git a/src/vagrant/kubeadm_istio/istio/deploy.sh b/src/vagrant/kubeadm_istio/istio/deploy.sh
index ed873f5..3dd1426 100755
--- a/src/vagrant/kubeadm_istio/istio/deploy.sh
+++ b/src/vagrant/kubeadm_istio/istio/deploy.sh
@@ -17,23 +17,50 @@
set -ex
-# Deploy istio 0.4.0
+# Get latest istio version, refer: https://git.io/getLatestIstio
+if [ "x${ISTIO_VERSION}" = "x" ] ; then
+ ISTIO_VERSION=$(curl -L -s https://api.github.com/repos/istio/istio/releases/latest | \
+ grep tag_name | sed "s/ *\"tag_name\": *\"\(.*\)\",*/\1/")
+fi
+
+ISTIO_DIR_NAME="istio-$ISTIO_VERSION"
+
cd /vagrant
curl -L https://git.io/getLatestIstio | sh -
-mv istio-0.4.0 istio-source
+mv $ISTIO_DIR_NAME istio-source
cd /vagrant/istio-source/
-export PATH=$PWD/bin:$PATH
+
+# Persistently append istioctl bin path to PATH env
+echo 'export PATH="$PATH:/vagrant/istio-source/bin"' >> ~/.bashrc
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+source ~/.bashrc
+
kubectl apply -f install/kubernetes/istio.yaml
+# Install the sidecar injection configmap
+./install/kubernetes/webhook-create-signed-cert.sh \
+ --service istio-sidecar-injector \
+ --namespace istio-system \
+ --secret sidecar-injector-certs
+kubectl apply -f install/kubernetes/istio-sidecar-injector-configmap-release.yaml
+
+# Install the sidecar injector webhook
+cat install/kubernetes/istio-sidecar-injector.yaml | \
+ ./install/kubernetes/webhook-patch-ca-bundle.sh > \
+ install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
+kubectl apply -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
+kubectl -n istio-system get deployment -listio=sidecar-injector
+
# Validate the installation
kubectl get svc -n istio-system
kubectl get pods -n istio-system
+kubectl get namespace -L istio-injection
-r="0"
-while [ $r -ne "4" ]
+r="1"
+while [ $r -ne "0" ]
do
kubectl get pods -n istio-system
- r=$(kubectl get pods -n istio-system | grep Running | wc -l)
+ r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running' | wc -l)
sleep 60
done
diff --git a/src/vagrant/kubeadm_istio/master_setup.sh b/src/vagrant/kubeadm_istio/master_setup.sh
index b181582..f308244 100644
--- a/src/vagrant/kubeadm_istio/master_setup.sh
+++ b/src/vagrant/kubeadm_istio/master_setup.sh
@@ -2,9 +2,32 @@
set -ex
+ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NodeRestriction,ResourceQuota"
+KUBE_APISERVER_CONF="/etc/kubernetes/manifests/kube-apiserver.yaml"
+
sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f http://git.io/weave-kube-1.6
+
+# Enable mutating webhook admission controller
+# kube-apiserver will be automatically restarted by kubelet when its manifest file update.
+# https://istio.io/docs/setup/kubernetes/sidecar-injection.html
+sudo sed -i "s/admission-control=.*/admission-control=$ADMISSION_CONTROL/g" $KUBE_APISERVER_CONF
+
+set +e
+# wait for kube-apiserver restart
+r="1"
+while [ $r -ne "0" ]
+do
+ sleep 2
+ kubectl version > /dev/null
+ r=$?
+done
+set -e
+
+# check if admissionregistration.k8s.io/v1beta1 API is enabled
+kubectl api-versions | grep admissionregistration
+
diff --git a/src/vagrant/kubeadm_kata/host_setup.sh b/src/vagrant/kubeadm_kata/host_setup.sh
index f9e1a76..d2af951 100644
--- a/src/vagrant/kubeadm_kata/host_setup.sh
+++ b/src/vagrant/kubeadm_kata/host_setup.sh
@@ -25,19 +25,14 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
-
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
+sudo swapoff -a
sudo systemctl stop kubelet
sudo rm -rf /var/lib/kubelet
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_kata/kata_setup.sh b/src/vagrant/kubeadm_kata/kata_setup.sh
index 9682f3a..c14d844 100644
--- a/src/vagrant/kubeadm_kata/kata_setup.sh
+++ b/src/vagrant/kubeadm_kata/kata_setup.sh
@@ -17,33 +17,27 @@
set -ex
-wget https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz
-sudo tar -xvf go1.8.3.linux-amd64.tar.gz -C /usr/local/
-mkdir -p $HOME/go/src
-export GOPATH=$HOME/go
-export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
-
-go get github.com/clearcontainers/tests
-cd $GOPATH/src/github.com/clearcontainers/tests/.ci
-
-echo "Install dependencies"
-bash -f ./setup_env_ubuntu.sh
-
-echo "Install shim"
-bash -f ./install_shim.sh
-
-echo "Install proxy"
-bash -f ./install_proxy.sh
-
-echo "Install runtime"
-bash -f ./install_runtime.sh
-
-echo "Install CRI-O"
-bash -f ./install_crio.sh
+cat << EOF | sudo tee /etc/apt/sources.list.d/cc-oci-runtime.list
+deb http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/ /
+EOF
+curl -fsSL http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/Release.key | sudo apt-key add -
+sudo apt-get update
+sudo apt-get install -y cc-oci-runtime
+
+echo | sudo add-apt-repository ppa:projectatomic/ppa
+sudo apt-get update
+sudo apt-get install -y cri-o
+sudo sed -i 's,runtime_untrusted_workload.*,runtime_untrusted_workload = "/usr/bin/cc-runtime",' /etc/crio/crio.conf
+sudo sed -i 's,cgroup_manager.*,cgroup_manager = "cgroupfs",' /etc/crio/crio.conf
+sudo sed -i 's,default_workload_trust.*,default_workload_trust = "untrusted",' /etc/crio/crio.conf
+sudo sed -i 's,^registries.*,registries = [ "docker.io",' /etc/crio/crio.conf
+sudo systemctl enable crio
+sudo systemctl daemon-reload
+sudo systemctl restart crio
sudo systemctl stop kubelet
echo "Modify kubelet systemd configuration to use CRI-O"
k8s_systemd_file="/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
-sudo sed -i '/KUBELET_AUTHZ_ARGS/a Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/var/run/crio.sock --runtime-request-timeout=30m"' "$k8s_systemd_file"
+sudo sed -i '/KUBELET_AUTHZ_ARGS/a Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --runtime-request-timeout=30m"' "$k8s_systemd_file"
sudo systemctl daemon-reload
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_kata/master_setup.sh b/src/vagrant/kubeadm_kata/master_setup.sh
index 3748f01..41dadf0 100644
--- a/src/vagrant/kubeadm_kata/master_setup.sh
+++ b/src/vagrant/kubeadm_kata/master_setup.sh
@@ -17,7 +17,7 @@
set -ex
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+sudo kubeadm init --skip-preflight-checks --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf .kube/config
sudo chown $(id -u):$(id -g) ~/.kube/config
diff --git a/src/vagrant/kubeadm_kata/worker_setup.sh b/src/vagrant/kubeadm_kata/worker_setup.sh
index a6e4bf4..6145793 100644
--- a/src/vagrant/kubeadm_kata/worker_setup.sh
+++ b/src/vagrant/kubeadm_kata/worker_setup.sh
@@ -16,7 +16,9 @@
#
set -ex
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification \
+ --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 \
+ --ignore-preflight-errors=SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
sudo apt-get install -y putty-tools
mkdir ~/.kube
diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh
index b876580..4dfe1e1 100755
--- a/src/vagrant/kubeadm_onap/onap_setup.sh
+++ b/src/vagrant/kubeadm_onap/onap_setup.sh
@@ -39,4 +39,5 @@ echo "y\n" | plink -ssh -pw vagrant vagrant@worker1 "sudo rm -rf /dockerdata-nfs
cd ~/oom/kubernetes/config && ./createConfig.sh -n onap
while true; do sleep 30; kubectl get pods --all-namespaces | grep onap | wc -l | grep "^0$" && break; done
source ~/oom/kubernetes/oneclick/setenv.bash
+sed -i "s/aaiServiceClusterIp:.*/aaiServiceClusterIp: 10.96.0.254/" ~/oom/kubernetes/aai/values.yaml
cd ~/oom/kubernetes/oneclick && ./createAll.bash -n onap
diff --git a/src/vagrant/setup_vagrant.sh b/src/vagrant/setup_vagrant.sh
index 2dc5ae0..fcde052 100755
--- a/src/vagrant/setup_vagrant.sh
+++ b/src/vagrant/setup_vagrant.sh
@@ -30,9 +30,9 @@ ${USER} ALL = (root) NOPASSWD:ALL
EOF
sudo apt-get update -y
sudo apt-get install -y git unzip
- wget https://releases.hashicorp.com/vagrant/1.8.7/vagrant_1.8.7_x86_64.deb
- sudo dpkg -i vagrant_1.8.7_x86_64.deb
- rm -rf vagrant_1.8.7_x86_64.deb
+ wget https://releases.hashicorp.com/vagrant/2.0.2/vagrant_2.0.2_x86_64.deb
+ sudo dpkg -i vagrant_2.0.2_x86_64.deb
+ rm -rf vagrant_2.0.2_x86_64.deb
sudo apt-get install -y virtualbox
@@ -41,7 +41,7 @@ EOF
sudo apt-get update
sudo apt-get build-dep vagrant ruby-libvirt -y
sudo apt-get install -y bridge-utils qemu libvirt-bin ebtables dnsmasq
- sudo apt-get install -y libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
+ sudo apt-get install -y libffi-dev libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
vagrant plugin install vagrant-libvirt
sudo adduser ${USER} libvirtd
sudo service libvirtd restart