-rw-r--r--  INFO  2
-rw-r--r--  INFO.yaml  64
-rwxr-xr-x  ci/deploy.sh  21
-rw-r--r--  docs/arm/container4nfv_openwrt_demo_deployment.rst  318
-rw-r--r--  docs/arm/hardware_platform_awareness.rst  171
-rw-r--r--  docs/release/release-notes/release-notes.rst  8
-rw-r--r--  docs/release/userguide/clearwater-project.rst  168
-rw-r--r--  docs/release/userguide/img/blink01.png  bin 0 -> 136470 bytes
-rw-r--r--  docs/release/userguide/img/blink02.png  bin 0 -> 181973 bytes
-rw-r--r--  docs/release/userguide/img/blink03.png  bin 0 -> 131215 bytes
-rw-r--r--  docs/release/userguide/img/blink04.png  bin 0 -> 73794 bytes
-rw-r--r--  docs/release/userguide/img/call.png  bin 0 -> 147388 bytes
-rw-r--r--  docs/release/userguide/img/twinkle01.png  bin 0 -> 184693 bytes
-rw-r--r--  docs/release/userguide/img/twinkle02.png  bin 0 -> 175079 bytes
-rw-r--r--  docs/release/userguide/img/twinkle03.png  bin 0 -> 184875 bytes
-rw-r--r--  docs/release/userguide/snort.rst  33
-rw-r--r--  src/arm/cni-deploy/.gitignore  1
-rw-r--r--  src/arm/cni-deploy/deploy.yml  32
-rw-r--r--  src/arm/cni-deploy/inventory/inventory.cfg  18
-rw-r--r--  src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml  86
-rw-r--r--  src/arm/cni-deploy/roles/flannel/tasks/main.yml  12
-rw-r--r--  src/arm/cni-deploy/roles/multus/files/10-multus.conf  13
-rw-r--r--  src/arm/cni-deploy/roles/multus/files/clusterrole.yml  16
-rw-r--r--  src/arm/cni-deploy/roles/multus/files/crdnetwork.yml  15
-rw-r--r--  src/arm/cni-deploy/roles/multus/files/flannel-obj.yml  13
-rw-r--r--  src/arm/cni-deploy/roles/multus/handlers/main.yml  4
-rw-r--r--  src/arm/cni-deploy/roles/multus/tasks/crd.yml  44
-rw-r--r--  src/arm/cni-deploy/roles/multus/tasks/main.yml  24
-rw-r--r--  src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2  22
-rw-r--r--  src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2  19
-rw-r--r--  src/arm/cni-deploy/roles/sriov/tasks/crd.yml  13
-rw-r--r--  src/arm/cni-deploy/roles/sriov/tasks/main.yml  12
-rw-r--r--  src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2  25
-rw-r--r--  src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2  19
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch  16
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708  24
-rwxr-xr-x  src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh  30
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf  21
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml  28
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml  13
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml  18
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml  47
-rw-r--r--  src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2  68
-rw-r--r--  src/arm/cni-deploy/vars/global  20
-rw-r--r--  src/arm/kubernetes_sriov/README.rst  16
-rwxr-xr-x  src/arm/kubernetes_sriov/k8s-build.sh  32
-rwxr-xr-x  src/arm/kubernetes_sriov/k8s-deploy.sh  33
-rwxr-xr-x  src/arm/kubernetes_sriov/setup.sh  7
-rwxr-xr-x  src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh  16
-rwxr-xr-x  src/arm/kubernetes_vpp_vhostuser/k8s-build.sh  25
-rwxr-xr-x  src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh  17
-rwxr-xr-x  src/arm/kubernetes_vpp_vhostuser/setup.sh  11
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/Dockerfile  22
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/bin/getips  24
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes  26
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/config/firewall  149
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user  9
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/config/network  27
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd  24
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf  29
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets  5
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem  30
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem  51
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem  31
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem  51
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf  62
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf  281
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf  12
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf  10
-rw-r--r--  src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf  21
-rw-r--r--  src/helm-charts/clearwater/Chart.yaml  11
-rw-r--r--  src/helm-charts/clearwater/README.md  19
-rw-r--r--  src/helm-charts/clearwater/templates/NOTES.txt  19
-rw-r--r--  src/helm-charts/clearwater/templates/astaire-depl.yaml  54
-rw-r--r--  src/helm-charts/clearwater/templates/astaire-svc.yaml  11
-rw-r--r--  src/helm-charts/clearwater/templates/bono-depl.yaml  66
-rw-r--r--  src/helm-charts/clearwater/templates/bono-svc.yaml  27
-rw-r--r--  src/helm-charts/clearwater/templates/cassandra-depl.yaml  38
-rw-r--r--  src/helm-charts/clearwater/templates/cassandra-svc.yaml  17
-rw-r--r--  src/helm-charts/clearwater/templates/chronos-depl.yaml  55
-rw-r--r--  src/helm-charts/clearwater/templates/chronos-svc.yaml  11
-rw-r--r--  src/helm-charts/clearwater/templates/ellis-depl.yaml  35
-rw-r--r--  src/helm-charts/clearwater/templates/ellis-svc.yaml  12
-rw-r--r--  src/helm-charts/clearwater/templates/env-vars-cm.yaml  6
-rw-r--r--  src/helm-charts/clearwater/templates/etcd-depl.yaml  59
-rw-r--r--  src/helm-charts/clearwater/templates/etcd-svc.yaml  17
-rw-r--r--  src/helm-charts/clearwater/templates/homer-depl.yaml  35
-rw-r--r--  src/helm-charts/clearwater/templates/homer-svc.yaml  11
-rw-r--r--  src/helm-charts/clearwater/templates/homestead-depl.yaml  51
-rw-r--r--  src/helm-charts/clearwater/templates/homestead-prov-depl.yaml  39
-rw-r--r--  src/helm-charts/clearwater/templates/homestead-prov-svc.yaml  11
-rw-r--r--  src/helm-charts/clearwater/templates/homestead-svc.yaml  11
-rw-r--r--  src/helm-charts/clearwater/templates/ralf-depl.yaml  51
-rw-r--r--  src/helm-charts/clearwater/templates/ralf-svc.yaml  11
-rw-r--r--  src/helm-charts/clearwater/templates/sprout-depl.yaml  51
-rw-r--r--  src/helm-charts/clearwater/templates/sprout-svc.yaml  13
-rw-r--r--  src/helm-charts/clearwater/values.yaml  7
-rw-r--r--  src/vagrant/kubeadm_basic/host_setup.sh  2
-rw-r--r--  src/vagrant/kubeadm_clearwater/Vagrantfile  2
-rwxr-xr-x  src/vagrant/kubeadm_clearwater/clearwater_setup.sh  66
-rw-r--r--  src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml  25
-rwxr-xr-x  src/vagrant/kubeadm_clearwater/deploy.sh  5
-rw-r--r--  src/vagrant/kubeadm_clearwater/host_setup.sh  6
-rw-r--r--  src/vagrant/kubeadm_clearwater/master_setup.sh  9
-rwxr-xr-x  src/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh  46
-rw-r--r--  src/vagrant/kubeadm_clearwater/worker_setup.sh  2
-rw-r--r--  src/vagrant/kubeadm_istio/host_setup.sh  2
-rwxr-xr-x  src/vagrant/kubeadm_istio/istio/bookinfo.sh  7
-rwxr-xr-x  src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh  2
-rwxr-xr-x  src/vagrant/kubeadm_istio/istio/deploy.sh  29
-rw-r--r--  src/vagrant/kubeadm_istio/master_setup.sh  23
-rwxr-xr-x  src/vagrant/kubeadm_kata/examples/nginx-app.sh  7
-rw-r--r--  src/vagrant/kubeadm_kata/examples/nginx-app.yaml  2
-rw-r--r--  src/vagrant/kubeadm_kata/host_setup.sh  36
-rw-r--r--  src/vagrant/kubeadm_kata/kata_setup.sh  50
-rw-r--r--  src/vagrant/kubeadm_kata/master_setup.sh  13
-rw-r--r--  src/vagrant/kubeadm_kata/worker_setup.sh  27
-rw-r--r--  src/vagrant/kubeadm_multus/host_setup.sh  2
-rw-r--r--  src/vagrant/kubeadm_onap/Vagrantfile  23
-rwxr-xr-x  src/vagrant/kubeadm_onap/host_setup.sh  15
-rwxr-xr-x  src/vagrant/kubeadm_onap/master_setup.sh  31
-rwxr-xr-x  src/vagrant/kubeadm_onap/onap_setup.sh  52
-rw-r--r--  src/vagrant/kubeadm_onap/registry_setup.sh  30
-rw-r--r--  src/vagrant/kubeadm_onap/setup_swap.sh  5
-rw-r--r--  src/vagrant/kubeadm_onap/setup_tunnel.sh  3
-rwxr-xr-x  src/vagrant/kubeadm_onap/worker_setup.sh  18
-rw-r--r--  src/vagrant/kubeadm_ovsdpdk/host_setup.sh  2
-rw-r--r--  src/vagrant/kubeadm_snort/Vagrantfile  29
-rwxr-xr-x  src/vagrant/kubeadm_snort/deploy.sh  9
-rw-r--r--  src/vagrant/kubeadm_snort/host_setup.sh  29
-rw-r--r--  src/vagrant/kubeadm_snort/master_setup.sh  10
-rwxr-xr-x  src/vagrant/kubeadm_snort/snort/snort-setup.sh (renamed from src/vagrant/kubeadm_clearwater/examples/create_and_apply.sh)  17
-rw-r--r--  src/vagrant/kubeadm_snort/snort/snort.yaml  32
-rw-r--r--  src/vagrant/kubeadm_snort/worker_setup.sh  4
-rw-r--r--  src/vagrant/kubeadm_virtlet/host_setup.sh  2
-rwxr-xr-x  src/vagrant/setup_vagrant.sh  8
136 files changed, 3721 insertions, 188 deletions
diff --git a/INFO b/INFO
index 1b68162..739f15f 100644
--- a/INFO
+++ b/INFO
@@ -22,12 +22,14 @@ akapadia@aarnanetworks.com
srupanagunta@gmail.com
ruijing.guo@gmail.com
chenjiankun1@huawei.com
+trevor.tao@arm.com
Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-12-13-14.59.html
Link(s) to approval of additional committers:
http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-04-11-13.59.html
https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2017-June/016505.html
https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2017-August/017629.html
+https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2018-February/020156.html
Link to approval of renaming project:
http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.txt
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..ca6975d
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,64 @@
+---
+project: 'Container4NFV'
+project_creation_date: 'Dec 13, 2016'
+project_category: 'Integration & Testing'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_container4nfv_ptl
+ name: 'Xuan Jia'
+ email: 'jason.jiaxuan@gmail.com'
+ company: 'gmail'
+ id: 'xuanjia'
+ timezone: ''
+primary_contact: *opnfv_container4nfv_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/[container4nfv]'
+ key: '[container4nfv]'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[container4nfv]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-container4nfv'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'container4nfv'
+committers:
+ - <<: *opnfv_container4nfv_ptl
+ - name: 'Jack Chan'
+ email: 'chenjiankun1@huawei.com'
+ company: 'huawei.com'
+ id: 'chenjiankun'
+ - name: 'Gergely Csatari'
+ email: 'gergely.csatari@nokia.com'
+ company: 'nokia.com'
+ id: 'csatari'
+ - name: 'Jun Li'
+ email: 'lijun_1203@126.com'
+ company: '126.com'
+ id: 'MatthewLi'
+ - name: 'Ruijing Guo'
+ email: 'ruijing.guo@intel.com'
+ company: 'intel.com'
+ id: 'Ruijing'
+ - name: 'Xuan Jia'
+ email: 'jason.jiaxuan@gmail.com'
+ company: 'gmail.com'
+ id: 'xuanjia'
+ - name: 'peng yu'
+ email: 'yu.peng36@zte.com.cn'
+ company: 'zte.com.cn'
+ id: 'YuPengZTE'
+tsc:
+ # yamllint disable rule:line-length
+  approval: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-12-13-14.59.html'
+ # yamllint enable rule:line-length
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 0069bd7..a5aec16 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -17,8 +17,19 @@
set -ex
-../src/vagrant/kubeadm_basic/deploy.sh
-../src/vagrant/kubeadm_kata/deploy.sh
-../src/vagrant/kubeadm_multus/deploy.sh
-../src/vagrant/kubeadm_virtlet/deploy.sh
-../src/vagrant/kubeadm_ovsdpdk/deploy.sh
+# Scenario sequence rules:
+# - stable first
+# - less time consuming first
+SCENARIOS="
+ kubeadm_virtlet
+ kubeadm_ovsdpdk
+ kubeadm_kata
+"
+
+for SCENARIO in $SCENARIOS; do
+ START=$(date +%s)
+ ../src/vagrant/${SCENARIO}/deploy.sh
+ END=$(date +%s)
+ DIFF=$(( $END - $START ))
+  echo "Scenario $SCENARIO took $DIFF seconds."
+done
diff --git a/docs/arm/container4nfv_openwrt_demo_deployment.rst b/docs/arm/container4nfv_openwrt_demo_deployment.rst
new file mode 100644
index 0000000..3e56a84
--- /dev/null
+++ b/docs/arm/container4nfv_openwrt_demo_deployment.rst
@@ -0,0 +1,318 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Arm Limited.
+
+
+
+===================================================
+Container4NFV OpenWrt Demo Deployment on Arm Server
+===================================================
+
+Abstract
+========
+
+This document gives a brief introduction on how to deploy openwrt services with multiple networking interfaces on Arm platform.
+
+Introduction
+============
+.. _sriov_cni: https://github.com/hustcat/sriov-cni
+.. _Flannel: https://github.com/coreos/flannel
+.. _Multus: https://github.com/Intel-Corp/multus-cni
+.. _cni: https://github.com/containernetworking/cni
+.. _kubeadm: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
+.. _openwrt: https://github.com/openwrt/openwrt
+
+The OpenWrt Project is a Linux operating system targeting embedded devices.
+It is also a well-known open source router project.
+
+We use it as a demo to show how to deploy an open source vCPE in Kubernetes.
+The LAN port is configured with the Flannel CNI, and the WAN port with the SR-IOV CNI.
+
+For demo purposes, we suggest using kubeadm to deploy the Kubernetes cluster first.
+
+Cluster
+=======
+
+Cluster Info
+
+In this case, we deploy the master and the worker on the same node.
+Suppose its address is 192.168.1.2.
+
+On 192.168.1.2, two NICs are required.
+Suppose they are eth0 and eth1: eth0 is used for the control plane and eth1 for the data plane.
+
+Deploy Kubernetes
+-----------------
+Please see the official guide (https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) as a reference.
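+
+As a minimal sketch (assuming a fresh node and the default Flannel pod network
+CIDR from the upstream guide; adjust the CIDR to your environment), the cluster
+can be initialized with:
+
+::
+ # initialize the control plane with a pod network CIDR usable by flannel
+ kubeadm init --pod-network-cidr=10.244.0.0/16
+ # make kubectl usable for the current user
+ mkdir -p $HOME/.kube
+ cp /etc/kubernetes/admin.conf $HOME/.kube/config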
+
+Create CRD
+----------
+Please make sure the Network CRD has been added to the Kubernetes cluster.
+Here we name it crdnetwork.yaml:
+
+::
+ apiVersion: apiextensions.k8s.io/v1beta1
+ kind: CustomResourceDefinition
+ metadata:
+ # name must match the spec fields below, and be in the form: <plural>.<group>
+ name: networks.kubernetes.com
+ spec:
+ # group name to use for REST API: /apis/<group>/<version>
+ group: kubernetes.com
+ # version name to use for REST API: /apis/<group>/<version>
+ version: v1
+ # either Namespaced or Cluster
+ scope: Namespaced
+ names:
+ # plural name to be used in the URL: /apis/<group>/<version>/<plural>
+ plural: networks
+ # singular name to be used as an alias on the CLI and for display
+ singular: network
+ # kind is normally the CamelCased singular type. Your resource manifests use this.
+ kind: Network
+ # shortNames allow shorter string to match your resource on the CLI
+ shortNames:
+ - net
+
+command:
+
+::
+ kubectl create -f crdnetwork.yaml
+
+Create Flannel-network for Control Plane
+----------------------------------------
+Create the flannel network for the control plane.
+Here we name it flannel-network.yaml:
+
+::
+ apiVersion: "kubernetes.com/v1"
+ kind: Network
+ metadata:
+ name: flannel-conf
+ plugin: flannel
+ args: '[
+ {
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+ ]'
+
+command:
+
+::
+ kubectl create -f flannel-network.yaml
+
+Create Sriov-network for Data Plane
+-----------------------------------
+Create the SR-IOV network in PF mode for the data plane.
+Here we name it sriov-network.yaml:
+
+::
+ apiVersion: "kubernetes.com/v1"
+ kind: Network
+ metadata:
+ name: sriov-conf
+ plugin: sriov
+ args: '[
+ {
+ "master": "eth1",
+ "pfOnly": true,
+ "ipam": {
+          "type": "dhcp"
+ }
+ }
+ ]'
+
+command:
+
+::
+ kubectl create -f sriov-network.yaml
+
+CNI Installation
+================
+.. _CNI: https://github.com/containernetworking/plugins
+
+First, we should deploy the standard CNI_ plugins. The build process is as follows:
+
+
+::
+ git clone https://github.com/containernetworking/plugins.git
+ cd plugins
+ ./build.sh
+ cp bin/* /opt/cni/bin
+
+.. _Multus: https://github.com/Intel-Corp/multus-cni
+
+To deploy control plane and data plane interfaces, besides the Flannel CNI and the SRIOV CNI,
+we need to deploy Multus_. Its build process is as follows:
+
+::
+ git clone https://github.com/Intel-Corp/multus-cni.git
+ cd multus-cni
+ ./build
+ cp bin/multus /opt/cni/bin
+
+To use the Multus_ CNI,
+we should put the Multus CNI binary into /opt/cni/bin/, where the Flannel and SRIOV
+CNI binaries are placed.
+
+.. _SRIOV: https://github.com/hustcat/sriov-cni
+
+The SRIOV_ CNI is built as follows:
+
+::
+ git clone https://github.com/hustcat/sriov-cni.git
+ cd sriov-cni
+ ./build
+ cp bin/* /opt/cni/bin
+
+We also need to enable a DHCP client for the WAN port,
+so we start the DHCP CNI daemon:
+
+::
+ /opt/cni/bin/dhcp daemon &
+
+CNI Configuration
+=================
+The following Multus CNI configuration is placed in /etc/cni/net.d/; here we name it
+multus-cni.conf:
+
+::
+ {
+ "name": "minion-cni-network",
+ "type": "multus",
+ "kubeconfig": "/etc/kubernetes/admin.conf",
+ "delegates": [{
+ "type": "flannel",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }]
+ }
+
+command:
+
+::
+  # step 1: remove all existing files in /etc/cni/net.d/
+  rm -rf /etc/cni/net.d/*
+
+  # step 2: copy /etc/kubernetes/admin.conf onto each node
+
+  # step 3: copy multus-cni.conf into /etc/cni/net.d/
+
+  # step 4: restart kubelet
+  systemctl restart kubelet
+
+
+Configuring Pod with Control Plane and Data Plane
+=================================================
+
+1. Save the following YAML to openwrt-vpn-multus.yaml.
+In this case the flannel-conf network object acts as the primary network.
+
+::
+ apiVersion: v1
+ kind: ReplicationController
+ metadata:
+ name: openwrtvpn1
+ spec:
+ replicas: 1
+ template:
+ metadata:
+ name: openwrtvpn1
+ labels:
+ app: openwrtvpn1
+ annotations:
+ networks: '[
+ { "name": "flannel-conf" },
+ { "name": "sriov-conf" }
+ ]'
+ spec:
+ containers:
+ - name: openwrtvpn1
+ image: "younglook/openwrt-demo:arm64"
+ imagePullPolicy: "IfNotPresent"
+ command: ["/sbin/init"]
+ securityContext:
+ capabilities:
+ add:
+ - NET_ADMIN
+ stdin: true
+ tty: true
+ ports:
+ - containerPort: 80
+ - containerPort: 4500
+ - containerPort: 500
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: openwrtvpn1
+ spec: # specification of the pod's contents
+ type: NodePort
+ selector:
+ app: openwrtvpn1
+ ports: [
+ {
+ "name": "floatingu",
+ "protocol": "UDP",
+ "port": 4500,
+ "targetPort": 4500
+ },
+ {
+ "name": "actualu",
+ "protocol": "UDP",
+ "port": 500,
+ "targetPort": 500
+ },
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ ]
+
+2. Create the pod:
+
+::
+ command:
+ kubectl create -f openwrt-vpn-multus.yaml
+
+3. Get the details of the running pod from the master:
+
+::
+ # kubectl get pods
+ NAME READY STATUS RESTARTS AGE
+ openwrtvpn1 1/1 Running 0 30s
+
+Verifying Pod Network
+=====================
+
+::
+ # kubectl exec openwrtvpn1 -- ip a
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+ 3: eth0@if124: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
+ link/ether 0a:58:0a:e9:40:2a brd ff:ff:ff:ff:ff:ff
+ inet 10.233.64.42/24 scope global eth0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::8e6:32ff:fed3:7645/64 scope link
+ valid_lft forever preferred_lft forever
+ 4: net0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
+ link/ether 52:54:00:d4:d2:e5 brd ff:ff:ff:ff:ff:ff
+ inet 192.168.123.2/24 scope global net0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::5054:ff:fed4:d2e5/64 scope link
+ valid_lft forever preferred_lft forever
+
+Contacts
+========
+
+Bin Lu: bin.lu@arm.com
diff --git a/docs/arm/hardware_platform_awareness.rst b/docs/arm/hardware_platform_awareness.rst
new file mode 100644
index 0000000..e1d3cbe
--- /dev/null
+++ b/docs/arm/hardware_platform_awareness.rst
@@ -0,0 +1,171 @@
+ARM64 Hardware Platform Awareness
+=================================
+
+This document describes Arm64-specific features for HPA (Hardware Platform Awareness).
+
+
+1. ARM64 ELF hwcaps
+-------------------
+The majority of hwcaps are intended to indicate the presence of features
+which are described by architected ID registers inaccessible to
+userspace code at EL0. These hwcaps are defined in terms of ID register
+fields, and should be interpreted with reference to the definition of
+these fields in the ARM Architecture Reference Manual.
+
+HWCAP_FP
+ Floating-point.
+ Functionality implied by ID_AA64PFR0_EL1.FP == 0b0000.
+
+HWCAP_ASIMD
+ Advanced SIMD.
+ Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0000.
+
+HWCAP_EVTSTRM
+ The generic timer is configured to generate events at a frequency of
+ approximately 100KHz.
+
+HWCAP_AES
+ Advanced Encryption Standard.
+ Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0001.
+
+HWCAP_PMULL
+ Polynomial multiply long (vector)
+ Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0010.
+
+HWCAP_SHA1
+ SHA1 hash update accelerator.
+ Functionality implied by ID_AA64ISAR0_EL1.SHA1 == 0b0001.
+
+HWCAP_SHA2
+ SHA2 hash update accelerator.
+ Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0001.
+
+HWCAP_CRC32
+ CRC32 instruction.
+ Functionality implied by ID_AA64ISAR0_EL1.CRC32 == 0b0001.
+
+HWCAP_ATOMICS
+ Atomics instruction.
+ Functionality implied by ID_AA64ISAR0_EL1.Atomic == 0b0010.
+
+HWCAP_FPHP
+ Instructions to convert between half-precision and single-precision, and between half-precision and double-precision.
+ Functionality implied by ID_AA64PFR0_EL1.FP == 0b0001.
+
+HWCAP_ASIMDHP
+ Indicates whether the Advanced SIMD and Floating-point extension supports half-precision floating-point conversion operations.
+ Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0001.
+
+HWCAP_CPUID
+ EL0 access to certain ID registers is available, to the extent
+ described by Documentation/arm64/cpu-feature-registers.txt.
+ These ID registers may imply the availability of features.
+
+HWCAP_ASIMDRDM
+ Indicates whether Rounding Double Multiply (RDM) instructions are implemented for Advanced SIMD.
+ Functionality implied by ID_AA64ISAR0_EL1.RDM == 0b0001.
+
+HWCAP_JSCVT
+ ARMv8.3 adds support for a new instruction to perform conversion
+ from double precision floating point to integer to match the
+ architected behaviour of the equivalent Javascript conversion.
+ Functionality implied by ID_AA64ISAR1_EL1.JSCVT == 0b0001.
+
+HWCAP_FCMA
+ ARM v8.3 adds support for new instructions to aid floating-point
+ multiplication and addition of complex numbers.
+ Functionality implied by ID_AA64ISAR1_EL1.FCMA == 0b0001.
+
+HWCAP_LRCPC
+ ARMv8.3 adds new instructions to support Release Consistent
+ processor consistent (RCpc) model, which is weaker than the
+ RCsc model.
+ Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0001.
+
+HWCAP_DCPOP
+ The ARMv8.2-DCPoP feature introduces persistent memory support to the
+ architecture, by defining a point of persistence in the memory
+ hierarchy, and a corresponding cache maintenance operation, DC CVAP.
+ Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001.
+
+HWCAP_SHA3
+ Secure Hash Standard3 (SHA3)
+ Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001.
+
+HWCAP_SM3
+ Commercial Cryptography Scheme.
+ Functionality implied by ID_AA64ISAR0_EL1.SM3 == 0b0001.
+
+HWCAP_SM4
+ Commercial Cryptography Scheme.
+ Functionality implied by ID_AA64ISAR0_EL1.SM4 == 0b0001.
+
+HWCAP_ASIMDDP
+ Performing dot product of 8bit elements in each 32bit element
+ of two vectors and accumulating the result into a third vector.
+ Functionality implied by ID_AA64ISAR0_EL1.DP == 0b0001.
+
+HWCAP_SHA512
+ Secure Hash Standard
+ Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0010.
+
+HWCAP_SVE
+ Scalable Vector Extension (SVE) is a vector extension for
+ AArch64 execution mode for the A64 instruction set of the Armv8 architecture.
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001.
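+
+On a running system, a quick way to check which of these hwcaps the kernel
+exposes is to read the Features line of /proc/cpuinfo (a minimal check; the
+flags reported depend on the CPU and kernel version):
+
+::
+
+  # each hwcap appears as a lower-case flag, e.g. fp asimd aes pmull sha1 sha2 crc32
+  grep -m1 Features /proc/cpuinfo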
+
+2. ARM64 Memory Partitioning and Monitoring (MPAM)
+--------------------------------------------------
+Armv8.4-A adds a feature called Memory Partitioning and Monitoring (MPAM). This has several uses.
+Some system designs require running multiple applications or multiple virtual machines concurrently on a system
+where the memory system is shared and where the performance of some applications or some virtual machines must
+be only minimally affected by other applications or virtual machines. These scenarios are common in enterprise
+networking and server systems.
+This proposal addresses these scenarios with two approaches that work together under software control:
+
+- Memory/Cache system resource partitioning
+- Performance resource monitoring
+
+3. Arm Power State Coordination Interface (PSCI)
+------------------------------------------------
+PSCI has the following intended uses:
+
+- Provides a generic interface that supervisory software can use to
+  manage power in the following situations:
+
+  - Core idle management.
+  - Dynamic addition of cores to and removal of cores from the
+    system, often referred to as hotplug.
+  - Secondary core boot.
+  - Moving trusted OS context from one core to another.
+  - System shutdown and reset.
+
+- Provides an interface that supervisory software can use in conjunction
+  with Firmware Table (FDT and ACPI) descriptions to support the
+  generalization of power management code.
+
+4. Arm TrustZone
+----------------
+Arm TrustZone technology provides system-wide hardware isolation for trusted software.
+The family of TrustZone technologies can be integrated into any Arm Cortex-A core,
+supporting high-performance applications processors, with TrustZone technology for Cortex-A processors.
+
+5. Arm CPU Info Detection
+-------------------------
+Computing resources should be collected by NFV COE, such as:
+- Arm specific:
+ CPU Part: indicates the primary part number.
+ For example:
+ 0xD09 Cortex-A73 processor.
+
+ CPU Architecture: indicates the architecture code.
+ For example:
+ 0xF Defined by CPUID scheme.
+
+ CPU Variant: indicates the variant number of the processor.
+ This is the major revision number n in the rn part of
+ the rnpn description of the product revision status.
+
+ CPU Implementer: indicates the implementer code.
+ For example:
+ 0x41 ASCII character 'A' - implementer is ARM Limited.
+
+ CPU Revision: indicates the minor revision number of the processor.
+ This is the minor revision number n in the pn part of
+ the rnpn description of the product revision status.
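+
+On Linux, these fields can be read per core from /proc/cpuinfo (a minimal
+sketch; the values differ per platform, e.g. implementer 0x41 for Arm Limited
+and part 0xd09 for a Cortex-A73):
+
+::
+
+  grep -E "CPU (implementer|architecture|variant|part|revision)" /proc/cpuinfo | sort -u
+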
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 51711f8..5656715 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -9,3 +9,11 @@ Container4NFV E release Notes
2. Container architecture options
3. Joid could support Kubernetes
4. Using vagrant tool to setup an env with DPDK enabled.
+
+==================================
+Container4NFV F release Notes
+==================================
+1. Enable Multus in Kubernetes
+2. Enable SR-IOV in Kubernetes
+3. Support ARM platform
+
diff --git a/docs/release/userguide/clearwater-project.rst b/docs/release/userguide/clearwater-project.rst
index 6a5ac60..38f1c7a 100644
--- a/docs/release/userguide/clearwater-project.rst
+++ b/docs/release/userguide/clearwater-project.rst
@@ -1,24 +1,25 @@
+***********************************
Clearwater implementation for OPNFV
-===================================
+***********************************
CONTAINER4NFV sets up a Kubernetes cluster on VMs running with Vagrant and kubeadm.
kubeadm assumes you have a set of machines (virtual or bare metal) that are up and running. In this way we can get a cluster with one master node and 2 workers (default). If you want to increase the number of worker nodes, please check the Vagrantfile inside the project.
-Is Clearwater suitable for Network Functions Virtualization?
+*Is Clearwater suitable for Network Functions Virtualization?*
Network Functions Virtualization or NFV is, without any doubt, the hottest topic in the telco network space right now. It’s an approach to building telco networks that moves away from proprietary boxes wherever possible to use software components running on industry-standard virtualized IT infrastructures. Over time, many telcos expect to run all their network functions operating at Layer 2 and above in an NFV environment, including IMS. Since Clearwater was designed from the ground up to run in virtualized environments and take full advantage of the flexibility of the Cloud, it is extremely well suited for NFV. Almost all of the ongoing trials of Clearwater with major network operators are closely associated with NFV-related initiatives.
About Clearwater
-----------------
+################
-[Clearwater](http://www.projectclearwater.org/about-clearwater/) follows [IMS](https://en.wikipedia.org/wiki/IP_Multimedia_Subsystem) architectural principles and supports all of the key standardized interfaces expected of an IMS core network. But unlike traditional implementations of IMS, Clearwater was designed from the ground up for the Cloud. By incorporating design patterns and open source software components that have been proven in many global Web applications, Clearwater achieves an unprecedented combination of massive scalability and exceptional cost-effectiveness.
+`Clearwater <http://www.projectclearwater.org/about-clearwater/>`_ follows `IMS <https://en.wikipedia.org/wiki/IP_Multimedia_Subsystem>`_ architectural principles and supports all of the key standardized interfaces expected of an IMS core network. But unlike traditional implementations of IMS, Clearwater was designed from the ground up for the Cloud. By incorporating design patterns and open source software components that have been proven in many global Web applications, Clearwater achieves an unprecedented combination of massive scalability and exceptional cost-effectiveness.
Clearwater provides SIP-based call control for voice and video communications and for SIP-based messaging applications. You can use Clearwater as a standalone solution for mass-market VoIP services, relying on its built-in set of basic calling features and standalone subscriber database, or you can deploy Clearwater as an IMS core in conjunction with other elements such as Telephony Application Servers and a Home Subscriber Server.
-Clearwater was designed from the ground up to be optimized for deployment in virtualized and cloud environments. It leans heavily on established design patterns for building and deploying massively scalable web applications, adapting these design patterns to fit the constraints of SIP and IMS. [The Clearwater architecture](http://www.projectclearwater.org/technical/clearwater-architecture/) therefore has some similarities to the traditional IMS architecture but is not identical.
+Clearwater was designed from the ground up to be optimized for deployment in virtualized and cloud environments. It leans heavily on established design patterns for building and deploying massively scalable web applications, adapting these design patterns to fit the constraints of SIP and IMS. `The Clearwater architecture <http://www.projectclearwater.org/technical/clearwater-architecture/>`_ therefore has some similarities to the traditional IMS architecture but is not identical.
- All components are horizontally scalable using simple, stateless load-balancing.
- All long lived state is stored on dedicated “Vellum” nodes which make use of cloud-optimized storage technologies such as Cassandra. No long lived state is stored on other production nodes, making it quick and easy to dynamically scale the clusters and minimizing the impact if a node is lost.
@@ -27,8 +28,163 @@ Clearwater was designed from the ground up to be optimized for deployment in vir
Clearwater Architecture
------------------------
+#######################
.. image:: img/clearwater_architecture.png
:width: 800px
:alt: Clearwater Architecture
+
+
+**********
+Quickstart
+**********
+
+This repository contains instructions and resources for deploying Metaswitch's Clearwater project with Kubernetes.
+
+
+If you need more information about the Clearwater project, please check out our
+`documentation <https://github.com/opnfv/container4nfv/blob/master/docs/release/userguide/clearwater-project.rst>`_
+or the `official repository <https://github.com/Metaswitch/clearwater-docker>`_.
+
+
+Exposed Services
+################
+
+
+The deployment exposes:
+
+ - the Ellis web UI on port 30080 for self-provisioning.
+ - STUN/TURN on port 3478 for media relay.
+ - SIP on port 5060 for service.
+ - SIP/WebSocket on port 5062 for service.
+
+SIP devices can register with the bono node on port 5060, and the Ellis provisioning interface can be accessed at port 30080.
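+
+One way to confirm the exposed ports from the master node is to list the
+services (a quick check; service names such as bono and ellis come from the
+helm chart templates in this repository):
+
+::
+
+   kubectl get svc bono ellis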
+
+
+Prerequisites
+##############
+
+Install Docker and Vagrant
+********************************************
+
+CONTAINER4NFV uses ``setup_vagrant.sh`` to install all resource used by this repository.
+
+::
+
+ container4nfv/src/vagrant# ./setup_vagrant.sh -b libvirt
+
+Installation
+##############
+
+Deploy Clearwater with kubeadm
+********************************************
+
+Check ``clearwater/clearwater_setup.sh`` for details about k8s deployment.
+
+
+::
+
+ container4nfv/src/vagrant/kubeadm_clearwater# ./deploy.sh
+
+
+Destroy
+##########
+
+::
+
+ container4nfv/src/vagrant# ./cleanup.sh
+
+
+Making calls through Clearwater
+###############################
+
+
+Connect to Ellis service
+********************************************
+It's important to connect to Ellis to generate the SIP username, password and domain we will use with the SIP client.
+Use your <master ip address> + port 30080 (the k8s NodePort). If you are not sure what Ellis's URL is, please check inside your master node.
+
+::
+
+ kubeadm_clearwater# vagrant ssh master
+ master@vagrant# ifconfig eth0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1
+ 192.168.121.3
+
+In your browser connect to `<master_ip>:30080` (ex. 192.168.121.3:30080).
+
+
+After that, sign up and generate two users. The signup key is **secret**. Ellis will automatically allocate you a new number and display
+its password to you. Remember this password as it will only be displayed once.
+From now on, we will use <username> to refer to the SIP username (e.g. 6505551234) and <password> to refer to the password.
+
+
+Config and install two SIP clients
+********************************************
+We'll use both the Twinkle and Blink SIP clients, since we are going to try this out inside a LAN network.
+This is, of course, only a local test. Configuring the clients may be a little bit tricky, so we add some screenshots:
+
+
+Blink setup
+********************************************
+1. Add <username> and <password>.
+
+.. image:: img/blink01.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+2. Configure a proxy to k8s.
+
+
+.. image:: img/blink02.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+3. Configure the network to use TCP only.
+
+
+.. image:: img/blink03.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+.. image:: img/blink04.png
+ :width: 800px
+ :alt: Blink SIP client
+
+
+Twinkle setup
+********************************************
+
+1. Configure a proxy to k8s.
+
+
+.. image:: img/twinkle01.png
+ :width: 800px
+ :alt: Twinkle SIP client
+
+
+2. Add <username> and <password>.
+
+
+.. image:: img/twinkle02.png
+ :width: 800px
+ :alt: Twinkle SIP client
+
+
+3. Configure the network to use TCP only.
+
+
+.. image:: img/twinkle03.png
+ :width: 800px
+ :alt: Twinkle SIP client
+
+
+Make the call
+********************************************
+
+
+.. image:: img/call.png
+ :width: 800px
+ :alt: Call
diff --git a/docs/release/userguide/img/blink01.png b/docs/release/userguide/img/blink01.png
new file mode 100644
index 0000000..ac74788
--- /dev/null
+++ b/docs/release/userguide/img/blink01.png
Binary files differ
diff --git a/docs/release/userguide/img/blink02.png b/docs/release/userguide/img/blink02.png
new file mode 100644
index 0000000..7eb8d46
--- /dev/null
+++ b/docs/release/userguide/img/blink02.png
Binary files differ
diff --git a/docs/release/userguide/img/blink03.png b/docs/release/userguide/img/blink03.png
new file mode 100644
index 0000000..ae6220a
--- /dev/null
+++ b/docs/release/userguide/img/blink03.png
Binary files differ
diff --git a/docs/release/userguide/img/blink04.png b/docs/release/userguide/img/blink04.png
new file mode 100644
index 0000000..17511b5
--- /dev/null
+++ b/docs/release/userguide/img/blink04.png
Binary files differ
diff --git a/docs/release/userguide/img/call.png b/docs/release/userguide/img/call.png
new file mode 100644
index 0000000..ec4cdbf
--- /dev/null
+++ b/docs/release/userguide/img/call.png
Binary files differ
diff --git a/docs/release/userguide/img/twinkle01.png b/docs/release/userguide/img/twinkle01.png
new file mode 100644
index 0000000..e424d51
--- /dev/null
+++ b/docs/release/userguide/img/twinkle01.png
Binary files differ
diff --git a/docs/release/userguide/img/twinkle02.png b/docs/release/userguide/img/twinkle02.png
new file mode 100644
index 0000000..8d95bae
--- /dev/null
+++ b/docs/release/userguide/img/twinkle02.png
Binary files differ
diff --git a/docs/release/userguide/img/twinkle03.png b/docs/release/userguide/img/twinkle03.png
new file mode 100644
index 0000000..4b4b5c7
--- /dev/null
+++ b/docs/release/userguide/img/twinkle03.png
Binary files differ
diff --git a/docs/release/userguide/snort.rst b/docs/release/userguide/snort.rst
new file mode 100644
index 0000000..9bb6b3b
--- /dev/null
+++ b/docs/release/userguide/snort.rst
@@ -0,0 +1,33 @@
+================
+ Snort
+================
+
+--------------
+What is Snort?
+--------------
+
+`Snort <https://www.snort.org/>`_ is an open source network intrusion prevention system, capable
+of performing real-time traffic analysis and packet logging on IP
+networks. It can perform protocol analysis, content searching/matching,
+and can be used to detect a variety of attacks and probes, such as buffer
+overflows, stealth port scans, CGI attacks, SMB probes, OS fingerprinting
+attempts, and much more.
+
+-------------------------
+What can I do with Snort?
+-------------------------
+
+Snort has three primary uses: It can be used as a straight packet sniffer
+like tcpdump, a packet logger (useful for network traffic debugging, etc),
+or as a full blown network intrusion prevention system.
+
+--------------------
+How does Snort work?
+--------------------
+
+Snort works with rules. Rules are a different methodology for performing
+detection, which bring the advantage of 0-day detection to the table.
+Unlike signatures, rules are based on detecting the actual vulnerability,
+not an exploit or a unique piece of data. Developing a rule requires an
+acute understanding of how the vulnerability actually works.
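+
+As an illustration, a minimal custom rule can be appended to the local rules
+file (the sid value and the /etc/snort/rules/local.rules path are common
+defaults, not taken from this repository):
+
+::
+
+  # alert on any TCP traffic to port 80 and log the message below
+  echo 'alert tcp any any -> any 80 (msg:"HTTP traffic seen"; sid:1000001; rev:1;)' >> /etc/snort/rules/local.rules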
+
diff --git a/src/arm/cni-deploy/.gitignore b/src/arm/cni-deploy/.gitignore
new file mode 100644
index 0000000..a8b42eb
--- /dev/null
+++ b/src/arm/cni-deploy/.gitignore
@@ -0,0 +1 @@
+*.retry
diff --git a/src/arm/cni-deploy/deploy.yml b/src/arm/cni-deploy/deploy.yml
new file mode 100644
index 0000000..c54353a
--- /dev/null
+++ b/src/arm/cni-deploy/deploy.yml
@@ -0,0 +1,32 @@
+---
+- name: Fixup default flannel
+ hosts: kube-master
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: flannel, tags: [flannel]}
+
+- name: Deploy Multus CNI
+ hosts: all
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: multus, tags: [multus]}
+
+- name: Deploy SRIOV CNI
+ hosts: all
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: sriov, tags: [sriov]}
+
+- name: Deploy Vhostuser CNI and VPP
+ hosts: all
+ gather_facts: "yes"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: vhost-vpp, tags: [vhost-vpp]}
diff --git a/src/arm/cni-deploy/inventory/inventory.cfg b/src/arm/cni-deploy/inventory/inventory.cfg
new file mode 100644
index 0000000..cd8bb25
--- /dev/null
+++ b/src/arm/cni-deploy/inventory/inventory.cfg
@@ -0,0 +1,18 @@
+# compass-tasks: /opt/kargo_k8s/inventory/inventory.cfg
+
+[all]
+host2 ansible_ssh_host=10.1.0.51 ansible_ssh_pass=root ansible_user=root
+host1 ansible_ssh_host=10.1.0.50 ansible_ssh_pass=root ansible_user=root
+
+[kube-master]
+host1
+
+[etcd]
+host1
+
+[kube-node]
+host2
+
+[k8s-cluster:children]
+kube-node
+kube-master
diff --git a/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml
new file mode 100644
index 0000000..a99983b
--- /dev/null
+++ b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml
@@ -0,0 +1,86 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kube-flannel
+ namespace: "kube-system"
+ labels:
+ tier: node
+ k8s-app: flannel
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ k8s-app: flannel
+ spec:
+ serviceAccountName: flannel
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.9.1-arm64
+ imagePullPolicy: IfNotPresent
+ resources:
+ limits:
+ cpu: 300m
+ memory: 500M
+ requests:
+ cpu: 150m
+ memory: 64M
+ command: ["/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"]
+ securityContext:
+ privileged: true
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ # - name: install-cni
+ # image: linaro/flannel-cni-arm64:v0.3.0
+ # command: ["/install-cni.sh"]
+ # env:
+ # # The CNI network config to install on each node.
+ # - name: CNI_NETWORK_CONFIG
+ # valueFrom:
+ # configMapKeyRef:
+ # name: kube-flannel-cfg
+ # key: cni-conf.json
+ # - name: CNI_CONF_NAME
+ # value: "10-flannel.conflist"
+ # volumeMounts:
+ # - name: cni
+ # mountPath: /host/etc/cni/net.d
+ # - name: host-cni-bin
+ # mountPath: /host/opt/cni/bin/
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ volumes:
+ - name: run
+ hostPath:
+ path: /run
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+ # - name: host-cni-bin
+ # hostPath:
+ # path: /opt/cni/bin
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 20%
+ type: RollingUpdate
diff --git a/src/arm/cni-deploy/roles/flannel/tasks/main.yml b/src/arm/cni-deploy/roles/flannel/tasks/main.yml
new file mode 100644
index 0000000..4f1a910
--- /dev/null
+++ b/src/arm/cni-deploy/roles/flannel/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Copy flannel daemonset file
+ copy:
+ src: cni-flannel-ds.yml
+ dest: /tmp/cni-flannel-ds.yml
+
+- name: Apply flannel daemonset
+ shell: kubectl apply -f /tmp/cni-flannel-ds.yml
+ ignore_errors: "yes"
+
+- name: Sleep 10 seconds
+ wait_for: timeout=10
diff --git a/src/arm/cni-deploy/roles/multus/files/10-multus.conf b/src/arm/cni-deploy/roles/multus/files/10-multus.conf
new file mode 100644
index 0000000..3726413
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/10-multus.conf
@@ -0,0 +1,13 @@
+{
+ "name": "multus-cni-network",
+ "type": "multus",
+ "kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
+ "delegates": [{
+ "type": "flannel",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }]
+}
+
diff --git a/src/arm/cni-deploy/roles/multus/files/clusterrole.yml b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml
new file mode 100644
index 0000000..fb056d4
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml
@@ -0,0 +1,16 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: multus-crd-overpowered
+rules:
+ - apiGroups:
+ - '*'
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - nonResourceURLs:
+ - '*'
+ verbs:
+ - '*'
diff --git a/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml
new file mode 100644
index 0000000..9aefdb8
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml
@@ -0,0 +1,15 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networks.kubernetes.com
+spec:
+ group: kubernetes.com
+ version: v1
+ scope: Namespaced
+ names:
+ plural: networks
+ singular: network
+ kind: Network
+ shortNames:
+ - net
diff --git a/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml
new file mode 100644
index 0000000..bd7891d
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml
@@ -0,0 +1,13 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: flannel-networkobj
+plugin: flannel
+args: '[
+ {
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/multus/handlers/main.yml b/src/arm/cni-deploy/roles/multus/handlers/main.yml
new file mode 100644
index 0000000..8474d34
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: Restart kubelet
+ service:
+ name: kubelet
+ state: restarted
diff --git a/src/arm/cni-deploy/roles/multus/tasks/crd.yml b/src/arm/cni-deploy/roles/multus/tasks/crd.yml
new file mode 100644
index 0000000..cacf98a
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/tasks/crd.yml
@@ -0,0 +1,44 @@
+---
+- name: Copy yaml files
+ copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - clusterrole.yml
+ - crdnetwork.yml
+ - flannel-obj.yml
+
+- name: Copy macvlan template
+ template:
+ src: macvlan-obj.yml.j2
+ dest: /tmp/macvlan-obj.yml
+
+- name: Copy Multus testpod template
+ template:
+ src: multus-testpod.yml.j2
+ dest: /root/multus-testpod.yml
+
+- name: Create cluster role
+ shell: kubectl apply -f /tmp/clusterrole.yml
+
+- name: Check if role binding is created
+ shell: kubectl get clusterrolebinding multus-node-{{ item }}
+ register: check_rb
+ ignore_errors: "yes"
+ with_items: "{{ groups['all'] }}"
+
+- name: Create role binding
+ shell: >
+ kubectl create clusterrolebinding multus-node-{{ item }}
+ --clusterrole=multus-crd-overpowered
+ --user=system:node:{{ item }}
+ when: check_rb is failed
+ with_items: "{{ groups['all'] }}"
+
+- name: Create network CRD
+ shell: kubectl apply -f /tmp/crdnetwork.yml
+
+- name: Create flannel and macvlan network objects
+ shell: >
+ kubectl apply -f /tmp/flannel-obj.yml &&
+ kubectl apply -f /tmp/macvlan-obj.yml
diff --git a/src/arm/cni-deploy/roles/multus/tasks/main.yml b/src/arm/cni-deploy/roles/multus/tasks/main.yml
new file mode 100644
index 0000000..a200215
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Build Multus CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ multus_repo }} multus_cni && cd multus_cni &&
+ git checkout {{ multus_commit }} && ./build && cp bin/multus /opt/cni/bin/"
+ args:
+ creates: /opt/cni/bin/multus
+
+- name: Remove default CNI configuration
+ shell: rm -f /etc/cni/net.d/*
+ args:
+ warn: "no"
+
+- name: Set Multus as default CNI
+ copy:
+ src: 10-multus.conf
+ dest: /etc/cni/net.d/
+ notify:
+ - Restart kubelet
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
diff --git a/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2
new file mode 100644
index 0000000..b5a549f
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: macvlan-networkobj
+plugin: macvlan
+args: '[
+ {
+ "master": "{{ macvlan_master }}",
+ "mode": "vepa",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ macvlan_subnet }}",
+ "rangeStart": "{{ macvlan_range_start }}",
+ "rangeEnd": "{{ macvlan_range_end }}",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ],
+ "gateway": "{{ macvlan_gateway }}"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2
new file mode 100644
index 0000000..4884846
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: multus-test
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "macvlan-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: multus-test
+ image: "busybox"
+ command: ["sleep", "100d"]
+ stdin: true
+ tty: true
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
diff --git a/src/arm/cni-deploy/roles/sriov/tasks/crd.yml b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml
new file mode 100644
index 0000000..5cc7892
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml
@@ -0,0 +1,13 @@
+---
+- name: Copy SRIOV template
+ template:
+ src: sriov-obj.yml.j2
+ dest: /tmp/sriov-obj.yml
+
+- name: Copy SRIOV testpod template
+ template:
+ src: sriov-testpod.yml.j2
+ dest: /root/sriov-testpod.yml
+
+- name: Create SRIOV network object
+ shell: kubectl apply -f /tmp/sriov-obj.yml
diff --git a/src/arm/cni-deploy/roles/sriov/tasks/main.yml b/src/arm/cni-deploy/roles/sriov/tasks/main.yml
new file mode 100644
index 0000000..9c190ad
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Build SRIOV CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ sriov_repo }} sriov_cni && cd sriov_cni &&
+ git checkout {{ sriov_commit }} && ./build && cp bin/sriov /opt/cni/bin/"
+ args:
+ creates: /opt/cni/bin/sriov
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2
new file mode 100644
index 0000000..6c67968
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2
@@ -0,0 +1,25 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: sriov-networkobj
+plugin: sriov
+args: '[
+ {
+ "master": "{{ sriov_master }}",
+ "pfOnly": true,
+ "if0name": "net0",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ sriov_subnet }}",
+ "rangeStart": "{{ sriov_range_start }}",
+ "rangeEnd": "{{ sriov_range_end }}",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ],
+ "gateway": "{{ sriov_gateway }}"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2
new file mode 100644
index 0000000..c1d01bc
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sriov-test
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "sriov-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: sriov-test
+ image: "busybox"
+ command: ["sleep", "100d"]
+ stdin: true
+ tty: true
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch
new file mode 100644
index 0000000..171ff4d
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
+index e320811..c1b1640 100644
+--- a/drivers/net/virtio/virtio_ethdev.c
++++ b/drivers/net/virtio/virtio_ethdev.c
+@@ -1754,6 +1754,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
+ virtqueue_notify(rxvq->vq);
+ }
+
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txvq = dev->data->tx_queues[i];
++ virtqueue_notify(txvq->vq);
++ }
++
+ PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708 b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708
new file mode 100644
index 0000000..2f83534
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708
@@ -0,0 +1,24 @@
+FROM ubuntu:xenial
+
+RUN apt-get update && \
+ apt-get install -y git make openssl libcrypto++-dev libnuma-dev && \
+ apt-get autoclean
+
+RUN git clone https://gerrit.fd.io/r/vpp -b stable/1710 /root/vpp-1710
+
+WORKDIR /root/vpp-1710
+COPY ./0001-net-virtio-ethdev.patch dpdk/dpdk-17.08_patches/0001-net-virtio-ethdev.patch
+RUN sed -i "s/sudo -E //g" Makefile
+RUN make UNATTENDED=yes install-dep
+
+WORKDIR /root/vpp-1710/build-root
+RUN ./bootstrap.sh
+RUN make PLATFORM=vpp TAG=vpp_debug vpp-install
+RUN mkdir -p /etc/vpp && \
+ cp /root/vpp-1710/src/vpp/conf/startup.conf /etc/vpp/startup.conf && \
+ cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin && \
+ ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins
+RUN groupadd vpp
+
+ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/dpdk/bin"
+ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh
new file mode 100755
index 0000000..15b0d27
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -x
+
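+# Derive the container ID from the mount info of PID 1, then read the JSON and
+# ip4 config files written by the vhostuser CNI under
+# /vhost-user-net-plugin/<cid>/ to recover the interface MAC and IP address.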
+cid=`sed -ne '/hostname/p' /proc/1/task/1/mountinfo | awk -F '/' '{print $6}'`
+cid_s=${cid:0:12}
+filename=${cid_s}-net1.json
+ifstring=`cat /vhost-user-net-plugin/${cid}/${cid_s}-net1.json | awk -F ',' '{print $4}'`
+ifmac=`echo ${ifstring} | awk -F '\"' '{print $4}'`
+
+ipstr=$(cat /vhost-user-net-plugin/${cid}/${cid_s}-net1-ip4.conf |grep "ipAddr")
+ipaddr=$(echo $ipstr | awk -F '\"' '{print $4}')
+ipaddr1=$(echo $ipaddr | cut -d / -f 1)
+
+vdev_str="vdev virtio_user0,path=/vhost-user-net-plugin/$cid/$cid_s-net1,mac=$ifmac"
+
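+# Patch /etc/vpp/startup.conf with a dpdk { vdev virtio_user0,... } stanza so
+# VPP attaches to the vhost-user socket created by the CNI, then start VPP and
+# bring the VirtioUser interface up with the IP taken from the CNI config.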
+sed -i.bak '/# dpdk/a\dpdk \{' /etc/vpp/startup.conf
+sed -i.bak "/# vdev eth_bond1,mode=1/a\\$vdev_str" /etc/vpp/startup.conf
+sed -i.bak '/# socket-mem/a\\}' /etc/vpp/startup.conf
+
+vpp -c /etc/vpp/startup.conf &
+
+sleep 40
+
+vppctl set int state VirtioUser0/0/0 up
+vppctl set int ip address VirtioUser0/0/0 ${ipaddr1}/24
+vppctl show int
+vppctl show int address
+
+echo ${ipaddr1} > /vhost-user-net-plugin/$(hostname)
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf
new file mode 100644
index 0000000..ae86e38
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf
@@ -0,0 +1,21 @@
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen /run/vpp/cli.sock
+ gid vpp
+}
+api-trace {
+ on
+}
+api-segment {
+ gid vpp
+}
+cpu {
+ main-core 1
+ corelist-workers 2-3
+ workers 2
+}
+dpdk {
+ uio-driver vfio-pci
+}
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml
new file mode 100644
index 0000000..1e9bc66
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml
@@ -0,0 +1,28 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: vhostuser-networkobj
+plugin: vhostuser
+args: '[
+ {
+ "type": "vhostuser",
+ "name": "vhostuser-network",
+ "if0name": "net1",
+ "vhost": {
+ "vhost_tool": "/opt/cni/bin/vpp-config.py"
+ },
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.56.217.0/24",
+ "rangeStart": "10.56.217.131",
+ "rangeEnd": "10.56.217.190",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ],
+ "gateway": "10.56.217.1"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml
new file mode 100644
index 0000000..ad36c90
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml
@@ -0,0 +1,13 @@
+---
+- name: Copy Vhostuser yaml
+ copy:
+ src: vhostuser-obj.yml
+ dest: /tmp/vhostuser-obj.yml
+
+- name: Copy VPP testpod template
+ template:
+ src: vpp-testpod.yml.j2
+ dest: /root/vpp-testpod.yml
+
+- name: Create Vhostuser network object
+ shell: kubectl apply -f /tmp/vhostuser-obj.yml
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml
new file mode 100644
index 0000000..df890ea
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Build Vhostuser CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ vhostuser_repo }} vhostuser_cni && cd vhostuser_cni
+ && git checkout {{ vhostuser_commit }} && ./build
+ && cp bin/vhostuser /opt/cni/bin/
+ && cp tests/vpp-config-debug.py /opt/cni/bin/vpp-config.py"
+ args:
+ creates: /opt/cni/bin/vhostuser
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
+
+- name: Import VPP task
+ import_tasks: vpp.yml
+ when: inventory_hostname in groups["kube-node"]
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml
new file mode 100644
index 0000000..7f5be05
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml
@@ -0,0 +1,47 @@
+---
+- name: Create dest directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /tmp/vpp1710/
+ - /var/lib/cni/vhostuser/
+ - /etc/vpp/
+
+- name: Copy VPP files
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - {src: "Dockerfile.vpp1710-dpdk1708", dest: "/tmp/vpp1710/Dockerfile"}
+ - {src: "0001-net-virtio-ethdev.patch", dest: "/tmp/vpp1710/0001-net-virtio-ethdev.patch"}
+ - {src: "setvpp.sh", dest: "/var/lib/cni/vhostuser/setvpp.sh"}
+ - {src: "startup.conf", dest: "/etc/vpp/startup.conf"}
+
+- name: Check if VPP image exists
+ shell: docker inspect --type=image vpp-1710:virtio-patched > /dev/null 2>&1
+ ignore_errors: "yes"
+ register: check_vpp
+
+- name: Building VPP container. Be patient...
+ shell: docker build -t vpp-1710:virtio-patched --network host .
+ args:
+ chdir: /tmp/vpp1710/
+ when: check_vpp is failed
+
+- name: Copy VPP binaries to host
+ shell: >
+ docker run --rm -v /root/vpp-1710/build-root:/root/vpp-host vpp-1710:virtio-patched
+ /bin/cp -a /root/vpp-1710/build-root/install-vpp_debug-native /root/vpp-host
+ && /bin/cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin
+ && /bin/rm -rf /usr/lib/vpp_plugins
+ && ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins
+ && (groupadd vpp || true)
+
+- name: Copy libcrypto.so.1.0.0 for CentOS
+ shell: >
+ docker run --rm -v /usr/lib64:/root/lib64-centos vpp-1710:virtio-patched
+ /bin/cp /lib/aarch64-linux-gnu/libcrypto.so.1.0.0 /root/lib64-centos/
+ args:
+ creates: /usr/lib64/libcrypto.so.1.0.0
+ when: ansible_os_family == "RedHat"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2 b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2
new file mode 100644
index 0000000..2efd4e0
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2
@@ -0,0 +1,68 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vpp-test1
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "vhostuser-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: vpp-test1
+ image: vpp-1710:virtio-patched
+ imagePullPolicy: "Never"
+ stdin: true
+ terminationMessagePath: /dev/termination-log
+ tty: true
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /vhost-user-net-plugin
+ name: vhost-user-net-plugin
+ - mountPath: /mnt/huge
+ name: huge
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
+ volumes:
+ - name: vhost-user-net-plugin
+ hostPath:
+ path: /var/lib/cni/vhostuser
+ - name: huge
+ hostPath:
+ path: /mnt/huge
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vpp-test2
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "vhostuser-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: vpp-test2
+ image: vpp-1710:virtio-patched
+ imagePullPolicy: "Never"
+ stdin: true
+ terminationMessagePath: /dev/termination-log
+ tty: true
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /vhost-user-net-plugin
+ name: vhost-user-net-plugin
+ - mountPath: /mnt/huge
+ name: huge
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
+ volumes:
+ - name: vhost-user-net-plugin
+ hostPath:
+ path: /var/lib/cni/vhostuser
+ - name: huge
+ hostPath:
+ path: /mnt/huge
diff --git a/src/arm/cni-deploy/vars/global b/src/arm/cni-deploy/vars/global
new file mode 100644
index 0000000..35d76b4
--- /dev/null
+++ b/src/arm/cni-deploy/vars/global
@@ -0,0 +1,20 @@
+multus_repo: https://github.com/Intel-Corp/multus-cni
+multus_commit: 61959e04
+
+sriov_repo: https://github.com/hustcat/sriov-cni
+sriov_commit: 8b7ed984
+
+vhostuser_repo: https://github.com/yibo-cai/vhost-user-net-plugin
+vhostuser_commit: e8dc9d8e
+
+macvlan_master: eth2
+macvlan_subnet: 192.168.166.0/24
+macvlan_range_start: 192.168.166.11
+macvlan_range_end: 192.168.166.30
+macvlan_gateway: 192.168.166.1
+
+sriov_master: eth2
+sriov_subnet: 192.168.166.0/24
+sriov_range_start: 192.168.166.31
+sriov_range_end: 192.168.166.50
+sriov_gateway: 192.168.166.1
diff --git a/src/arm/kubernetes_sriov/README.rst b/src/arm/kubernetes_sriov/README.rst
new file mode 100644
index 0000000..fde2f51
--- /dev/null
+++ b/src/arm/kubernetes_sriov/README.rst
@@ -0,0 +1,16 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, arm Limited.
+
+.. _Flannel: https://github.com/coreos/flannel
+.. _SRIOV: https://github.com/hustcat/sriov-cni
+
+===============================================
+Kubernetes Deployment with SRIOV CNI
+===============================================
+
+The scenario deploys pods with SRIOV/Multus/Flannel CNI.
+In this case, "eth0" is used as the default interface, and the second interface, named "net0", is
+used as the data plane.
+
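
A minimal sketch of how the two interfaces could be checked once the test pod from k8s-deploy.sh is running; the pod name multus-test1, the test pod manifest path, and the ping target are taken from that script, while the availability of the ip tool inside the image is an assumption:

    # Run on the Kubernetes master (the deploy script uses root@10.1.0.50).
    kubectl create -f /etc/kubernetes/sriov-test-pod.yml
    sleep 30
    # "eth0" is the default (Flannel) interface, "net0" the SRIOV data plane.
    kubectl exec multus-test1 -- ip addr show eth0
    kubectl exec multus-test1 -- ip addr show net0
    # Basic data-plane connectivity check, as in k8s-deploy.sh.
    kubectl exec multus-test1 -- ping -c 3 192.168.123.31
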
diff --git a/src/arm/kubernetes_sriov/k8s-build.sh b/src/arm/kubernetes_sriov/k8s-build.sh
new file mode 100755
index 0000000..bc99e30
--- /dev/null
+++ b/src/arm/kubernetes_sriov/k8s-build.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -e
+
+sudo apt-get install -y docker.io libvirt-bin virt-manager qemu qemu-efi
+
+WORKSPACE=`pwd`
+if [ ! -d "$WORKSPACE/compass4nfv" ]; then
+ git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+fi
+
+cd compass4nfv
+
+WORKSPACE=`pwd`
+
+COMPASS_WORK_DIR=$WORKSPACE/../compass-work
+mkdir -p $COMPASS_WORK_DIR
+if [ ! -d "$WORKSPACE/work" ]; then
+ ln -s $COMPASS_WORK_DIR work
+fi
+
+#TODO: remove workaround after patches merged
+if [ ! -f "$WORKSPACE/patched" ]; then
+
+ git checkout a360411cb8c775dffa24a4157cec2b566cbde6f3
+ curl http://people.linaro.org/~yibo.cai/compass/0001-deploy-cobbler-drop-tcp_tw_recycle-in-sysctl.conf.patch | git apply || true
+ curl http://people.linaro.org/~yibo.cai/compass/0002-docker-compose-support-aarch64.patch | git apply || true
+ curl http://people.linaro.org/~yibo.cai/compass/0004-add-a-multus-with-sriov-interfaces-installation.patch | git apply || true
+ touch "$WORKSPACE/patched"
+fi
+
+# build tarball
+COMPASS_ISO_REPO='http://people.linaro.org/~yibo.cai/compass' ./build.sh
diff --git a/src/arm/kubernetes_sriov/k8s-deploy.sh b/src/arm/kubernetes_sriov/k8s-deploy.sh
new file mode 100755
index 0000000..f625d22
--- /dev/null
+++ b/src/arm/kubernetes_sriov/k8s-deploy.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -e
+
+#sudo apt-get install -y docker.io libvirt-bin virt-manager qemu qemu-efi
+
+cd compass4nfv
+
+export ADAPTER_OS_PATTERN='(?i)CentOS-7.*arm.*'
+export OS_VERSION="centos7"
+export KUBERNETES_VERSION="v1.7.3"
+export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
+export NETWORK="deploy/conf/network_cfg_sriov.yaml"
+export VIRT_NUMBER=2 VIRT_CPUS=2 VIRT_MEM=4096 VIRT_DISK=50G
+
+# enable sriov cni deployment
+echo "Set sriov cni scenario"
+sed -i.bak 's/^kube_network_plugin:.*$/kube_network_plugin: sriov/' \
+ deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml
+
+./deploy.sh
+
+set -ex
+
+# basic test: ssh to master, check k8s node status
+sshpass -p root ssh root@10.1.0.50 kubectl get nodes 2>/dev/null | grep -i ready
+
+# scenario specific tests
+# show two nics in container
+sshpass -p root ssh root@10.1.0.50 \
+ kubectl create -f /etc/kubernetes/sriov-test-pod.yml && \
+ sleep 30 && \
+ kubectl exec multus-test1 -- sh -c "ping -c 3 192.168.123.31"
diff --git a/src/arm/kubernetes_sriov/setup.sh b/src/arm/kubernetes_sriov/setup.sh
new file mode 100755
index 0000000..b33e990
--- /dev/null
+++ b/src/arm/kubernetes_sriov/setup.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+./k8s-build.sh
+
+sleep 2
+
+./k8s-deploy.sh
diff --git a/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh b/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh
new file mode 100755
index 0000000..941b917
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -e
+
+cd ../cni-deploy
+
+DEPLOY_SCENARIO="k8-vpp-nofeature-noha"
+
+export ANSIBLE_HOST_KEY_CHECKING=False
+
+virtualenv .venv
+source .venv/bin/activate
+pip install ansible==2.6.1
+
+#deploy flannel, multus
+ansible-playbook -i inventory/inventory.cfg deploy.yml --tags flannel,multus
+#deploy vhost-vpp
+ansible-playbook -i inventory/inventory.cfg deploy.yml --tags vhost-vpp
diff --git a/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh b/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh
new file mode 100755
index 0000000..fa7aa53
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -e
+
+
+sudo apt-get install -y docker.io libvirt-bin virt-manager qemu qemu-efi
+
+WORKSPACE=`pwd`
+if [ ! -d "$WORKSPACE/compass4nfv" ]; then
+ git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+fi
+
+#rm -rf compass4nfv
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+
+cd compass4nfv
+
+COMPASS_WORK_DIR=$WORKSPACE/../compass-work
+mkdir -p $COMPASS_WORK_DIR
+ln -s $COMPASS_WORK_DIR work
+
+sudo docker rm -f `docker ps | grep compass | cut -f1 -d' '` || true
+
+curl -s http://people.linaro.org/~yibo.cai/compass/compass4nfv-arm64-fixup.sh | bash || true
+
+./build.sh
diff --git a/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh b/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh
new file mode 100755
index 0000000..21082b3
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -e
+
+cd compass4nfv
+
+
+export ADAPTER_OS_PATTERN='(?i)CentOS-7.*arm.*'
+export OS_VERSION="centos7"
+export KUBERNETES_VERSION="v1.9.1"
+
+
+#For virtual environment:
+export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
+export NETWORK="deploy/conf/vm_environment/network.yml"
+export VIRT_NUMBER=2 VIRT_CPUS=8 VIRT_MEM=8192 VIRT_DISK=50G
+
+./deploy.sh
diff --git a/src/arm/kubernetes_vpp_vhostuser/setup.sh b/src/arm/kubernetes_vpp_vhostuser/setup.sh
new file mode 100755
index 0000000..ae30803
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/setup.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+echo "Now build:"
+./k8s-build.sh
+
+sleep 2
+echo "Now deploy VMs:"
+./k8s-deploy.sh
+
+sleep 2
+echo "Now deploy vpp_vhostuser:"
+./deploy-cni.sh
diff --git a/src/arm/openwrt_demo/1_buildimage/Dockerfile b/src/arm/openwrt_demo/1_buildimage/Dockerfile
new file mode 100644
index 0000000..5b6fc22
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/Dockerfile
@@ -0,0 +1,22 @@
+FROM openwrt/build/base
+
+ADD resources /root/resources
+
+RUN mkdir -p /root/certs/keys \
+ && mv /root/resources/keys/* /root/certs/keys/ \
+ && mv /root/certs/keys/vpn-server-cert.pem /etc/ipsec.d/certs/ \
+ && mv /root/certs/keys/vpn-server-key.pem /etc/ipsec.d/private/ \
+ && mv /root/resources/strongswan/* /etc/strongswan.d/ \
+ && mv /root/resources/ipsec/* /etc/ \
+ && mv /root/resources/config/firewall /etc/config/ \
+ && mv /root/resources/config/network /etc/config/ \
+ && mv /root/resources/config/uhttpd /etc/config/ \
+ && mv /root/resources/config/firewall.user /etc/ \
+ && mv /root/resources/bin/* /etc/init.d/ \
+ && ln -s /etc/init.d/getips /etc/rc.d/S20getips \
+ && ln -s /etc/init.d/getips /etc/rc.d/K90getips \
+ && ln -s /etc/init.d/setroutes /etc/rc.d/S99setroutes \
+    && ln -s /etc/init.d/setroutes /etc/rc.d/K99setroutes \
+ && rm -rf /root/resources/
+
+USER root
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/bin/getips b/src/arm/openwrt_demo/1_buildimage/resources/bin/getips
new file mode 100644
index 0000000..3c68e95
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/bin/getips
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+nwfn='/etc/config/network'
+gwPost=".1"
+nwPost=".0"
+
+ethname='eth0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+dirtyIp=$(grep ipaddr $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==1')
+dirtyGw=$(grep gateway $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==1')
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expNetPrefix$gwPost
+sed -i "s/$dirtyIp/$ipeth/g" $nwfn
+sed -i "s/$dirtyGw/$expGw/g" $nwfn
+
+
+ethname='net0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+dirtyIp=$(grep ipaddr $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==2')
+dirtyGw=$(grep gateway $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==2')
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expNetPrefix$gwPost
+sed -i "s/$dirtyIp/$ipeth/g" $nwfn
+sed -i "s/$dirtyGw/$expGw/g" $nwfn
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes b/src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes
new file mode 100644
index 0000000..540a235
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+nwfn='/etc/config/network'
+gwPost=".1"
+nwPost=".0"
+maskPost="/16"
+defaultgw="0.0.0.0/0"
+
+ethname='eth0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+expGwPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expGwPrefix$gwPost
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2)
+expNet=$expNetPrefix$nwPost$nwPost$maskPost
+echo "$expNet, $expGw, $ethname"
+ip route add $expNet via $expGw dev $ethname
+
+
+ethname='net0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+expGwPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expGwPrefix$gwPost
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2)
+expNet=$expNetPrefix$nwPost$nwPost$maskPost
+ip route add $expNet via $expGw dev $ethname
+ip route add $defaultgw via $expGw
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/firewall b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall
new file mode 100644
index 0000000..faa8851
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall
@@ -0,0 +1,149 @@
+
+config rule
+ option name '-testcustomer'
+ option src '*'
+ option src_ip '192.168.10.1/32'
+ option dest '*'
+ option dest_ip '151.101.0.0/16'
+ option target 'REJECT'
+
+config rule
+ option name 'Allow-DHCP-Renew'
+ option src 'wan'
+ option proto 'udp'
+ option dest_port '68'
+ option target 'ACCEPT'
+ option family 'ipv4'
+
+config rule
+ option name 'Allow-Ping'
+ option src 'wan'
+ option proto 'icmp'
+ option icmp_type 'echo-request'
+ option family 'ipv4'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-IGMP'
+ option src 'wan'
+ option proto 'igmp'
+ option family 'ipv4'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-DHCPv6'
+ option src 'wan'
+ option proto 'udp'
+ option src_ip 'fc00::/6'
+ option dest_ip 'fc00::/6'
+ option dest_port '546'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-MLD'
+ option src 'wan'
+ option proto 'icmp'
+ option src_ip 'fe80::/10'
+ list icmp_type '130/0'
+ list icmp_type '131/0'
+ list icmp_type '132/0'
+ list icmp_type '143/0'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-ICMPv6-Input'
+ option src 'wan'
+ option proto 'icmp'
+ list icmp_type 'echo-request'
+ list icmp_type 'echo-reply'
+ list icmp_type 'destination-unreachable'
+ list icmp_type 'packet-too-big'
+ list icmp_type 'time-exceeded'
+ list icmp_type 'bad-header'
+ list icmp_type 'unknown-header-type'
+ list icmp_type 'router-solicitation'
+ list icmp_type 'neighbour-solicitation'
+ list icmp_type 'router-advertisement'
+ list icmp_type 'neighbour-advertisement'
+ option limit '1000/sec'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-ICMPv6-Forward'
+ option src 'wan'
+ option dest '*'
+ option proto 'icmp'
+ list icmp_type 'echo-request'
+ list icmp_type 'echo-reply'
+ list icmp_type 'destination-unreachable'
+ list icmp_type 'packet-too-big'
+ list icmp_type 'time-exceeded'
+ list icmp_type 'bad-header'
+ list icmp_type 'unknown-header-type'
+ option limit '1000/sec'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option target 'ACCEPT'
+ option src 'lan'
+ option proto 'esp'
+ option src_ip '192.168.10.0/24'
+ option dest '*'
+ option name 'ipsecin'
+
+config rule
+ option target 'ACCEPT'
+ option proto 'esp'
+ option src '*'
+ option dest 'lan'
+ option dest_ip '192.168.10.0/24'
+ option name 'ipsecout'
+
+config rule
+ option target 'ACCEPT'
+ option proto 'udp'
+ option src 'lan'
+ option dest_port '500'
+ option name 'ipsec'
+
+config rule
+ option target 'ACCEPT'
+ option name '-ipsecnat'
+ option proto 'udp'
+ option src 'lan'
+ option dest_port '4500'
+
+config defaults
+ option syn_flood '1'
+ option input 'ACCEPT'
+ option output 'ACCEPT'
+ option forward 'REJECT'
+
+config zone
+ option name 'lan'
+ list network 'lan'
+ option input 'ACCEPT'
+ option output 'ACCEPT'
+ option forward 'ACCEPT'
+
+config zone
+ option name 'wan'
+ list network 'wan'
+ list network 'wan6'
+ option input 'REJECT'
+ option output 'ACCEPT'
+ option forward 'REJECT'
+ option masq '1'
+ option mtu_fix '1'
+
+config forwarding
+ option src 'lan'
+ option dest 'wan'
+
+config include
+ option path '/etc/firewall.user'
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user
new file mode 100644
index 0000000..ab61136
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user
@@ -0,0 +1,9 @@
+# This file is interpreted as shell script.
+# Put your custom iptables rules here, they will
+# be executed with each firewall (re-)start.
+
+# Internal uci firewall chains are flushed and recreated on reload, so
+# put custom rules into the root chains e.g. INPUT or FORWARD or into the
+# special user chains, e.g. input_wan_rule or postrouting_lan_rule.
+iptables -t nat -A POSTROUTING -s 192.168.10.0/24 -o eth0 -m policy --pol ipsec --dir out -j ACCEPT
+iptables -t nat -A POSTROUTING -s 192.168.10.0/24 -o eth0 -j MASQUERADE
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/network b/src/arm/openwrt_demo/1_buildimage/resources/config/network
new file mode 100644
index 0000000..eef18e8
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/network
@@ -0,0 +1,27 @@
+
+config interface 'loopback'
+ option ifname 'lo'
+ option proto 'static'
+ option ipaddr '127.0.0.1'
+ option netmask '255.0.0.0'
+
+config globals 'globals'
+ option ula_prefix 'fd5f:b3f4:4633::/48'
+
+config interface 'lan'
+ option ifname 'eth0'
+ option proto 'static'
+ option ipaddr '10.244.1.42'
+ option netmask '255.255.255.0'
+ option gateway '10.244.1.1'
+
+config interface 'wan'
+ option ifname 'net0'
+ option proto 'dhcp'
+
+config route 'r6'
+ option interface 'eth0'
+ option target '10.244.0.0'
+ option netmask '255.255.0.0'
+ option gateway '10.244.1.1'
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd b/src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd
new file mode 100644
index 0000000..fe0691d
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd
@@ -0,0 +1,24 @@
+
+config uhttpd 'main'
+ list listen_http '0.0.0.0:80'
+ option redirect_https '1'
+ option home '/www'
+ option rfc1918_filter '1'
+ option max_requests '3'
+ option max_connections '100'
+ option cert '/etc/uhttpd.crt'
+ option key '/etc/uhttpd.key'
+ option cgi_prefix '/cgi-bin'
+ option script_timeout '60'
+ option network_timeout '30'
+ option http_keepalive '20'
+ option tcp_keepalive '1'
+ option ubus_prefix '/ubus'
+
+config cert 'px5g'
+ option days '730'
+ option bits '2048'
+ option country 'ZZ'
+ option state 'Somewhere'
+ option location 'Unknown'
+ option commonname 'OpenWrt'
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf
new file mode 100644
index 0000000..9310276
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf
@@ -0,0 +1,29 @@
+config setup
+ charondebug="ike 1, knl 1, cfg 0"
+ uniqueids=no
+
+conn ikev2-vpn
+ auto=add
+ compress=no
+ type=tunnel
+ keyexchange=ikev2
+ fragmentation=yes
+ forceencaps=yes
+ ike=aes256-sha1-modp1024,3des-sha1-modp1024!
+ esp=aes256-sha1,3des-sha1!
+ dpdaction=clear
+ dpddelay=300s
+ rekey=no
+ left=%any
+ leftid=testvpn
+ leftcert=/etc/ipsec.d/certs/vpn-server-cert.pem
+ leftsendcert=always
+ leftsubnet=0.0.0.0/0
+ right=%any
+ rightid=%any
+ rightauth=eap-mschapv2
+ rightdns=8.8.8.8,8.8.4.4
+ rightsourceip=192.168.10.0/24
+ rightsendcert=never
+ eap_identity=%identity
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets
new file mode 100644
index 0000000..da553b7
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets
@@ -0,0 +1,5 @@
+# /etc/ipsec.secrets - strongSwan IPsec secrets file
+testvpn : RSA "/etc/ipsec.d/private/vpn-server-key.pem"
+test %any% : EAP "arm"
+test2 %any% : EAP "arm"
+test3 %any% : EAP "arm"
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem
new file mode 100644
index 0000000..f1b654d
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFLDCCAxSgAwIBAgIIPOALX6JnyDUwDQYJKoZIhvcNAQEMBQAwNDELMAkGA1UE
+BhMCQ04xEzARBgNVBAoTClZQTiBTZXJ2ZXIxEDAOBgNVBAMTB3Rlc3R2cG4wHhcN
+MTcwNDE4MDgwNzA3WhcNMjcwNDE2MDgwNzA3WjA0MQswCQYDVQQGEwJDTjETMBEG
+A1UEChMKVlBOIFNlcnZlcjEQMA4GA1UEAxMHdGVzdHZwbjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPg9YIcsvvXz9Zqepxx+92yBf/YkPFewortfvMay
+boF6AnJtGJB0JSpSm+ZPSrCk6GtBYvQit70OCwnXXDJQ/PDdE2aal1fD6/5wHugW
+yPWgNFIW1SuzEjxiTIIb13wYuJJ9eIev5jptyqn+obTwPHlKc8NfLGy22qpwR6i+
+y2Za4nblZUWuQkMaGshRJVF0SijGuIStZ0//JyzCge4ZPLVxcvSBhN/922FmZCog
+L9HNrI+1Q32Cv97UB5N4k3U2PWgzWGJWG4GVYKePIEniMPfdwHjwcinlAZ0WD3CA
+7shA5+0DjH/NX57H+0+QE4/CGT71jBziM0cBhNRd7S80nIy/WKNaVseuVa4Z3T9F
+XkfPPl0udCSEBwSOiURgAQNh2lljL3T4okb/MueuhMiR1mQfS/NXx0zbl95R7UcJ
+CAsZKhLxJ3eE0cxX9q+VWsXKizXAyi1puIXTfM6+xU1X1/c2FZtDIarxMqoBQylC
+H/HLJLwuoiROAEqtZLFYSAHkCDG9sDQ+UeD8fogL388R12L6+JaXf7UEFawByjEZ
+MEzRkUtt1fLXl0O3HuaoQKa5Q+sKSk/IYTynvRR7znEaVnZnK3yty+yNPBYdrsR4
+N/EXq6rIWwR6vdRxAae/BlOpKN76mFlN6g5DXBvguxFgEPbvAe7Eb/Hs7v9WPUni
+i9IlAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBSO1seRVuAcyN00/9Xlp19EPxlJiDANBgkqhkiG9w0BAQwFAAOCAgEA
+sZfhybZRUOzUrQOtyLhLinUbuOtj6lF9E9lxT9Wh8lu5hN2jXj5fh2c5THWMDvns
+4cIYO+3hbFYDRdL7lrtMLQU9YwhFGXixgCqBj3PxRdsiha0xSIVbTzFe8O01HA/l
+62KsGjZ2UWpTC2FYvmszDseAjcZ+SCCeQRyNrBdZ5UPsAnq5xnf6X+9JlRxuF4OE
+H6XPQJNhtx06VRD3dWSTkyNlmAARCXJKCCG/3s35ccSDG7AnMnc6b6uU0IEQ1WE1
+csvpHDdt2ianYMbafGlcL/B3UcvkpFPQ6aAQH2x5Kx6nUWOmBwksHcF/x8UtIAKJ
+QDRAGraefyMpHgTdLqm4FRjWsf5uoRvvjwvnKb4Waz59RUgKnBv5vVKVUSFhn0rf
+gHqDN969fkzbkedokxjUzMS+nNXBaI9zhVLlBpClXBdNUMamuUlPFq5jI+YF/G8J
+kDjoi+l9D/yCmMVTNdLn2WyXVmy2kJvovB7RsquPzcXlYJksGOSPq3EJC9iAtYz7
+NSgIHG9mnBAAetKRD3OMLSwYj2UyVOOKmOodiC1xbhT8+F/3F/d4vpN2oBdkfUgJ
+LmdIoLcgPe0mamfCwYY1pSibpDXRaXkrozoZBl+6r2PFQITLNiYGbjxpdyJffHAg
+rAqFWYHrARbKNHNUsV2TfOJD4XmAAdZO7YwZ3gNmniY=
+-----END CERTIFICATE-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem
new file mode 100644
index 0000000..48056be
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJJwIBAAKCAgEA+D1ghyy+9fP1mp6nHH73bIF/9iQ8V7Ciu1+8xrJugXoCcm0Y
+kHQlKlKb5k9KsKToa0Fi9CK3vQ4LCddcMlD88N0TZpqXV8Pr/nAe6BbI9aA0UhbV
+K7MSPGJMghvXfBi4kn14h6/mOm3Kqf6htPA8eUpzw18sbLbaqnBHqL7LZlriduVl
+Ra5CQxoayFElUXRKKMa4hK1nT/8nLMKB7hk8tXFy9IGE3/3bYWZkKiAv0c2sj7VD
+fYK/3tQHk3iTdTY9aDNYYlYbgZVgp48gSeIw993AePByKeUBnRYPcIDuyEDn7QOM
+f81fnsf7T5ATj8IZPvWMHOIzRwGE1F3tLzScjL9Yo1pWx65VrhndP0VeR88+XS50
+JIQHBI6JRGABA2HaWWMvdPiiRv8y566EyJHWZB9L81fHTNuX3lHtRwkICxkqEvEn
+d4TRzFf2r5VaxcqLNcDKLWm4hdN8zr7FTVfX9zYVm0MhqvEyqgFDKUIf8cskvC6i
+JE4ASq1ksVhIAeQIMb2wND5R4Px+iAvfzxHXYvr4lpd/tQQVrAHKMRkwTNGRS23V
+8teXQ7ce5qhAprlD6wpKT8hhPKe9FHvOcRpWdmcrfK3L7I08Fh2uxHg38Rerqshb
+BHq91HEBp78GU6ko3vqYWU3qDkNcG+C7EWAQ9u8B7sRv8ezu/1Y9SeKL0iUCAwEA
+AQKCAgBNP3xMVEZQb0xcg0ZpfbEtGNdjFz+X4iWhvVcXVetBa2Bbj0t3mE0AcJiH
+AOGzOn4A8mYCptMah8Yzl8re9Yjgw0sIQM8bxqInmWhkvMJofSQK74QCh0UDeWtp
+iZRyz5aQL29Ueg5g3E2WvOBBWAjZjaucfn9qjTRamXoTLtxIy7txWE09c8625uay
+s12zjUaOjdhZoURnBnWAXj7kgwH7TISDRdK9iVe9ZYmB+mYnGaO7TKLl6cwfYUfC
+QmFQtkJBrMiyQS1qE7vyKH3ZwAOQ/naoq9o640KvSXAgiF7F/jyt6s7L7nL1DDJO
+Pf14XORSTUL+sf1W+UgGdfwbFnooTXUf/SdaWC5KTYYxUwaTXcfYvFl/UJjhDXZ3
+C2n1szJ7MF0yOlUeUsmbGGCJwCq7kwvMZk8faueiqB2R5m741EI+LekEiktySWbg
+/J16sp3/4BGHq8OAXCl0FwPvlYGygT9F2GRKzMZX9oJkEryvMp+ObO6ViJrMAah7
+1gIFTVlubN483NjWjKGh79fdcfaccGU8l66COauXp6J2aFeSC9ajEfBggUiOWGiQ
+XazTUGlzW6pDHDaQB8kBcjZru3brb2j+6tX9ipVp3hpDTpymvNvtGS3v0V0Oyfta
+qbadqiRAlo+8vaGkxkEGhoFwWJ6rRatW7qc4ZpcxNFO4scGPeQKCAQEA/dcqVDAC
+oK105KLv9T0B9iGvoJeupJLswnNZbqzxQD5HDh9imjNxowJGgUat4FtSOYJP1eA2
+gFqmEIIduRYPNNmGxaT9ctSz1sO3Qz6+2zyJ2Xk3HTEwY3QLns8beANNvwjvqgJp
+vKgFWsY0GqD8XKHzeZYdAadxi2ZjZPRi0MU04daahp6WNsWNNexnRQHVSp0w5p6x
+FwSxKmmo/1x897rZf4R7Uc9nLwom9nYaKBR2HeqR+ySXGJUIyQ7E9lBkphVLYoDq
+AlIpQ0GycwJmHkWkCgmBOXfgPsYMe/Ir8WfXUabzrOyk8wGKFp4/QJYGeKizFqip
+2tQkBAxmpiIJuwKCAQEA+loDk83z+zJY8YmOlqcBJ7obid3SAjJr69MuYJuuv985
+SQhOhwLye8xvcW77oQfW2jjP/i425IcJAqCJCgpyJebSttrXnygYETbbQUhyfDcd
+zlgFHri9Wmu5gOskEsdUsoB+NjR6m6KcDbeYuiCQKkPOc0ITG6NtwE1RTvaW36kr
+bPN2YXEfK7+tA/i2LnGA2O5z0FJ+Nlnx1pJ9rAqpkpR04tocMX0NTJ637xVtGDFX
+r8Y2RtOaQeTfjDA+7v4syeSuOIch96iR89swsC13kujtMoS3G0CuCeL569JC31I7
+A/IvG5mWTdiDfDxq9sv0s2whRJwjHi+L0Xa+QyNlnwKCAQAi3usOs6W4wvta6VND
+gkUBtfD1g8DXFOP3dncjsBhYNfX257LY8hY7SXW8DqSWPJVYFyG2hN2X1lwXyngg
+0/n0zako/5hdrQCjkTFcyILZhUB+optCpF48W1W5VEQ2wWVtx+F8nmY+J2rM5IuF
+2PWyGAFlg4yqjIEZoFApLzVf7qdsGtoRgjmqfor+jGJHZZASdvOfys8TFW7tH6S6
+p873DTERxnZWb8KCAMgHdYP0W5M6Wt4A/S7QjrCtRh0ipTqeYjB/8Ku08+p9Nco4
+6Gx03iZBxrp81Y31salHYaZNvHEk42V4LO4f/+cjYkvYKIPtEWfAxhzHVfs4nyd+
+zRA/AoIBAEDk7GB31nKazmtt2MQ8bhQ6LcFC+pkPMOJkT3VDZbzexB6mRJTCstBc
+YdbpidhoC81tRJ0Cpb//MNq5ekxcANLKTnyPpazf2706lwMJIIQKVXOTZWBdStgR
+bHh6e1NS0CWlIRIz8EQ/lmwH11MH9da+1NkTm5hieKSMZjMtwFYhp9wKD/maNRZG
+DTcmVTMcwOV6ihLKD2VPU1znhCQAb4xLZzEWkJBTdgsSaWNUDn9i6vPpUVBysV27
+UicoqmeRA1MiL/b/MFLeI1cuziQc5Q3zyuh5dm1eCr8NUvNKAYOZ8SpIsOVanpd3
+ND4T+zYWEEwiD02Vm5TLhla5jQAiQMkCggEAbH5n7lyHfMl+Mft+EI58wPG85+Ih
+592FGSpQGYw4KtBhYOpD4mfoEi4PHHB72S41LSIAIbksXrXjIlpxlRUJWha0TxvJ
++/8fwMA1GRZ87eAwo4lkx+8kBt/5ipH0w+zO8mt83HYZz7Cnl3yU3iejSk6jeIAN
+12PYo17GetZWkuNu5PgwuaiaT/Hoy6WcFbCB+U3p+s7e93Fdk8sDrUq06P9Lpkp1
+goeAxtKKgpelmpBSKyDm3biYbd+32SrCp2wiMr30K1ttDAh8rinaS44atEnG58Ep
+8XTWeLsOVo72l3mvnLGTTzrmW9rFcCXTN7zeRugl7lehINn9bkg64kz57A==
+-----END RSA PRIVATE KEY-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem
new file mode 100644
index 0000000..7edbbe1
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFQDCCAyigAwIBAgIILxhLDcigK7IwDQYJKoZIhvcNAQEMBQAwNDELMAkGA1UE
+BhMCQ04xEzARBgNVBAoTClZQTiBTZXJ2ZXIxEDAOBgNVBAMTB3Rlc3R2cG4wHhcN
+MTcwNDE4MDgwNzE1WhcNMjIwNDE3MDgwNzE1WjA0MQswCQYDVQQGEwJDTjETMBEG
+A1UEChMKVlBOIFNlcnZlcjEQMA4GA1UEAxMHdGVzdHZwbjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAJ1m81Tj1/QJCw8rD3euk69ffLBxGh5sZ8vCn0dM
+mSXzU0xI5wv6Ss5tJsVCvesr741K3x+hgj6cdLj0UneGpSKz3ULn0+m7gACM401o
+Ms51aVEagz+O0fe9wWDZ+82xMXAw/bSvrMs34co8OofKF26WH6mPHxSkCU6edudm
+063zwQwlmvqeFhoxUvZtM65iUSQZrWuxBZkmEPfwfZz8E8v94xs40QicYl/gOoPP
+sgbzlsLQEqJAGrhC8HsMaNicr8n2Iie1PBxfhTdn/nqA4oQCrp5az28xGrjsNVXJ
+teTZTo0Nyg60bMbdR7rN5StWdDolzd/DKr8Jy3J/7xbgGHDftDnqMKLtsUPe+4Mi
+euLw3y1DkOZGt85dw05C/LbRupaZL3Yk7ehi+xPzNC6e3ssqKNjbffjtqDh3Ol3b
+5QmhBUoULWDzB9wSfwHueOFPptOK2c2pQh7U2bPcalXMwf6sCWdx3TokniLvAhxH
+8alBINZJ7ZSgA9vyH1KUzT5+5nXhPayXOXwvIEqNvig84bApCglIkO6jty1jZ79X
+Nd4TwOWuJSav4WQn3+t+5GWvrZzsuABzLruUcWTwdNA64Yw4AzwJoU6RZMbcHGPf
+bAofOtXn7H7ncrvWAahpFDmNge0GBsXSmTp01FBMEOdnnRG2b+C8dJyZpNPlr2si
+5oKJAgMBAAGjVjBUMB8GA1UdIwQYMBaAFI7Wx5FW4BzI3TT/1eWnX0Q/GUmIMBIG
+A1UdEQQLMAmCB3Rlc3R2cG4wHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFCAIC
+MA0GCSqGSIb3DQEBDAUAA4ICAQBPGakMBK6Wsc1zAwkogsKtnStU1tq1IaOAUfpN
+cANuP0n3gsAD3aFFYKHsecL2nC2YO+HgmXZNJUTlcSftVSA+roZKqS6gxEwH75xC
+ponFnqrVnzEP7mLTA4/DQGfTRcGBTY5JEr9BUZsl+sD5XeekAKQOtTq2du+1tFQU
+aJlqwv39a+D7dPGfue2jHlIC48b0HyFpL7gGPidB9QDWjKVC8ZBaf0RDqNy70Qyh
+a1iAbSAsWzHvEvwkUAVyk8+oRNwd0IPmbRyKZXLNXIqHsYmdXgfK7o+vF1Qv30rn
+U2OwFqpGLsmo7CGI9fDjWUqoGn5hJJppvvP3cjXqhgMsa/dxel9dQMs8ERIO4rkP
+YJUmH5RSZwyc1iAfikaAHFRy0zauK1sHX2DPg+xyY/FzU4bfdKQTZYEBzIgBoN4q
+fmGY2EuApH/Z4BAGk9RostQIOmXcbm0/PAZDMgCS7Ms7ONbm9y2dssuY5f2rURBh
+xsANB/D8lzTzHFOtxwgTRFuQ69SO8Q7htKK/+bGe2YhqgFi53M6FT2EDOiCPfG4t
+d437KMXyQzXSkBJYVwSM5xHvc1xMWH14YK2AZFbmCRGp9Iv5GJBd04Eb9ziU0iDi
+DtUoqjP9XWO3nf7CiJPIna6G0LXYDKjNz1vUzbmLeDnw8hSqQJbn7lp4VqF1pI0o
+taHEkA==
+-----END CERTIFICATE-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem
new file mode 100644
index 0000000..6d48ac4
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAnWbzVOPX9AkLDysPd66Tr198sHEaHmxny8KfR0yZJfNTTEjn
+C/pKzm0mxUK96yvvjUrfH6GCPpx0uPRSd4alIrPdQufT6buAAIzjTWgyznVpURqD
+P47R973BYNn7zbExcDD9tK+syzfhyjw6h8oXbpYfqY8fFKQJTp5252bTrfPBDCWa
++p4WGjFS9m0zrmJRJBmta7EFmSYQ9/B9nPwTy/3jGzjRCJxiX+A6g8+yBvOWwtAS
+okAauELwewxo2JyvyfYiJ7U8HF+FN2f+eoDihAKunlrPbzEauOw1Vcm15NlOjQ3K
+DrRsxt1Hus3lK1Z0OiXN38MqvwnLcn/vFuAYcN+0Oeowou2xQ977gyJ64vDfLUOQ
+5ka3zl3DTkL8ttG6lpkvdiTt6GL7E/M0Lp7eyyoo2Nt9+O2oOHc6XdvlCaEFShQt
+YPMH3BJ/Ae544U+m04rZzalCHtTZs9xqVczB/qwJZ3HdOiSeIu8CHEfxqUEg1knt
+lKAD2/IfUpTNPn7mdeE9rJc5fC8gSo2+KDzhsCkKCUiQ7qO3LWNnv1c13hPA5a4l
+Jq/hZCff637kZa+tnOy4AHMuu5RxZPB00DrhjDgDPAmhTpFkxtwcY99sCh861efs
+fudyu9YBqGkUOY2B7QYGxdKZOnTUUEwQ52edEbZv4Lx0nJmk0+WvayLmgokCAwEA
+AQKCAgACn+UiPEeH8GUp8EsVG65R1AGndwarQnM+IBgJZ4cdDVhCmxYYL+jyqs+v
+pcfhLaBpqUQ8b1Q1cKTXxyzUr6RIsSSuZot07VKVH8RJWy8QByrrBmpGbhPFTI7r
+4KKiCMilC7oVRYHNTy3xh3d8jL5Uh/C4oklvmIx2WO6CYrIYUip88NJrXVALc9kD
+wSklvkpcU93INYwnzuJ6TJJ+9+s+wT8PKi9pHs4bS2kVCiqYUqmSmhyEw6p2ZK5a
+HOg8JsEIePVTlxeXdlvJg1nzJ2ZE3X8Ve/iTbJy66pG5RRsRI8hQp43VQ4VV+7oE
+F5tmRuN6qzyfii4dVUpaSx9nW3P+L3hES6B7y4cE6hmQH09/qkpXoyb3JkXyRGZv
+QLkm/7ywSwmxjsS2a3YmpLffEZZHgccunWZAvCHHb2L4Cf17VY3hB1q7Bc+ANKIr
+KT32fUqH5MAt3usY0J3eaJ8RheKh9irJWxju/fxKFFQA9vesBxl1hZ3GYC7NQQky
+RAffn1FPW5e3OOrBfp374q1TxLVqmMtY+djh1OqlLaoXoAU+3+qrkM+Kt4mZVThz
+0G2MSujgLnDf00nddLMyNApnbcYoa1/mTYKaSpkTkHZpXjvBWn/GapK5NGfdDFiM
+JzW5J/Y8aXhBujdmW7/QxDW6aLcRKTtyML2wiYDDA/SyVfTukQKCAQEAzpUXU9W8
+dZ7Na1LXuNxAJWx4peRCV9UUE+6XQlcchPb6slLfTGrjSp1Bkgz8YR4yuypq8KG3
+DTEw5BPjkamgsYE8yzu1Etv6j2uGo/F+C7RdsRIu2jjGXrGFH+DNc+hEvuMEMk4x
+c9S1Xf4Gazllcmy01qJ+OZR2vvCdysU46IR+OnlZ9hH0b3DSOmhDMEJhArvjalVu
+4GPHGlC+0+y/wF5kXiQoPp4CzSNQ8Gho/KuhcCH+lqwkA2qqht/TzXvzeHvyiOVY
++cWCwdkYdxEt932OEfGw3hlfiyvsHIrq8svG7c+6VwKc0R9hHSuUQCDhbtnb4nT2
+lqwyBG3D6xKxWQKCAQEAww4bxRLpgwnCgqgtpK8buTj/DdesiQIDA5fAU4eYKKnL
+RHFfTiBU8pg55UONiQP0AciGefKWkfRaoX34/Jk27oJiA4MoWOOd71W1d7tjeq6C
+oVC0Jw7zxrq/LlrmbIz2jzOPP5upw9KyCApzAfv6Q3eOHuYpTCV7FTpeQQ2FObVu
+W4CsjiAudMt5OZDOEsv2Uxd44NUxHhQd3+uz+G/2VhF711LkNLrf7X5toDGTn3nJ
+R8QptNYMojf+p3rud24pFdGGuZ0hyqTrK94TBheMOM93LENNcevnBjPwQ7PO6tDE
+Y2SrigE1CSGFYOR/HvqrpXOKvM1xAPByx/6JesiEsQKCAQBprc9vLanpKcHAI3MD
+uHiALItTof9mWzSYNbffUhze0FHTI53jw9Jeey/t/QKm1AHzyXFHhBLWhtGR+7Kw
+82unIovtE7A/45S8Ba+s8n8ekbhUOw8Ix36DNqD5e9DeeHWiiRO+gE3ACZJ2cNrr
+w0LoVD/2hM25uv88Em9GKbpBCHZih23D+c9nqvmAs5GbgHmMIn3mCapc0+4owiG8
+3CID0MXbevezgLXCJ0zijycWCt7dNCa/AXSy4sA1mw8I0V3txsp9yYXI0IdhjyN6
+1akEMJCbEV7/X0+HLILu3wnuBtzPDzMuC8IZIMpXV9HRNIDeakiYAmmbDp/PsC9H
+dBqRAoIBAQCbQa6e9gfCktEteLoj/HG/w/tYRFSEFYLaqD6g/iwCKeyuxOMMZ7XW
+B48FyvhsmCXwCXHovUxWTr6ZDpFSVo4f2M41Z3+FCWBb8cfoztJHA4Lc7kUHVeJ6
+S4kDV71Tp/xVTb/27Gt7gEjPF6olaTDx5MbOF3vFrYvEANqnQyDJJ334/XncAweX
+VaJfTMCKu6iMyQEhTPC0tWR2KMHuvQfByFbftI4K3riA7IJL4UpUxPaO1jgwRbR2
+psVe//2yOJAhWs63Dbio+Q5rs29HCRVG3vRH2iZZyGDyUgMrkILh61x2lNnplj5l
+zzXAQwBgYzyfDFHhKFGLYtiqEhPSFKtxAoIBAEbqAGd7MrKmBNiXKo22EFeaZg3N
+w7yr9EVkuGKBWSy5pwQJ2o7lGUjEXsrUzNnWiRTuLkaGnngfgqY9L1PMQJQ65fas
+OGY34pguI8OiLPtveUuKL3dAMN5eeKV7kdpDZgtVKvWAANNWW7oaZV7OQT3wpqF1
+G19ObcrCVKfws3CDFO2KcUFbNesNubTuACCzB/j/jfx/X4UI5kkdZJxrcK6sK88G
+nqTFe0KNm9ud6HQe0eqmusk/jmf8ifOSHAONT8I9JBmpo4vpxSqoADnEajWc6MJt
+UTCk/WTnd6hG6ER6VuhhqzWjJ/dSrEpIR0LxGkHp9hO68c/BeVAbOmS6Q5Y=
+-----END RSA PRIVATE KEY-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf
new file mode 100644
index 0000000..c91421d
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf
@@ -0,0 +1,62 @@
+charon {
+
+ # Section to define file loggers, see LOGGER CONFIGURATION in
+ # strongswan.conf(5).
+ filelog {
+
+ # <filename> is the full path to the log file.
+ # <filename> {
+
+ # Loglevel for a specific subsystem.
+ # <subsystem> = <default>
+
+ # If this option is enabled log entries are appended to the existing
+ # file.
+ # append = yes
+
+ # Default loglevel.
+ # default = 1
+
+ # Enabling this option disables block buffering and enables line
+ # buffering.
+ # flush_line = no
+
+ # Prefix each log entry with the connection name and a unique
+ # numerical identifier for each IKE_SA.
+ # ike_name = no
+
+ # Prefix each log entry with a timestamp. The option accepts a
+ # format string as passed to strftime(3).
+ # time_format =
+
+ # }
+
+ }
+
+ # Section to define syslog loggers, see LOGGER CONFIGURATION in
+ # strongswan.conf(5).
+ syslog {
+
+ # Identifier for use with openlog(3).
+ # identifier =
+
+ # <facility> is one of the supported syslog facilities, see LOGGER
+ # CONFIGURATION in strongswan.conf(5).
+ # <facility> {
+
+ # Loglevel for a specific subsystem.
+ # <subsystem> = <default>
+
+ # Default loglevel.
+ # default = 1
+
+ # Prefix each log entry with the connection name and a unique
+ # numerical identifier for each IKE_SA.
+ # ike_name = no
+
+ # }
+
+ }
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf
new file mode 100644
index 0000000..5cab2b1
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf
@@ -0,0 +1,281 @@
+# Options for the charon IKE daemon.
+charon {
+
+ # Maximum number of half-open IKE_SAs for a single peer IP.
+ # block_threshold = 5
+
+ # Whether relations in validated certificate chains should be cached in
+ # memory.
+ # cert_cache = yes
+
+ # Send Cisco Unity vendor ID payload (IKEv1 only).
+ # cisco_unity = no
+
+ # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed.
+ # close_ike_on_child_failure = no
+
+ # Number of half-open IKE_SAs that activate the cookie mechanism.
+ # cookie_threshold = 10
+
+ # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic
+ # strength.
+ # dh_exponent_ansi_x9_42 = yes
+
+ # DNS server assigned to peer via configuration payload (CP).
+ # dns1 =
+
+ # DNS server assigned to peer via configuration payload (CP).
+ # dns2 =
+
+ # Enable Denial of Service protection using cookies and aggressiveness
+ # checks.
+ # dos_protection = yes
+
+ # Compliance with the errata for RFC 4753.
+ # ecp_x_coordinate_only = yes
+
+ # Free objects during authentication (might conflict with plugins).
+ # flush_auth_cfg = no
+
+ # Maximum size (in bytes) of a sent fragment when using the proprietary
+ # IKEv1 fragmentation extension.
+ # fragment_size = 512
+
+ # Name of the group the daemon changes to after startup.
+ # group =
+
+ # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING).
+ # half_open_timeout = 30
+
+ # Enable hash and URL support.
+ # hash_and_url = no
+
+ # Allow IKEv1 Aggressive Mode with pre-shared keys as responder.
+ # i_dont_care_about_security_and_use_aggressive_mode_psk = no
+
+ # A space-separated list of routing tables to be excluded from route
+ # lookups.
+ # ignore_routing_tables =
+
+ # Maximum number of IKE_SAs that can be established at the same time before
+ # new connection attempts are blocked.
+ # ikesa_limit = 0
+
+ # Number of exclusively locked segments in the hash table.
+ # ikesa_table_segments = 1
+
+ # Size of the IKE_SA hash table.
+ # ikesa_table_size = 1
+
+ # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity.
+ # inactivity_close_ike = no
+
+ # Limit new connections based on the current number of half open IKE_SAs,
+ # see IKE_SA_INIT DROPPING in strongswan.conf(5).
+ # init_limit_half_open = 0
+
+ # Limit new connections based on the number of queued jobs.
+ # init_limit_job_load = 0
+
+ # Causes charon daemon to ignore IKE initiation requests.
+ # initiator_only = no
+
+ # Install routes into a separate routing table for established IPsec
+ # tunnels.
+ # install_routes = yes
+
+ # Install virtual IP addresses.
+ # install_virtual_ip = yes
+
+ # The name of the interface on which virtual IP addresses should be
+ # installed.
+ # install_virtual_ip_on =
+
+ # Check daemon, libstrongswan and plugin integrity at startup.
+ # integrity_test = no
+
+ # A comma-separated list of network interfaces that should be ignored, if
+ # interfaces_use is specified this option has no effect.
+ # interfaces_ignore =
+
+ # A comma-separated list of network interfaces that should be used by
+ # charon. All other interfaces are ignored.
+ # interfaces_use =
+
+ # NAT keep alive interval.
+ # keep_alive = 20s
+
+ # Plugins to load in the IKE daemon charon.
+ # load =
+
+ # Determine plugins to load via each plugin's load option.
+ # load_modular = no
+
+ # Maximum packet size accepted by charon.
+ # max_packet = 10000
+
+ # Enable multiple authentication exchanges (RFC 4739).
+ # multiple_authentication = yes
+
+ # WINS servers assigned to peer via configuration payload (CP).
+ # nbns1 =
+
+ # WINS servers assigned to peer via configuration payload (CP).
+ # nbns2 =
+
+ # UDP port used locally. If set to 0 a random port will be allocated.
+ # port = 500
+
+ # UDP port used locally in case of NAT-T. If set to 0 a random port will be
+ # allocated. Has to be different from charon.port, otherwise a random port
+ # will be allocated.
+ # port_nat_t = 4500
+
+ # Process RTM_NEWROUTE and RTM_DELROUTE events.
+ # process_route = yes
+
+ # Delay in ms for receiving packets, to simulate larger RTT.
+ # receive_delay = 0
+
+ # Delay request messages.
+ # receive_delay_request = yes
+
+ # Delay response messages.
+ # receive_delay_response = yes
+
+ # Specific IKEv2 message type to delay, 0 for any.
+ # receive_delay_type = 0
+
+ # Size of the AH/ESP replay window, in packets.
+ # replay_window = 32
+
+ # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION
+ # in strongswan.conf(5).
+ # retransmit_base = 1.8
+
+ # Timeout in seconds before sending first retransmit.
+ # retransmit_timeout = 4.0
+
+ # Number of times to retransmit a packet before giving up.
+ # retransmit_tries = 5
+
+ # Interval to use when retrying to initiate an IKE_SA (e.g. if DNS
+ # resolution failed), 0 to disable retries.
+ # retry_initiate_interval = 0
+
+ # Initiate CHILD_SA within existing IKE_SAs.
+ # reuse_ikesa = yes
+
+ # Numerical routing table to install routes to.
+ # routing_table =
+
+ # Priority of the routing table.
+ # routing_table_prio =
+
+ # Delay in ms for sending packets, to simulate larger RTT.
+ # send_delay = 0
+
+ # Delay request messages.
+ # send_delay_request = yes
+
+ # Delay response messages.
+ # send_delay_response = yes
+
+ # Specific IKEv2 message type to delay, 0 for any.
+ # send_delay_type = 0
+
+ # Send strongSwan vendor ID payload
+ # send_vendor_id = no
+
+ # Number of worker threads in charon.
+ # threads = 16
+
+ # Name of the user the daemon changes to after startup.
+ # user =
+
+ crypto_test {
+
+ # Benchmark crypto algorithms and order them by efficiency.
+ # bench = no
+
+ # Buffer size used for crypto benchmark.
+ # bench_size = 1024
+
+ # Number of iterations to test each algorithm.
+ # bench_time = 50
+
+ # Test crypto algorithms during registration (requires test vectors
+ # provided by the test-vectors plugin).
+ # on_add = no
+
+ # Test crypto algorithms on each crypto primitive instantiation.
+ # on_create = no
+
+ # Strictly require at least one test vector to enable an algorithm.
+ # required = no
+
+ # Whether to test RNG with TRUE quality; requires a lot of entropy.
+ # rng_true = no
+
+ }
+
+ host_resolver {
+
+ # Maximum number of concurrent resolver threads (they are terminated if
+ # unused).
+ # max_threads = 3
+
+ # Minimum number of resolver threads to keep around.
+ # min_threads = 0
+
+ }
+
+ leak_detective {
+
+ # Includes source file names and line numbers in leak detective output.
+ # detailed = yes
+
+ # Threshold in bytes for leaks to be reported (0 to report all).
+ # usage_threshold = 10240
+
+ # Threshold in number of allocations for leaks to be reported (0 to
+ # report all).
+ # usage_threshold_count = 0
+
+ }
+
+ processor {
+
+ # Section to configure the number of reserved threads per priority class
+ # see JOB PRIORITY MANAGEMENT in strongswan.conf(5).
+ priority_threads {
+
+ }
+
+ }
+
+ tls {
+
+ # List of TLS encryption ciphers.
+ # cipher =
+
+ # List of TLS key exchange methods.
+ # key_exchange =
+
+ # List of TLS MAC algorithms.
+ # mac =
+
+ # List of TLS cipher suites.
+ # suites =
+
+ }
+
+ x509 {
+
+ # Discard certificates with unsupported or unknown critical extensions.
+ # enforce_critical = yes
+
+ }
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf
new file mode 100644
index 0000000..297c0f8
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf
@@ -0,0 +1,12 @@
+pool {
+
+ # Database URI for the database that stores IP pools and configuration
+ # attributes. If it contains a password, make sure to adjust the
+ # permissions of the config file accordingly.
+ # database =
+
+ # Plugins to load in ipsec pool tool.
+ # load =
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf
new file mode 100644
index 0000000..8465f7e
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf
@@ -0,0 +1,10 @@
+starter {
+
+ # Plugins to load in starter.
+ # load =
+
+ # Disable charon plugin load option warning.
+ # load_warning = yes
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf
new file mode 100644
index 0000000..a3ab099
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf
@@ -0,0 +1,21 @@
+openac {
+
+ # Plugins to load in ipsec openac tool.
+ # load =
+
+}
+
+pki {
+
+ # Plugins to load in ipsec pki tool.
+ # load =
+
+}
+
+scepclient {
+
+ # Plugins to load in ipsec scepclient tool.
+ # load =
+
+}
+
diff --git a/src/helm-charts/clearwater/Chart.yaml b/src/helm-charts/clearwater/Chart.yaml
index e69de29..1482dd5 100644
--- a/src/helm-charts/clearwater/Chart.yaml
+++ b/src/helm-charts/clearwater/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+description: Helm chart for Clearwater
+name: clearwater-project
+version: 0.1.0
+sources:
+ - https://github.com/Metaswitch/clearwater-docker/
+maintainers:
+ - name: Laura Sofia Enriquez
+ email: lsofia.enriquez@gmail.com
+ - name: Yujun Zhang
+ email: zhang.yujunz@zte.com.cn
diff --git a/src/helm-charts/clearwater/README.md b/src/helm-charts/clearwater/README.md
index e69de29..8a509de 100644
--- a/src/helm-charts/clearwater/README.md
+++ b/src/helm-charts/clearwater/README.md
@@ -0,0 +1,19 @@
+# Metaswitch Clearwater vIMS Chart
+
+Based on [Metaswitch's Clearwater](https://github.com/Metaswitch/clearwater-docker) k8s configuration.
+
+
+## Configuration
+
+The following table lists the configurable parameters of the chart and their default values.
+
+
+Parameter | Description | Default
+--- | --- | ---
+`image.path` | Docker Hub repository | `enriquetaso`
+`image.tag` | Docker image tag | `latest`
+`config.configmaps` | Custom ConfigMap | `env-vars`
+`config.zone` | Custom namespace | `default.svc.cluster.local`
+`config.ip` | MANDATORY: must be replaced with the external IP | `None`
+
+
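
A usage sketch under the assumption that the chart is installed with Helm 2 from this directory; the release name and the IP value are placeholders, and the --set flags map to the parameters in the table above:

    # config.ip is mandatory; 10.0.0.100 stands in for the cluster's external IP.
    helm install . --name clearwater \
      --set config.ip=10.0.0.100 \
      --set image.path=enriquetaso \
      --set image.tag=latest \
      --set config.zone=default.svc.cluster.local
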
diff --git a/src/helm-charts/clearwater/templates/NOTES.txt b/src/helm-charts/clearwater/templates/NOTES.txt
index e69de29..6756fa8 100644
--- a/src/helm-charts/clearwater/templates/NOTES.txt
+++ b/src/helm-charts/clearwater/templates/NOTES.txt
@@ -0,0 +1,19 @@
+Thank you for installing {{ .Chart.Name }}.
+
+Your release is named {{ .Release.Name }}.
+
+To learn more about the release, try:
+
+ $ helm status {{ .Release.Name }}
+ $ helm get {{ .Release.Name }}
+
+The deployment exposes:
+
+ - the Ellis web UI on port 30080 for self-provisioning.
+ - STUN/TURN on port 3478 for media relay.
+ - SIP on port 5060 for service.
+ - SIP/WebSocket on port 5062 for service.
+
+SIP devices can register with bono on port 5060, and the Ellis provisioning interface can be accessed on port 30080.
+
+Make a call: http://clearwater.readthedocs.io/en/stable/Making_your_first_call.html
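
A short post-install check, assuming kubectl access to the cluster; <node-ip> is a placeholder for any node address reachable from the client:

    # Pods and the exposed services described above.
    kubectl get pods
    kubectl get svc bono ellis
    # Ellis self-provisioning UI on NodePort 30080.
    curl -I http://<node-ip>:30080/
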
diff --git a/src/helm-charts/clearwater/templates/astaire-depl.yaml b/src/helm-charts/clearwater/templates/astaire-depl.yaml
new file mode 100644
index 0000000..94c4855
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/astaire-depl.yaml
@@ -0,0 +1,54 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: astaire
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: astaire
+ app: astaire
+ spec:
+ terminationGracePeriodSeconds: 120
+ containers:
+ - image: "{{ .Values.image.path }}/astaire:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: astaire
+ ports:
+ - containerPort: 22
+ - containerPort: 11211
+ - containerPort: 11311
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 11311
+ periodSeconds: 10
+ failureThreshold: 9
+ readinessProbe:
+ tcpSocket:
+ port: 11311
+ volumeMounts:
+ - name: astairelogs
+ mountPath: /var/log/astaire
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "/usr/bin/pre-stop"]
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/astaire/astaire_current.txt" ]
+ volumeMounts:
+ - name: astairelogs
+ mountPath: /var/log/astaire
+ volumes:
+ - name: astairelogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/astaire-svc.yaml b/src/helm-charts/clearwater/templates/astaire-svc.yaml
new file mode 100644
index 0000000..e82dcdd
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/astaire-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: astaire
+spec:
+ ports:
+ - name: "http-astaire"
+ port: 11311
+ selector:
+ service: astaire
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/bono-depl.yaml b/src/helm-charts/clearwater/templates/bono-depl.yaml
new file mode 100644
index 0000000..94020ed
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/bono-depl.yaml
@@ -0,0 +1,66 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: bono
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: bono
+ template:
+ metadata:
+ labels:
+ service: bono
+ snmp: enabled
+ app: bono
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/bono:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: bono
+ ports:
+ - containerPort: 22
+ - containerPort: 3478
+ - containerPort: 5060
+ - containerPort: 5062
+ - containerPort: 5060
+ protocol: "UDP"
+ - containerPort: 5062
+ protocol: "UDP"
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: PUBLIC_IP
+ value: {{ .Values.config.ip }}
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "3478 5060 5062"]
+ initialDelaySeconds: 30
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "3478 5060 5062"]
+ volumeMounts:
+ - name: bonologs
+ mountPath: /var/log/bono
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/bono/bono_current.txt" ]
+ volumeMounts:
+ - name: bonologs
+ mountPath: /var/log/bono
+ volumes:
+ - name: bonologs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/bono-svc.yaml b/src/helm-charts/clearwater/templates/bono-svc.yaml
new file mode 100644
index 0000000..3b3da5c
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/bono-svc.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ externalIPs:
+ - {{ .Values.config.ip }}
+ loadBalancerIP: {{ .Values.config.ip }}
+ ports:
+ - name: "tcp-3478"
+ port: 3478
+ protocol: TCP
+ targetPort: 3478
+ - name: "tcp-5060"
+ port: 5060
+ protocol: TCP
+ targetPort: 5060
+ - name: "tcp-5062"
+ port: 5062
+ protocol: TCP
+ targetPort: 5062
+ selector:
+ service: bono
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}
diff --git a/src/helm-charts/clearwater/templates/cassandra-depl.yaml b/src/helm-charts/clearwater/templates/cassandra-depl.yaml
new file mode 100644
index 0000000..4a7f6c8
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/cassandra-depl.yaml
@@ -0,0 +1,38 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: cassandra
+spec:
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ service: cassandra
+ app: cassandra
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/cassandra:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: cassandra
+ ports:
+ - containerPort: 22
+ - containerPort: 7001
+ - containerPort: 9042
+ - containerPort: 9160
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "7000 9042 9160"]
+ # Cassandra can take a very, very long time to start up
+ initialDelaySeconds: 600
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "7000 9042 9160"]
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/cassandra-svc.yaml b/src/helm-charts/clearwater/templates/cassandra-svc.yaml
new file mode 100644
index 0000000..7cb9892
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/cassandra-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: cassandra
+spec:
+ ports:
+ - name: "http-7001"
+ port: 7001
+ - name: "http-7000"
+ port: 7000
+ - name: "http-9042"
+ port: 9042
+ - name: "http-9160"
+ port: 9160
+ selector:
+ service: cassandra
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/chronos-depl.yaml b/src/helm-charts/clearwater/templates/chronos-depl.yaml
new file mode 100644
index 0000000..2f65ad8
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/chronos-depl.yaml
@@ -0,0 +1,55 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ service: chronos
+ name: chronos
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: chronos
+ app: chronos
+ spec:
+ terminationGracePeriodSeconds: 120
+ containers:
+ - image: "{{ .Values.image.path }}/chronos:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: chronos
+ ports:
+ - containerPort: 22
+ - containerPort: 7253
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 7253
+ periodSeconds: 10
+ failureThreshold: 9
+ readinessProbe:
+ tcpSocket:
+ port: 7253
+ volumeMounts:
+ - name: chronoslogs
+ mountPath: /var/log/chronos
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "/usr/bin/pre-stop"]
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/chronos/chronos_current.txt" ]
+ volumeMounts:
+ - name: chronoslogs
+ mountPath: /var/log/chronos
+ volumes:
+ - name: chronoslogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/chronos-svc.yaml b/src/helm-charts/clearwater/templates/chronos-svc.yaml
new file mode 100644
index 0000000..3815b28
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/chronos-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: chronos
+spec:
+ ports:
+ - name: "http-7253"
+ port: 7253
+ selector:
+ service: chronos
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/ellis-depl.yaml b/src/helm-charts/clearwater/templates/ellis-depl.yaml
new file mode 100644
index 0000000..e231bf1
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ellis-depl.yaml
@@ -0,0 +1,35 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: ellis
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: ellis
+ app: ellis
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/ellis:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: ellis
+ ports:
+ - containerPort: 22
+ - containerPort: 80
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/ellis-svc.yaml b/src/helm-charts/clearwater/templates/ellis-svc.yaml
new file mode 100644
index 0000000..60e24d5
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ellis-svc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: ellis
+spec:
+ type: NodePort
+ ports:
+ - name: "http-ellis"
+ port: 80
+ nodePort: 30080
+ selector:
+ service: ellis
diff --git a/src/helm-charts/clearwater/templates/env-vars-cm.yaml b/src/helm-charts/clearwater/templates/env-vars-cm.yaml
new file mode 100644
index 0000000..3f25432
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/env-vars-cm.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.config.configmaps }}
+data:
+ ZONE: {{ .Values.config.zone }}
diff --git a/src/helm-charts/clearwater/templates/etcd-depl.yaml b/src/helm-charts/clearwater/templates/etcd-depl.yaml
new file mode 100644
index 0000000..5d6e184
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/etcd-depl.yaml
@@ -0,0 +1,59 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: etcd
+spec:
+ replicas: 1
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ instance-type: etcd-pod
+ app: etcd-pod
+ spec:
+ containers:
+ - args:
+ - --name
+ - $(MY_POD_NAME)
+ - --advertise-client-urls
+ - http://$(MY_POD_IP):2379,http://$(MY_POD_IP):4001
+ - --listen-client-urls
+ - http://0.0.0.0:2379,http://0.0.0.0:4001
+ - --initial-advertise-peer-urls
+ - http://$(MY_POD_IP):2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ # By default use a single pod cluster
+ - --initial-cluster
+ - $(MY_POD_NAME)=http://$(MY_POD_IP):2380
+      # Alternatively, multi-pod clusters can be supported using central discovery. Run e.g.
+ # curl https://discovery.etcd.io/new?size=3 | sed s/https/http/
+ # to get a discovery URL for a 3 pod cluster, substitute the returned value below, and
+ # set replicas: 3 above.
+ #- --discovery
+ #- <URL returned by command above>
+ - --initial-cluster-state
+ - new
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: quay.io/coreos/etcd:v2.2.5
+ name: etcd
+ ports:
+ - containerPort: 2379
+ - containerPort: 2380
+ - containerPort: 4001
+ livenessProbe:
+ tcpSocket:
+ port: 4001
+ initialDelaySeconds: 300
+ readinessProbe:
+ tcpSocket:
+ port: 4001
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/etcd-svc.yaml b/src/helm-charts/clearwater/templates/etcd-svc.yaml
new file mode 100644
index 0000000..0c02b62
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/etcd-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: etcd
+ labels:
+ instance-type: etcd-pod
+spec:
+ ports:
+ - name: "http-etcd-client"
+ port: 2379
+ - name: "http-etcd-server"
+ port: 2380
+ - name: "http-4001"
+ port: 4001
+ selector:
+ instance-type: etcd-pod
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/homer-depl.yaml b/src/helm-charts/clearwater/templates/homer-depl.yaml
new file mode 100644
index 0000000..c9a292e
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homer-depl.yaml
@@ -0,0 +1,35 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: homer
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ service: homer
+ app: homer
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/homer:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: homer
+ ports:
+ - containerPort: 22
+ - containerPort: 7888
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 7888
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 7888
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/homer-svc.yaml b/src/helm-charts/clearwater/templates/homer-svc.yaml
new file mode 100644
index 0000000..8acc0ed
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homer-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: homer
+spec:
+ ports:
+ - name: "http-7888"
+ port: 7888
+ selector:
+ service: homer
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/homestead-depl.yaml b/src/helm-charts/clearwater/templates/homestead-depl.yaml
new file mode 100644
index 0000000..590ea51
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-depl.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: homestead
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: homestead
+ template:
+ metadata:
+ labels:
+ service: homestead
+ snmp: enabled
+ app: homestead
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/homestead:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: homestead
+ ports:
+ - containerPort: 22
+ - containerPort: 8888
+ envFrom:
+ - configMapRef:
+ name: env-vars
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "8888"]
+ initialDelaySeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "8888"]
+ volumeMounts:
+ - name: homesteadlogs
+ mountPath: /var/log/homestead
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/homestead/homestead_current.txt" ]
+ volumeMounts:
+ - name: homesteadlogs
+ mountPath: /var/log/homestead
+ volumes:
+ - name: homesteadlogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/homestead-prov-depl.yaml b/src/helm-charts/clearwater/templates/homestead-prov-depl.yaml
new file mode 100644
index 0000000..ecf9f8d
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-prov-depl.yaml
@@ -0,0 +1,39 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: homestead-prov
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: homestead-prov
+ template:
+ metadata:
+ labels:
+ service: homestead-prov
+ snmp: enabled
+ app: homestead-prov
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/homestead-prov:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: homestead-prov
+ ports:
+ - containerPort: 22
+ - containerPort: 8889
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/clearwater/bin/poll_homestead-prov.sh"]
+ initialDelaySeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/clearwater/bin/poll_homestead-prov.sh"]
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/homestead-prov-svc.yaml b/src/helm-charts/clearwater/templates/homestead-prov-svc.yaml
new file mode 100644
index 0000000..4ce2dd9
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-prov-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: homestead-prov
+spec:
+ ports:
+ - name: "http-8889"
+ port: 8889
+ selector:
+ service: homestead-prov
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/homestead-svc.yaml b/src/helm-charts/clearwater/templates/homestead-svc.yaml
new file mode 100644
index 0000000..7684d2e
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/homestead-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: homestead
+spec:
+ ports:
+ - name: "http-8888"
+ port: 8888
+ selector:
+ service: homestead
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/ralf-depl.yaml b/src/helm-charts/clearwater/templates/ralf-depl.yaml
new file mode 100644
index 0000000..8efcc5e
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ralf-depl.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: ralf
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: ralf
+ template:
+ metadata:
+ labels:
+ service: ralf
+ snmp: enabled
+ app: ralf
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/ralf:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: ralf
+ ports:
+ - containerPort: 22
+ - containerPort: 10888
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ tcpSocket:
+ port: 10888
+ initialDelaySeconds: 30
+ readinessProbe:
+ tcpSocket:
+ port: 10888
+ volumeMounts:
+ - name: ralflogs
+ mountPath: /var/log/ralf
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/ralf/ralf_current.txt" ]
+ volumeMounts:
+ - name: ralflogs
+ mountPath: /var/log/ralf
+ volumes:
+ - name: ralflogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/ralf-svc.yaml b/src/helm-charts/clearwater/templates/ralf-svc.yaml
new file mode 100644
index 0000000..9fc44c3
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/ralf-svc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: ralf
+spec:
+ ports:
+ - name: "http-10888"
+ port: 10888
+ selector:
+ service: ralf
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/templates/sprout-depl.yaml b/src/helm-charts/clearwater/templates/sprout-depl.yaml
new file mode 100644
index 0000000..da2989c
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/sprout-depl.yaml
@@ -0,0 +1,51 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: sprout
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ service: sprout
+ template:
+ metadata:
+ labels:
+ service: sprout
+ snmp: enabled
+ app: sprout
+ spec:
+ containers:
+ - image: "{{ .Values.image.path }}/sprout:{{ .Values.image.tag }}"
+ imagePullPolicy: Always
+ name: sprout
+ ports:
+ - containerPort: 22
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.config.configmaps }}
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ livenessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "5052 5054"]
+ initialDelaySeconds: 30
+ periodSeconds: 3
+ readinessProbe:
+ exec:
+ command: ["/bin/bash", "/usr/share/kubernetes/liveness.sh", "5052 5054"]
+ volumeMounts:
+ - name: sproutlogs
+ mountPath: /var/log/sprout
+ - image: busybox
+ name: tailer
+ command: [ "tail", "-F", "/var/log/sprout/sprout_current.txt" ]
+ volumeMounts:
+ - name: sproutlogs
+ mountPath: /var/log/sprout
+ volumes:
+ - name: sproutlogs
+ emptyDir: {}
+ restartPolicy: Always
diff --git a/src/helm-charts/clearwater/templates/sprout-svc.yaml b/src/helm-charts/clearwater/templates/sprout-svc.yaml
new file mode 100644
index 0000000..092a51c
--- /dev/null
+++ b/src/helm-charts/clearwater/templates/sprout-svc.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: sprout
+spec:
+ ports:
+ - name: "http-5052"
+ port: 5052
+ - name: "http-5054"
+ port: 5054
+ selector:
+ service: sprout
+ clusterIP: None
diff --git a/src/helm-charts/clearwater/values.yaml b/src/helm-charts/clearwater/values.yaml
index e69de29..ce789ee 100644
--- a/src/helm-charts/clearwater/values.yaml
+++ b/src/helm-charts/clearwater/values.yaml
@@ -0,0 +1,7 @@
+image:
+ path: enriquetaso
+ tag: latest
+config:
+ configmaps: env-vars
+ zone: default.svc.cluster.local
+ ip: None
diff --git a/src/vagrant/kubeadm_basic/host_setup.sh b/src/vagrant/kubeadm_basic/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_basic/host_setup.sh
+++ b/src/vagrant/kubeadm_basic/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_clearwater/Vagrantfile b/src/vagrant/kubeadm_clearwater/Vagrantfile
index 9320074..3ed02d5 100644
--- a/src/vagrant/kubeadm_clearwater/Vagrantfile
+++ b/src/vagrant/kubeadm_clearwater/Vagrantfile
@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
config.vm.box = "ceph/ubuntu-xenial"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
diff --git a/src/vagrant/kubeadm_clearwater/clearwater_setup.sh b/src/vagrant/kubeadm_clearwater/clearwater_setup.sh
new file mode 100755
index 0000000..e579773
--- /dev/null
+++ b/src/vagrant/kubeadm_clearwater/clearwater_setup.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+static_ip=$(ifconfig eth0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
+echo "STATIC_IP is $static_ip."
+
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+
+# Set the configmaps
+kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local
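+# (the clearwater pods read ZONE from this ConfigMap via envFrom/configMapRef)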
+
+# Generate the yamls
+cd clearwater-docker/kubernetes/
+./k8s-gencfg --image_path=enriquetaso --image_tag=latest
+
+# Expose Ellis
+# The Ellis provisioning interface can then be accessed on static_ip:30080
+cat ellis-svc.yaml | sed "s/clusterIP: None/type: NodePort/" > ellis-svc.yaml.new
+cat ellis-svc.yaml.new | sed "s/port: 80/port: 80\n nodePort: 30080/" > ellis-svc.yaml
+rm ellis-svc.yaml.new
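+# For illustration only, the edited ellis-svc.yaml is expected to end up roughly like:
+#   spec:
+#     type: NodePort
+#     ports:
+#     - port: 80
+#       nodePort: 30080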
+
+# Bono configuration
+# Have a static external IP address available that the load balancer can use
+cp /vagrant/custom-bono-svc/bono-svc.yaml .
+sed -ie "6s/$/\n - $static_ip/" bono-svc.yaml
+sed -ie "7s/$/\n loadBalancerIP: $static_ip/" bono-svc.yaml
+
+cd
+kubectl apply -f clearwater-docker/kubernetes
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+sleep 60
+
+r="1"
+while [ $r != "0" ]
+do
+ kubectl get pods
+ r=$( kubectl get pods | grep Pending | wc -l)
+ sleep 60
+done
+
+q="1"
+while [ $q != "0" ]
+do
+ kubectl get pods
+ q=$( kubectl get pods | grep ContainerCreating | wc -l)
+ sleep 60
+done
diff --git a/src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml b/src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml
new file mode 100644
index 0000000..9280b0f
--- /dev/null
+++ b/src/vagrant/kubeadm_clearwater/custom-bono-svc/bono-svc.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ externalIPs:
+ ports:
+ - name: "3478"
+ port: 3478
+ protocol: TCP
+ targetPort: 3478
+ - name: "5060"
+ port: 5060
+ protocol: TCP
+ targetPort: 5060
+ - name: "5062"
+ port: 5062
+ protocol: TCP
+ targetPort: 5062
+ selector:
+ service: bono
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}
diff --git a/src/vagrant/kubeadm_clearwater/deploy.sh b/src/vagrant/kubeadm_clearwater/deploy.sh
index 844a750..54644a3 100755
--- a/src/vagrant/kubeadm_clearwater/deploy.sh
+++ b/src/vagrant/kubeadm_clearwater/deploy.sh
@@ -6,4 +6,7 @@ DIR="$(dirname `readlink -f $0`)"
cd $DIR
../cleanup.sh
vagrant up
-vagrant ssh master -c "/vagrant/examples/create_and_apply.sh"
+vagrant ssh master -c "/vagrant/clearwater_setup.sh"
+
+# Run tests
+vagrant ssh master -c "/vagrant/tests/clearwater-live-test.sh"
diff --git a/src/vagrant/kubeadm_clearwater/host_setup.sh b/src/vagrant/kubeadm_clearwater/host_setup.sh
index b86a618..524a967 100644
--- a/src/vagrant/kubeadm_clearwater/host_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/host_setup.sh
@@ -21,9 +21,9 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
-sudo rm -rf /var/lib/kubelet
-sudo systemctl stop kubelet
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_clearwater/master_setup.sh b/src/vagrant/kubeadm_clearwater/master_setup.sh
index 7fa2ad8..b181582 100644
--- a/src/vagrant/kubeadm_clearwater/master_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/master_setup.sh
@@ -3,11 +3,8 @@
set -ex
sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-sudo cp /etc/kubernetes/admin.conf $HOME/
-sudo chown $(id -u):$(id -g) $HOME/admin.conf
-export KUBECONFIG=$HOME/admin.conf
-echo "export KUBECONFIG=$HOME/admin.conf" >> $HOME/.bash_profile
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f http://git.io/weave-kube-1.6
-#kubectl apply -f http://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
-#kubectl apply -f http://docs.projectcalico.org/v2.1/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
diff --git a/src/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh b/src/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh
new file mode 100755
index 0000000..6e5238e
--- /dev/null
+++ b/src/vagrant/kubeadm_clearwater/tests/clearwater-live-test.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+# http://clearwater.readthedocs.io/en/latest/Running_the_live_tests.html
+sudo apt-get install build-essential bundler git --yes
+sudo apt install gnupg2 --yes
+gpg2 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+curl -L https://get.rvm.io | bash -s stable
+
+source ~/.rvm/scripts/rvm
+rvm autolibs enable
+rvm install 1.9.3
+rvm use 1.9.3
+
+
+# Set up ruby and gems
+git clone https://github.com/Metaswitch/clearwater-live-test.git
+cd clearwater-live-test/
+cd quaff/ && git clone https://github.com/Metaswitch/quaff.git
+cd ..
+bundle install
+
+# Get Ellis ip
+ellisip=$(kubectl get services ellis -o json | grep clusterIP | cut -f4 -d'"')
+
+# Get Bono ip
+bonoip=$(kubectl get services bono -o json | grep clusterIP | cut -f4 -d'"')
+
+# Run the tests
+rake test[default.svc.cluster.local] SIGNUP_CODE=secret PROXY=$bonoip ELLIS=$ellisip
diff --git a/src/vagrant/kubeadm_clearwater/worker_setup.sh b/src/vagrant/kubeadm_clearwater/worker_setup.sh
index b68d800..74e4178 100644
--- a/src/vagrant/kubeadm_clearwater/worker_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/worker_setup.sh
@@ -1,4 +1,4 @@
#!/bin/bash
set -ex
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
diff --git a/src/vagrant/kubeadm_istio/host_setup.sh b/src/vagrant/kubeadm_istio/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_istio/host_setup.sh
+++ b/src/vagrant/kubeadm_istio/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_istio/istio/bookinfo.sh b/src/vagrant/kubeadm_istio/istio/bookinfo.sh
index b61ea4e..ad8c120 100755
--- a/src/vagrant/kubeadm_istio/istio/bookinfo.sh
+++ b/src/vagrant/kubeadm_istio/istio/bookinfo.sh
@@ -23,6 +23,9 @@ export PATH=$PWD/bin:$PATH
# Run the test application: bookinfo
kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml)
+# Define the ingress gateway for the application
+istioctl create -f samples/bookinfo/routing/bookinfo-gateway.yaml
+
# Wait for bookinfo deployed
kubectl get services
kubectl get pods
@@ -30,12 +33,12 @@ kubectl get pods
r="0"
while [ $r -ne "6" ]
do
+ sleep 30
kubectl get pods
r=$(kubectl get pods | grep Running | wc -l)
- sleep 60
done
# Validate the bookinfo app
-export GATEWAY_URL=$(kubectl get po -l istio=ingress -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingress -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
+export GATEWAY_URL=$(kubectl get po -l istio=ingressgateway -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage
diff --git a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh b/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh
index b5bfc08..ede825f 100755
--- a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh
+++ b/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh
@@ -21,7 +21,7 @@ cd /vagrant/istio-source/
export PATH=$PWD/bin:$PATH
# Clean up bookinfo
-samples/bookinfo/kube/cleanup.sh
+echo "" | samples/bookinfo/kube/cleanup.sh
istioctl get routerules
kubectl get pods
diff --git a/src/vagrant/kubeadm_istio/istio/deploy.sh b/src/vagrant/kubeadm_istio/istio/deploy.sh
index ed873f5..84af41b 100755
--- a/src/vagrant/kubeadm_istio/istio/deploy.sh
+++ b/src/vagrant/kubeadm_istio/istio/deploy.sh
@@ -17,23 +17,36 @@
set -ex
-# Deploy istio 0.4.0
+# Get the latest istio version; refer to https://git.io/getLatestIstio
+if [ "x${ISTIO_VERSION}" = "x" ] ; then
+ ISTIO_VERSION=$(curl -L -s https://api.github.com/repos/istio/istio/releases/latest | \
+ grep tag_name | sed "s/ *\"tag_name\": *\"\(.*\)\",*/\1/")
+fi
+
+ISTIO_DIR_NAME="istio-$ISTIO_VERSION"
+
cd /vagrant
curl -L https://git.io/getLatestIstio | sh -
-mv istio-0.4.0 istio-source
+mv $ISTIO_DIR_NAME istio-source
cd /vagrant/istio-source/
-export PATH=$PWD/bin:$PATH
-kubectl apply -f install/kubernetes/istio.yaml
+
+# Persistently append the istioctl bin path to the PATH env var
+echo 'export PATH="$PATH:/vagrant/istio-source/bin"' >> ~/.bashrc
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+source ~/.bashrc
+
+kubectl apply -f install/kubernetes/istio-demo.yaml
# Validate the installation
kubectl get svc -n istio-system
kubectl get pods -n istio-system
+kubectl get namespace -L istio-injection
-r="0"
-while [ $r -ne "4" ]
+r="1"
+while [ $r -ne "0" ]
do
+ sleep 30
kubectl get pods -n istio-system
- r=$(kubectl get pods -n istio-system | grep Running | wc -l)
- sleep 60
+ r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running' | wc -l)
done
diff --git a/src/vagrant/kubeadm_istio/master_setup.sh b/src/vagrant/kubeadm_istio/master_setup.sh
index b181582..f308244 100644
--- a/src/vagrant/kubeadm_istio/master_setup.sh
+++ b/src/vagrant/kubeadm_istio/master_setup.sh
@@ -2,9 +2,32 @@
set -ex
+ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NodeRestriction,ResourceQuota"
+KUBE_APISERVER_CONF="/etc/kubernetes/manifests/kube-apiserver.yaml"
+
sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f http://git.io/weave-kube-1.6
+
+# Enable mutating webhook admission controller
+# kube-apiserver will be automatically restarted by kubelet when its manifest file is updated.
+# https://istio.io/docs/setup/kubernetes/sidecar-injection.html
+sudo sed -i "s/admission-control=.*/admission-control=$ADMISSION_CONTROL/g" $KUBE_APISERVER_CONF
+
+set +e
+# wait for kube-apiserver restart
+r="1"
+while [ $r -ne "0" ]
+do
+ sleep 2
+ kubectl version > /dev/null
+ r=$?
+done
+set -e
+
+# check if admissionregistration.k8s.io/v1beta1 API is enabled
+kubectl api-versions | grep admissionregistration
+
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.sh b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
index 96d776c..a66b7ca 100755
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.sh
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
@@ -20,6 +20,11 @@ kubectl get nodes
kubectl get services
kubectl get pods
kubectl get rc
-sleep 180
+r=0
+while [ "$r" -eq "0" ]
+do
+ sleep 30
+ r=$(kubectl get pods | grep Running | wc -l)
+done
svcip=$(kubectl get services nginx -o json | grep clusterIP | cut -f4 -d'"')
wget http://$svcip
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
index f80881a..9de4ef4 100644
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
@@ -23,6 +23,8 @@ spec:
metadata:
labels:
app: nginx
+ annotations:
+ io.kubernetes.cri.untrusted-workload: "true"
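+ # The annotation above tells the containerd CRI plugin to run this pod with the
+ # untrusted workload runtime (kata-runtime), as configured in kata_setup.sh.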
spec:
containers:
- name: nginx
diff --git a/src/vagrant/kubeadm_kata/host_setup.sh b/src/vagrant/kubeadm_kata/host_setup.sh
index f9e1a76..02bb296 100644
--- a/src/vagrant/kubeadm_kata/host_setup.sh
+++ b/src/vagrant/kubeadm_kata/host_setup.sh
@@ -25,20 +25,42 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
-
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated kubelet=1.10.5-00 kubeadm=1.10.5-00 kubectl=1.10.5-00 kubernetes-cni=0.6.0-00
+
+sudo swapoff -a
sudo systemctl stop kubelet
sudo rm -rf /var/lib/kubelet
sudo systemctl daemon-reload
sudo systemctl start kubelet
+
+
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 5EDB1B62EC4926EA
+sudo apt-get update -y
+sudo apt-get install software-properties-common -y
+sudo apt-add-repository cloud-archive:queens -y
+sudo apt-get update -y
+
+#sudo apt-get build-dep dkms -y
+sudo apt-get install python-six openssl python-pip -y
+sudo -H pip install --upgrade pip
+sudo -H pip install ovs
+#sudo apt-get install openvswitch-datapath-dkms -y
+sudo apt-get install openvswitch-switch openvswitch-common -y
+sudo apt-get install ovn-central ovn-common ovn-host -y
+sudo modprobe vport-geneve
+
+wget https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz
+sudo tar -xvf go1.8.3.linux-amd64.tar.gz -C /usr/local/
+mkdir -p $HOME/go/src
+export GOPATH=$HOME/go
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+git clone https://github.com/openvswitch/ovn-kubernetes -b v0.3.0
+cd ovn-kubernetes/go-controller
+make
+sudo make install
diff --git a/src/vagrant/kubeadm_kata/kata_setup.sh b/src/vagrant/kubeadm_kata/kata_setup.sh
index 9682f3a..53a2bbf 100644
--- a/src/vagrant/kubeadm_kata/kata_setup.sh
+++ b/src/vagrant/kubeadm_kata/kata_setup.sh
@@ -17,33 +17,27 @@
set -ex
-wget https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz
-sudo tar -xvf go1.8.3.linux-amd64.tar.gz -C /usr/local/
-mkdir -p $HOME/go/src
-export GOPATH=$HOME/go
-export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
+curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
+sudo -E apt-get update
+sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
+sudo -E apt-get -y install libseccomp2
+
+wget http://storage.googleapis.com/cri-containerd-release/cri-containerd-1.1.0.linux-amd64.tar.gz >& /dev/null
+sudo tar -C / -xzf cri-containerd-1.1.0.linux-amd64.tar.gz
+sudo systemctl start containerd
+sudo mkdir -p /opt/cni/bin
+sudo mkdir -p /etc/cni/net.d
+sudo mkdir -p /etc/containerd
+containerd config default | sudo tee /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_type.*/runtime_type=\"io.containerd.runtime.v1.linux\"/" /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_engine.*/runtime_engine=\"kata-runtime\"/" /etc/containerd/config.toml
+sudo systemctl restart containerd
+
+cat << EOF | sudo tee /etc/systemd/system/kubelet.service.d/0-containerd.conf
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+EOF
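+# With this drop-in, kubelet uses containerd (and kata for untrusted pods) over the
+# CRI socket instead of the docker runtime.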
-go get github.com/clearcontainers/tests
-cd $GOPATH/src/github.com/clearcontainers/tests/.ci
-
-echo "Install dependencies"
-bash -f ./setup_env_ubuntu.sh
-
-echo "Install shim"
-bash -f ./install_shim.sh
-
-echo "Install proxy"
-bash -f ./install_proxy.sh
-
-echo "Install runtime"
-bash -f ./install_runtime.sh
-
-echo "Install CRI-O"
-bash -f ./install_crio.sh
-
-sudo systemctl stop kubelet
-echo "Modify kubelet systemd configuration to use CRI-O"
-k8s_systemd_file="/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
-sudo sed -i '/KUBELET_AUTHZ_ARGS/a Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/var/run/crio.sock --runtime-request-timeout=30m"' "$k8s_systemd_file"
sudo systemctl daemon-reload
-sudo systemctl start kubelet
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm_kata/master_setup.sh b/src/vagrant/kubeadm_kata/master_setup.sh
index 3748f01..42b3aee 100644
--- a/src/vagrant/kubeadm_kata/master_setup.sh
+++ b/src/vagrant/kubeadm_kata/master_setup.sh
@@ -17,18 +17,11 @@
set -ex
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+sudo kubeadm init --skip-preflight-checks --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf .kube/config
sudo chown $(id -u):$(id -g) ~/.kube/config
-kubectl apply -f http://git.io/weave-kube-1.6
+nohup /usr/bin/kubectl proxy --address=0.0.0.0 --accept-hosts=.* --port=8080 & sleep 1
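+# ovnkube (below, and on the workers) reaches the API server through this local
+# proxy at http://192.168.1.10:8080.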
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
-
-sudo systemctl restart crio
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -net-controller -loglevel=4 -k8s-apiserver=http://192.168.1.10:8080 -logfile=/var/log/openvswitch/ovnkube.log -init-master=master -cluster-subnet=10.32.0.0/12 -service-cluster-ip-range=10.96.0.0/16 -nodeport -nb-address=tcp://192.168.1.10:6631 -sb-address=tcp://192.168.1.10:6632 &
diff --git a/src/vagrant/kubeadm_kata/worker_setup.sh b/src/vagrant/kubeadm_kata/worker_setup.sh
index a6e4bf4..63d42a5 100644
--- a/src/vagrant/kubeadm_kata/worker_setup.sh
+++ b/src/vagrant/kubeadm_kata/worker_setup.sh
@@ -16,16 +16,25 @@
#
set -ex
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification \
+ --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 \
+ --ignore-preflight-errors=SystemVerification,CRI,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
sudo apt-get install -y putty-tools
mkdir ~/.kube
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
+echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
-sudo systemctl restart crio
+CENTRAL_IP=192.168.1.10
+NODE_NAME=$(hostname)
+TOKEN="8c5adc.1cec8dbf339093f0"
+
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -loglevel=4 \
+ -logfile="/var/log/openvswitch/ovnkube.log" \
+ -k8s-apiserver="http://$CENTRAL_IP:8080" \
+ -init-node="$NODE_NAME" \
+ -nodeport \
+ -nb-address="tcp://$CENTRAL_IP:6631" \
+ -sb-address="tcp://$CENTRAL_IP:6632" -k8s-token="$TOKEN" \
+ -init-gateways \
+ -service-cluster-ip-range=10.96.0.0/16 \
+ -cluster-subnet=10.32.0.0/12 &
diff --git a/src/vagrant/kubeadm_multus/host_setup.sh b/src/vagrant/kubeadm_multus/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_multus/host_setup.sh
+++ b/src/vagrant/kubeadm_multus/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_onap/Vagrantfile b/src/vagrant/kubeadm_onap/Vagrantfile
index fe24252..699f607 100644
--- a/src/vagrant/kubeadm_onap/Vagrantfile
+++ b/src/vagrant/kubeadm_onap/Vagrantfile
@@ -1,17 +1,17 @@
-$num_workers=1
+$num_workers=4
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
- config.vm.box = "yk0/ubuntu-xenial"
- config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.box = "ceph/ubuntu-xenial"
config.vm.define "master" do |config|
config.vm.hostname = "master"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "master_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.10"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
end
@@ -19,23 +19,14 @@ Vagrant.configure("2") do |config|
(1 .. $num_workers).each do |i|
config.vm.define vm_name = "worker%d" % [i] do |config|
config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "worker_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.#{i+20}"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 81920
- libvirt.cpus = 32
+ libvirt.memory = 40960
+ libvirt.cpus = 16
end
end
end
- config.vm.define "onap" do |config|
- config.vm.hostname = "onap"
- config.vm.provision "shell", path: "onap_setup.sh", privileged: false
- config.vm.network :private_network, ip: "192.168.0.5"
- config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 2048
- libvirt.cpus = 1
- end
- end
-
end
diff --git a/src/vagrant/kubeadm_onap/host_setup.sh b/src/vagrant/kubeadm_onap/host_setup.sh
index 87b0062..9cfd266 100755
--- a/src/vagrant/kubeadm_onap/host_setup.sh
+++ b/src/vagrant/kubeadm_onap/host_setup.sh
@@ -4,13 +4,15 @@ set -ex
cat << EOF | sudo tee /etc/hosts
127.0.0.1 localhost
-192.168.0.5 onap
192.168.0.10 master
192.168.0.21 worker1
192.168.0.22 worker2
192.168.0.23 worker3
+192.168.0.24 worker4
EOF
+sudo ifconfig eth1 mtu 1400
+
sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
@@ -22,18 +24,17 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
-sudo systemctl stop docker
cat << EOF | sudo tee /etc/docker/daemon.json
{
- "storage-driver": "overlay"
+ "insecure-registries" : [ "nexus3.onap.org:10001" ]
}
EOF
sudo systemctl daemon-reload
-sudo systemctl start docker
+sudo systemctl restart docker
-sudo systemctl stop kubelet
-sudo rm -rf /var/lib/kubelet
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_onap/master_setup.sh b/src/vagrant/kubeadm_onap/master_setup.sh
index fa451a2..8840541 100755
--- a/src/vagrant/kubeadm_onap/master_setup.sh
+++ b/src/vagrant/kubeadm_onap/master_setup.sh
@@ -1,13 +1,28 @@
#!/bin/bash
-
set -ex
-sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/24 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+server 127.127.1.0
+fudge 127.127.1.0 stratum 10
+EOF
+sudo service ntp restart
+
+sudo apt install nfs-kernel-server -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+cat << EOF | sudo tee /etc/exports
+/dockerdata-nfs *(rw,sync,no_subtree_check,no_root_squash)
+EOF
+sudo systemctl restart nfs-kernel-server.service
+
+sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf ~/.kube/config
-sudo chown $(id -u):$(id -g) ~/.kube/config
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+sed -i "s/kube-subnet-mgr/kube-subnet-mgr\n - --iface=eth1/" kube-flannel.yml
+kubectl apply -f kube-flannel.yml
-kubectl apply -f http://git.io/weave-kube-1.6
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-helm init
-kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+/vagrant/onap_setup.sh
diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh
index b876580..e4edd8f 100755
--- a/src/vagrant/kubeadm_onap/onap_setup.sh
+++ b/src/vagrant/kubeadm_onap/onap_setup.sh
@@ -2,41 +2,19 @@
set -ex
-sudo apt-get install -y putty-tools python-openstackclient
-mkdir ~/.kube
-r=0
-while [ "$r" == "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep "tiller-deploy.*Run" | wc -l)
-done
+kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+wget https://storage.googleapis.com/kubernetes-helm/helm-v2.8.2-linux-amd64.tar.gz
+tar xzvf helm-v2.8.2-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/
+helm init
+helm serve &
+helm repo remove stable
+helm repo add local http://127.0.0.1:8879
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-git clone http://gerrit.onap.org/r/oom
-cd oom; git checkout amsterdam
-source /vagrant/openstack/openrc
-cat <<EOF | tee ~/oom/kubernetes/config/onap-parameters.yaml
-OPENSTACK_UBUNTU_14_IMAGE: "ubuntu1404"
-OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
-OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
-OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
-OPENSTACK_OAM_NETWORK_CIDR: "10.0.0.0/16"
-OPENSTACK_USERNAME: "admin"
-OPENSTACK_API_KEY: "adim"
-OPENSTACK_TENANT_NAME: "admin"
-OPENSTACK_TENANT_ID: "47899782ed714295b1151681fdfd51f5"
-OPENSTACK_REGION: "RegionOne"
-OPENSTACK_KEYSTONE_URL: "http://192.168.0.30:5000/v2.0"
-OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
-OPENSTACK_SERVICE_TENANT_NAME: "service"
-DMAAP_TOPIC: "AUTO"
-DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
-EOF
-cd ~/oom/kubernetes/oneclick && ./deleteAll.bash -n onap || true
-(kubectl delete ns onap; helm del --purge onap-config) || true
-echo "y\n" | plink -ssh -pw vagrant vagrant@worker1 "sudo rm -rf /dockerdata-nfs/onap"
-cd ~/oom/kubernetes/config && ./createConfig.sh -n onap
-while true; do sleep 30; kubectl get pods --all-namespaces | grep onap | wc -l | grep "^0$" && break; done
-source ~/oom/kubernetes/oneclick/setenv.bash
-cd ~/oom/kubernetes/oneclick && ./createAll.bash -n onap
+git clone -b beijing http://gerrit.onap.org/r/oom
+cd oom/kubernetes
+
+sudo apt-get install make -y
+make all
+sleep 300
+helm install local/onap -n dev --namespace onap
diff --git a/src/vagrant/kubeadm_onap/registry_setup.sh b/src/vagrant/kubeadm_onap/registry_setup.sh
new file mode 100644
index 0000000..669268b
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/registry_setup.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -ex
+
+sudo apt-get update -y
+sudo apt install -y jq docker.io
+
+NEXUS_REPO=nexus3.onap.org:10001
+LOCAL_REPO=192.168.0.2:5000
+
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries" : [ "$LOCAL_REPO" ]
+}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+
+sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+
+dockers=$(curl -X GET https://$NEXUS_REPO/v2/_catalog | jq -r ".repositories[]")
+for d in $dockers
+do
+ tags=$(curl -X GET https://$NEXUS_REPO/v2/$d/tags/list | jq -r ".tags[]")
+ for t in $tags
+ do
+ sudo docker pull $NEXUS_REPO/$d:$t
+ sudo docker tag $NEXUS_REPO/$d:$t $LOCAL_REPO/$d:$t
+ sudo docker push $LOCAL_REPO/$d:$t
+ done
+done
diff --git a/src/vagrant/kubeadm_onap/setup_swap.sh b/src/vagrant/kubeadm_onap/setup_swap.sh
new file mode 100644
index 0000000..c2432b7
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_swap.sh
@@ -0,0 +1,5 @@
+sudo swapoff -a
+sudo fallocate -l 50G /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+sudo swapon --show
diff --git a/src/vagrant/kubeadm_onap/setup_tunnel.sh b/src/vagrant/kubeadm_onap/setup_tunnel.sh
new file mode 100644
index 0000000..3a6ef75
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_tunnel.sh
@@ -0,0 +1,3 @@
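+# Replace <local>/<remote> with this host's and the peer's tunnel endpoint addresses,
+# and <br> with the existing bridge to attach tunnel0 to, before running.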
+sudo ip link add tunnel0 type gretap local <local> remote <remote>
+sudo ifconfig tunnel0 up
+sudo brctl addif <br> tunnel0
diff --git a/src/vagrant/kubeadm_onap/worker_setup.sh b/src/vagrant/kubeadm_onap/worker_setup.sh
index aa60df3..e65a65c 100755
--- a/src/vagrant/kubeadm_onap/worker_setup.sh
+++ b/src/vagrant/kubeadm_onap/worker_setup.sh
@@ -1,11 +1,15 @@
#!/bin/bash
-
set -ex
-sudo mkdir /dockerdata-nfs
-sudo chmod 755 /dockerdata-nfs
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+pool master
+EOF
+sudo service ntp restart
-sudo apt-get install -y putty-tools
-mkdir ~/.kube
-echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+
+sudo apt-get install nfs-common -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+sudo mount master:/dockerdata-nfs /dockerdata-nfs
diff --git a/src/vagrant/kubeadm_ovsdpdk/host_setup.sh b/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
index b86a618..b2ee85c 100644
--- a/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
+++ b/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
sudo rm -rf /var/lib/kubelet
sudo systemctl stop kubelet
diff --git a/src/vagrant/kubeadm_snort/Vagrantfile b/src/vagrant/kubeadm_snort/Vagrantfile
new file mode 100644
index 0000000..9320074
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/Vagrantfile
@@ -0,0 +1,29 @@
+$num_workers=2
+
+Vagrant.require_version ">= 1.8.6"
+Vagrant.configure("2") do |config|
+
+ config.vm.box = "ceph/ubuntu-xenial"
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.memory = 4096
+ libvirt.cpus = 4
+ end
+
+ config.vm.synced_folder "../..", "/src"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
+
+ config.vm.define "master" do |config|
+ config.vm.hostname = "master"
+ config.vm.provision "shell", path: "master_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.10"
+ end
+
+ (1 .. $num_workers).each do |i|
+ config.vm.define vm_name = "worker%d" % [i] do |config|
+ config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "worker_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.#{i+20}"
+ end
+ end
+
+end
diff --git a/src/vagrant/kubeadm_snort/deploy.sh b/src/vagrant/kubeadm_snort/deploy.sh
new file mode 100755
index 0000000..e1e16d6
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/deploy.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -ex
+DIR="$(dirname `readlink -f $0`)"
+
+cd $DIR
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/snort/snort-setup.sh"
diff --git a/src/vagrant/kubeadm_snort/host_setup.sh b/src/vagrant/kubeadm_snort/host_setup.sh
new file mode 100644
index 0000000..524a967
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/host_setup.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.10 master
+192.168.1.21 worker1
+192.168.1.22 worker2
+192.168.1.23 worker3
+EOF
+
+sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
+cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
+deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+
+sudo swapoff -a
+sudo systemctl daemon-reload
+sudo systemctl stop kubelet
+sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_snort/master_setup.sh b/src/vagrant/kubeadm_snort/master_setup.sh
new file mode 100644
index 0000000..972768f
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/master_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f https://raw.githubusercontent.com/weaveworks/weave/master/prog/weave-kube/weave-daemonset-k8s-1.6.yaml
diff --git a/src/vagrant/kubeadm_clearwater/examples/create_and_apply.sh b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
index fdbb2b1..08ae663 100755
--- a/src/vagrant/kubeadm_clearwater/examples/create_and_apply.sh
+++ b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
@@ -17,27 +17,14 @@
set -ex
-git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
-
-# Set the configmaps
-kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local --from-literal=ADDITIONAL_SHARED_CONFIG=hss_hostname=hss.example.com\\nhss_realm=example.com
-
-# Genereta the yamls
-cd clearwater-docker/kubernetes/
-#./k8s-gencfg --image_path=<path to your repo> --image_tag=<tag for the images you want to use>
-./k8s-gencfg --image_path=enriquetaso --image_tag=latest
-
-
-# Apply yamls
-cd
-kubectl apply -f clearwater-docker/kubernetes
+kubectl create -f /vagrant/snort/snort.yaml
kubectl get nodes
kubectl get services
kubectl get pods
kubectl get rc
r="0"
-while [ $r != "13" ]
+while [ $r -ne "2" ]
do
r=$(kubectl get pods | grep Running | wc -l)
sleep 60
diff --git a/src/vagrant/kubeadm_snort/snort/snort.yaml b/src/vagrant/kubeadm_snort/snort/snort.yaml
new file mode 100644
index 0000000..60dede2
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/snort/snort.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: snort-service
+ labels:
+ app: snort
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ protocol: TCP
+ name: http
+ selector:
+ app: snort
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: snort-pod
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: snort
+ spec:
+ containers:
+ - name: snort
+ image: frapsoft/snort
+ args: ["-v"]
+ ports:
+ - containerPort: 80
diff --git a/src/vagrant/kubeadm_snort/worker_setup.sh b/src/vagrant/kubeadm_snort/worker_setup.sh
new file mode 100644
index 0000000..74e4178
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/worker_setup.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+set -ex
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
diff --git a/src/vagrant/kubeadm_virtlet/host_setup.sh b/src/vagrant/kubeadm_virtlet/host_setup.sh
index b86a618..b2ee85c 100644
--- a/src/vagrant/kubeadm_virtlet/host_setup.sh
+++ b/src/vagrant/kubeadm_virtlet/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
sudo rm -rf /var/lib/kubelet
sudo systemctl stop kubelet
diff --git a/src/vagrant/setup_vagrant.sh b/src/vagrant/setup_vagrant.sh
index 2dc5ae0..fcde052 100755
--- a/src/vagrant/setup_vagrant.sh
+++ b/src/vagrant/setup_vagrant.sh
@@ -30,9 +30,9 @@ ${USER} ALL = (root) NOPASSWD:ALL
EOF
sudo apt-get update -y
sudo apt-get install -y git unzip
- wget https://releases.hashicorp.com/vagrant/1.8.7/vagrant_1.8.7_x86_64.deb
- sudo dpkg -i vagrant_1.8.7_x86_64.deb
- rm -rf vagrant_1.8.7_x86_64.deb
+ wget https://releases.hashicorp.com/vagrant/2.0.2/vagrant_2.0.2_x86_64.deb
+ sudo dpkg -i vagrant_2.0.2_x86_64.deb
+ rm -rf vagrant_2.0.2_x86_64.deb
sudo apt-get install -y virtualbox
@@ -41,7 +41,7 @@ EOF
sudo apt-get update
sudo apt-get build-dep vagrant ruby-libvirt -y
sudo apt-get install -y bridge-utils qemu libvirt-bin ebtables dnsmasq
- sudo apt-get install -y libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
+ sudo apt-get install -y libffi-dev libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
vagrant plugin install vagrant-libvirt
sudo adduser ${USER} libvirtd
sudo service libvirtd restart