summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--INFO35
-rw-r--r--INFO.yaml25
-rwxr-xr-xci/deploy.sh10
-rw-r--r--docs/arm/container4nfv_on_arm.rst14
-rw-r--r--docs/arm/container4nfv_openwrt_demo_deployment.rst318
-rw-r--r--docs/arm/data_plane_dpdk_deployment.rst56
-rw-r--r--docs/arm/data_plane_sriov_pf_deployment.rst267
-rw-r--r--docs/arm/multi_flannel_intfs_deployment.rst186
-rw-r--r--docs/conf.py1
-rw-r--r--docs/conf.yaml3
-rw-r--r--docs/development/gapanalysis/gap-analysis-architecture-options.rst69
-rw-r--r--docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst4
-rw-r--r--docs/development/gapanalysis/gap-analysis-openstack.rst6
-rw-r--r--docs/development/gapanalysis/gap-analysis-opnfv-installer.rst4
-rw-r--r--docs/development/gapanalysis/index.rst8
-rw-r--r--docs/development/ngvsrequirements/ngvs-requirements-document.rst13
-rw-r--r--docs/index.rst17
-rw-r--r--docs/release/index.rst15
-rw-r--r--docs/release/release-notes/index.rst13
-rw-r--r--docs/release/release-notes/release-notes.rst16
-rw-r--r--docs/release/scenarios/k8-nosdn-virtlet-noha/index.rst18
-rw-r--r--docs/release/scenarios/k8-nosdn-virtlet-noha/scenario.description.rst (renamed from docs/release/userguide/virlet.rst)0
-rw-r--r--docs/release/scenarios/k8-ovn-kata-noha/index.rst18
-rw-r--r--docs/release/scenarios/k8-ovn-kata-noha/scenario.description.rst (renamed from docs/release/userguide/kata.rst)0
-rw-r--r--docs/release/userguide/index.rst11
-rw-r--r--docs/release/userguide/scenario.rst15
-rw-r--r--docs/release/userguide/snort.rst33
-rw-r--r--docs/requirements.txt2
-rw-r--r--src/arm/cni-deploy/.gitignore1
-rw-r--r--src/arm/cni-deploy/deploy.yml32
-rw-r--r--src/arm/cni-deploy/inventory/inventory.cfg18
-rw-r--r--src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml86
-rw-r--r--src/arm/cni-deploy/roles/flannel/tasks/main.yml12
-rw-r--r--src/arm/cni-deploy/roles/multus/files/10-multus.conf13
-rw-r--r--src/arm/cni-deploy/roles/multus/files/clusterrole.yml16
-rw-r--r--src/arm/cni-deploy/roles/multus/files/crdnetwork.yml15
-rw-r--r--src/arm/cni-deploy/roles/multus/files/flannel-obj.yml13
-rw-r--r--src/arm/cni-deploy/roles/multus/handlers/main.yml4
-rw-r--r--src/arm/cni-deploy/roles/multus/tasks/crd.yml44
-rw-r--r--src/arm/cni-deploy/roles/multus/tasks/main.yml24
-rw-r--r--src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j222
-rw-r--r--src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j219
-rw-r--r--src/arm/cni-deploy/roles/sriov/tasks/crd.yml13
-rw-r--r--src/arm/cni-deploy/roles/sriov/tasks/main.yml12
-rw-r--r--src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j225
-rw-r--r--src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j219
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch16
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk170824
-rwxr-xr-xsrc/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh30
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf21
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml28
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml13
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml18
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml47
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j268
-rw-r--r--src/arm/cni-deploy/vars/global20
-rw-r--r--src/arm/edge/gateway/MACCHIATObin/README.rst70
-rw-r--r--src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge590
-rw-r--r--src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh74
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/deploy-cni.sh16
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/k8s-build.sh25
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh17
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/setup.sh11
-rw-r--r--src/arm/openwrt_demo/1_buildimage/Dockerfile22
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/bin/getips24
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes26
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/config/firewall149
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user9
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/config/network27
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd24
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf29
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets5
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem30
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem51
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem31
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem51
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf62
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf281
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf12
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf10
-rw-r--r--src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf21
-rw-r--r--src/cni/multus/install_cni.sh12
-rw-r--r--src/vagrant/kubeadm/Vagrantfile34
-rwxr-xr-xsrc/vagrant/kubeadm/deploy.sh10
-rw-r--r--src/vagrant/kubeadm/host_setup.sh32
-rwxr-xr-xsrc/vagrant/kubeadm/istio/bookinfo.sh (renamed from src/vagrant/kubeadm_istio/istio/bookinfo.sh)7
-rwxr-xr-xsrc/vagrant/kubeadm/istio/clean_bookinfo.sh (renamed from src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh)6
-rwxr-xr-xsrc/vagrant/kubeadm/istio/deploy.sh (renamed from src/vagrant/kubeadm_istio/istio/deploy.sh)20
-rwxr-xr-xsrc/vagrant/kubeadm/istio/istio.sh6
-rw-r--r--src/vagrant/kubeadm/kata/containerd.service22
-rw-r--r--src/vagrant/kubeadm/kata/kata_setup.sh54
-rwxr-xr-xsrc/vagrant/kubeadm/kata/nginx-app.sh33
-rw-r--r--src/vagrant/kubeadm/kata/nginx-app.yaml33
-rw-r--r--src/vagrant/kubeadm/master_setup.sh32
-rw-r--r--src/vagrant/kubeadm/multus/Dockerfile10
-rw-r--r--src/vagrant/kubeadm/multus/busybox.yaml (renamed from src/vagrant/kubeadm_multus/examples/busybox.yaml)0
-rw-r--r--src/vagrant/kubeadm/multus/cni_multus.yml (renamed from src/cni/multus/kube_cni_multus.yml)27
-rwxr-xr-xsrc/vagrant/kubeadm/multus/multus.sh (renamed from src/vagrant/kubeadm_multus/examples/multus.sh)2
-rw-r--r--src/vagrant/kubeadm/registry_setup.sh23
-rw-r--r--src/vagrant/kubeadm/virtlet/cirros-vm.yaml42
-rw-r--r--src/vagrant/kubeadm/virtlet/images.yaml3
-rw-r--r--src/vagrant/kubeadm/virtlet/virtlet-ds.yaml521
-rwxr-xr-xsrc/vagrant/kubeadm/virtlet/virtlet.sh21
-rw-r--r--src/vagrant/kubeadm/virtlet/virtlet_setup.sh10
-rw-r--r--src/vagrant/kubeadm/worker_setup.sh8
-rw-r--r--src/vagrant/kubeadm_app/Vagrantfile (renamed from src/vagrant/kubeadm_multus/Vagrantfile)2
-rwxr-xr-xsrc/vagrant/kubeadm_app/app_setup.sh65
-rwxr-xr-xsrc/vagrant/kubeadm_app/create_images.sh10
-rw-r--r--src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml25
-rw-r--r--src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml82
-rwxr-xr-xsrc/vagrant/kubeadm_app/deploy.sh12
-rw-r--r--src/vagrant/kubeadm_app/host_setup.sh (renamed from src/vagrant/kubeadm_multus/host_setup.sh)2
-rw-r--r--src/vagrant/kubeadm_app/master_setup.sh10
-rwxr-xr-xsrc/vagrant/kubeadm_app/setup_vagrant.sh97
-rwxr-xr-xsrc/vagrant/kubeadm_app/tests/clearwater-live-test.sh46
-rw-r--r--src/vagrant/kubeadm_app/worker_setup.sh (renamed from src/vagrant/kubeadm_istio/worker_setup.sh)0
-rw-r--r--src/vagrant/kubeadm_basic/Vagrantfile4
-rw-r--r--src/vagrant/kubeadm_basic/host_setup.sh19
-rw-r--r--src/vagrant/kubeadm_basic/worker_setup.sh1
-rw-r--r--src/vagrant/kubeadm_clearwater/host_setup.sh2
-rwxr-xr-xsrc/vagrant/kubeadm_istio/deploy.sh12
-rw-r--r--src/vagrant/kubeadm_istio/master_setup.sh33
-rwxr-xr-xsrc/vagrant/kubeadm_kata/examples/nginx-app.sh7
-rw-r--r--src/vagrant/kubeadm_kata/examples/nginx-app.yaml2
-rw-r--r--src/vagrant/kubeadm_kata/host_setup.sh29
-rw-r--r--src/vagrant/kubeadm_kata/kata_setup.sh42
-rw-r--r--src/vagrant/kubeadm_kata/master_setup.sh11
-rw-r--r--src/vagrant/kubeadm_kata/worker_setup.sh25
-rw-r--r--src/vagrant/kubeadm_multus/master_setup.sh12
-rw-r--r--src/vagrant/kubeadm_onap/Vagrantfile23
-rwxr-xr-xsrc/vagrant/kubeadm_onap/host_setup.sh15
-rwxr-xr-xsrc/vagrant/kubeadm_onap/master_setup.sh31
-rwxr-xr-xsrc/vagrant/kubeadm_onap/onap_setup.sh53
-rw-r--r--src/vagrant/kubeadm_onap/registry_setup.sh30
-rw-r--r--src/vagrant/kubeadm_onap/setup_swap.sh5
-rw-r--r--src/vagrant/kubeadm_onap/setup_tunnel.sh3
-rwxr-xr-xsrc/vagrant/kubeadm_onap/worker_setup.sh18
-rw-r--r--src/vagrant/kubeadm_ovsdpdk/host_setup.sh2
-rw-r--r--src/vagrant/kubeadm_snort/Vagrantfile (renamed from src/vagrant/kubeadm_istio/Vagrantfile)0
-rwxr-xr-xsrc/vagrant/kubeadm_snort/deploy.sh (renamed from src/vagrant/kubeadm_multus/deploy.sh)2
-rw-r--r--src/vagrant/kubeadm_snort/host_setup.sh (renamed from src/vagrant/kubeadm_istio/host_setup.sh)2
-rw-r--r--src/vagrant/kubeadm_snort/master_setup.sh10
-rwxr-xr-xsrc/vagrant/kubeadm_snort/snort/snort-setup.sh31
-rw-r--r--src/vagrant/kubeadm_snort/snort/snort.yaml32
-rw-r--r--src/vagrant/kubeadm_snort/worker_setup.sh (renamed from src/vagrant/kubeadm_multus/worker_setup.sh)0
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml25
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/images.yaml2
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml457
-rw-r--r--src/vagrant/kubeadm_virtlet/host_setup.sh29
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service11
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service11
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf2
-rw-r--r--src/vagrant/kubeadm_virtlet/worker_setup.sh20
-rwxr-xr-xsrc/vagrant/setup_vagrant.sh1
-rw-r--r--tox.ini17
156 files changed, 5025 insertions, 844 deletions
diff --git a/.gitignore b/.gitignore
index a977916..99b2d4d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
.vagrant/
+.tox
+docs/_build/*
diff --git a/INFO b/INFO
deleted file mode 100644
index 739f15f..0000000
--- a/INFO
+++ /dev/null
@@ -1,35 +0,0 @@
-Project: Container4NFV
-Repo: container4nfv
-Project Creation Date:Dec 13, 2016
-Project Category: Integration & Testing
-Lifecycle State:Incubation
-Primary Contact: Xuan Jia ( jiaxuan@chinamobile.com )
-Project Lead: Xuan Jia ( jiaxuan@chinamobile.com )
-Jira Project Name: Container Integrated For NFV
-Jira Project Prefix: [container4nfv]
-mailing list tag [container4nfv]
-IRC: Server:freenode.net Channel:#opnfv-container
-Repository: container4nfv
-
-Committers:
-jiaxuan@chinamobile.com
-gergely.csatari@nokia.com
-yu.peng36@zte.com.cn
-lijun_1203@126.com
-wassim.haddad@ericsson.com
-heikki.mahkonen@ericsson.com
-akapadia@aarnanetworks.com
-srupanagunta@gmail.com
-ruijing.guo@gmail.com
-chenjiankun1@huawei.com
-trevor.tao@arm.com
-
-Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-12-13-14.59.html
-Link(s) to approval of additional committers:
-http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-04-11-13.59.html
-https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2017-June/016505.html
-https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2017-August/017629.html
-https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2018-February/020156.html
-
-Link to approval of renaming project:
-http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-08-15-12.59.txt
diff --git a/INFO.yaml b/INFO.yaml
index d34e421..257f730 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -4,6 +4,11 @@ project_creation_date: 'Dec 13, 2016'
project_category: 'Integration & Testing'
lifecycle_state: 'Incubation'
project_lead: &opnfv_container4nfv_ptl
+ name: 'Xuan Jia'
+ email: 'jason.jiaxuan@gmail.com'
+ company: 'gmail'
+ id: 'xuanjia'
+ timezone: ''
primary_contact: *opnfv_container4nfv_ptl
issue_tracking:
type: 'jira'
@@ -49,11 +54,21 @@ committers:
email: 'jason.jiaxuan@gmail.com'
company: 'gmail.com'
id: 'xuanjia'
- - name: 'peng yu'
- email: 'yu.peng36@zte.com.cn'
- company: 'zte.com.cn'
- id: 'YuPengZTE'
+ - name: 'Zijin Tao'
+ email: 'Trevor.Tao@arm.com'
+ company: 'arm.com'
+ id: 'trevortao'
+ - name: 'Lu Bin'
+ email: 'Bin.Lu@arm.com'
+ company: 'arm.com'
+ id: 'lubinsz'
tsc:
# yamllint disable rule:line-length
approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-12-13-14.59.html'
- # yamllint enable rule:line-length
+ changes:
+ - type:
+ name:
+ link: 'https://lists.opnfv.org/pipermail/opnfv-tech-discuss/2018-February/020156.html'
+ - type:
+ name:
+ link: 'https://lists.opnfv.org/g/opnfv-tech-discuss/topic/nominate_lubin_as_a_committer/27367271?p=,,,20,0,0,0::recentpostdate%2Fsticky,,,20,2,0,27367271'
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 58f8385..74115cc 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -20,17 +20,13 @@ set -ex
# Scenario sequence rules:
# - stable first
# - less time consuming first
-SCENARIOS="kubeadm_basic
- kubeadm_virtlet
- kubeadm_ovsdpdk
- kubeadm_istio
+SCENARIOS="
+ kubeadm
"
-DEFAULT_TIMEOUT=3600
-
for SCENARIO in $SCENARIOS; do
START=$(date +%s)
- timeout ${DEFAULT_TIMEOUT} ../src/vagrant/${SCENARIO}/deploy.sh
+ ../src/vagrant/${SCENARIO}/deploy.sh
END=$(date +%s)
DIFF=$(( $END - $START ))
echo "Scenario $SCENARIO tooks $DIFF seconds."
diff --git a/docs/arm/container4nfv_on_arm.rst b/docs/arm/container4nfv_on_arm.rst
index 854f17f..3c04664 100644
--- a/docs/arm/container4nfv_on_arm.rst
+++ b/docs/arm/container4nfv_on_arm.rst
@@ -242,7 +242,8 @@ Functest
--------
.. _functest: https://wiki.opnfv.org/display/functest/Opnfv+Functional+Testing
-.. _Danube: http://docs.opnfv.org/en/stable-danube/submodules/functest/docs/testing/user/userguide/index.html
+.. _Danube: :doc:`<functest:testing/user/userguide>`
+
The Functest project provides comprehensive testing methodology, test suites and test cases to test and verify OPNFV Platform functionality
that covers the VIM and NFVI components.
@@ -251,8 +252,17 @@ Functest for Container4NFV could used to verify the basic VIM functionality to s
the Danube_ release, there are 4 domains(VIM, Controllers, Features, VNF) and 5 tiers(healthcheck, smoke, features, components, vnf) and more
than 20 test cases.
-But now the Functest has not been extended to support Kubernetes, which is still under developing.
+Functest-kubernetes
+--------
+
+.. _Functest-kubernetes: https://wiki.opnfv.org/display/functest/Opnfv+Functional+Testing
+
+Functest-kubernetes_ is part of Functest. Compared with the functest, it pays more attention to verifying the kubernetes environments
+functionality, but not the OPNFV platform functionality. The latest functest-kubernetes has been enabled on arm64 platform.
+In functest-kubernetes tests, there are 3 different types of cases. One is health-check case which is used for checking the kubernetes cluster
+minimal functional requirements. One is smoke case which is used for checking the kubernetes conformance. The last type is feature
+test case which depends on different scenarios.
Current Status and Future Plan
==============================
diff --git a/docs/arm/container4nfv_openwrt_demo_deployment.rst b/docs/arm/container4nfv_openwrt_demo_deployment.rst
new file mode 100644
index 0000000..3e56a84
--- /dev/null
+++ b/docs/arm/container4nfv_openwrt_demo_deployment.rst
@@ -0,0 +1,318 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Arm Limited.
+
+
+
+===============================================
+Container4NFV Openwrt Demo Deployment on Arm Server
+===============================================
+
+Abstract
+========
+
+This document gives a brief introduction on how to deploy openwrt services with multiple networking interfaces on Arm platform.
+
+Introduction
+============
+.. _sriov_cni: https://github.com/hustcat/sriov-cni
+.. _Flannel: https://github.com/coreos/flannel
+.. _Multus: https://github.com/Intel-Corp/multus-cni
+.. _cni: https://github.com/containernetworking/cni
+.. _kubeadm: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
+.. _openwrt: https://github.com/openwrt/openwrt
+
+The OpenWrt Project is a Linux operating system targeting embedded devices.
+Also it is a famouse open source router project.
+
+We use it as a demo to show how to deploy an open source vCPE in Kubernetes.
+For Lan port, we configured flannel cni for it. And for Wan port, we configured sriov cni for it.
+
+For demo purpose, I suggest that we use Kubeadm to deploy a Kubernetes cluster firstly.
+
+Cluster
+=======
+
+Cluster Info
+
+In this case, we deploy master and slave as one node.
+Suppose it to be: 192.168.1.2
+
+In 192.168.1.2, 2 NIC as required.
+Suppose it to be: eth0, eth1. eth0 is used to be controle plane, and eth1 is used to be data plane.
+
+Deploy Kubernetes
+-----------------
+Please see link(https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) as reference.
+
+Creat CRD
+---------
+Please make sure that CRD was added for Kubernetes cluster.
+Here we name it as crdnetwork.yaml:
+
+::
+ apiVersion: apiextensions.k8s.io/v1beta1
+ kind: CustomResourceDefinition
+ metadata:
+ # name must match the spec fields below, and be in the form: <plural>.<group>
+ name: networks.kubernetes.com
+ spec:
+ # group name to use for REST API: /apis/<group>/<version>
+ group: kubernetes.com
+ # version name to use for REST API: /apis/<group>/<version>
+ version: v1
+ # either Namespaced or Cluster
+ scope: Namespaced
+ names:
+ # plural name to be used in the URL: /apis/<group>/<version>/<plural>
+ plural: networks
+ # singular name to be used as an alias on the CLI and for display
+ singular: network
+ # kind is normally the CamelCased singular type. Your resource manifests use this.
+ kind: Network
+ # shortNames allow shorter string to match your resource on the CLI
+ shortNames:
+ - net
+
+command:
+
+::
+ kubectl create -f crdnetwork.yaml
+
+Create Flannel-network for Control Plane
+----------------------------------------
+Create flannel network as control plane.
+Here we name it as flannel-network.yaml:
+
+::
+ apiVersion: "kubernetes.com/v1"
+ kind: Network
+ metadata:
+ name: flannel-conf
+ plugin: flannel
+ args: '[
+ {
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+ ]'
+
+command:
+
+::
+ kubectl create -f flannel-network.yaml
+
+Create Sriov-network for Data Plane
+-----------------------------------
+Create sriov network with PF mode as data plane.
+Here we name it as sriov-network.yaml:
+
+::
+ apiVersion: "kubernetes.com/v1"
+ kind: Network
+ metadata:
+ name: sriov-conf
+ plugin: sriov
+ args: '[
+ {
+ "master": "eth1",
+ "pfOnly": true,
+ "ipam": {
+ "type": "dhcp",
+ }
+ }
+ ]'
+
+command:
+
+::
+ kubectl create -f sriov-network.yaml
+
+CNI Installation
+================
+.. _CNI: https://github.com/containernetworking/plugins
+Firstly, we should deploy all CNI plugins. The build process is following:
+
+
+::
+ git clone https://github.com/containernetworking/plugins.git
+ cd plugins
+ ./build.sh
+ cp bin/* /opt/cni/bin
+
+.. _Multus: https://github.com/Intel-Corp/multus-cni
+
+To deploy control plane and data plane interfaces, besides the Flannel CNI and SRIOV CNI,
+we need to deploy the Multus_. The build process of it is as:
+
+::
+ git clone https://github.com/Intel-Corp/multus-cni.git
+ cd multus-cni
+ ./build
+ cp bin/multus /opt/cni/bin
+
+To use the Multus_ CNI,
+we should put the Multus CNI binary to /opt/cni/bin/ where the Flannel CNI and SRIOV
+CNIs are put.
+
+.. _SRIOV: https://github.com/hustcat/sriov-cni
+The build process of it is as:
+
+::
+ git clone https://github.com/hustcat/sriov-cni.git
+ cd sriov-cni
+ ./build
+ cp bin/* /opt/cni/bin
+
+We also need to enable DHCP client for Wan port.
+So we should enable dhcp cni for it.
+
+::
+ /opt/cni/bin/dhcp daemon &
+
+CNI Configuration
+=================
+The following multus CNI configuration is located in /etc/cni/net.d/, here we name it
+as multus-cni.conf:
+
+::
+ {
+ "name": "minion-cni-network",
+ "type": "multus",
+ "kubeconfig": "/etc/kubernetes/admin.conf",
+ "delegates": [{
+ "type": "flannel",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }]
+ }
+
+command:
+
+::
+ step1, remove all files in /etc/cni/net.d/
+ rm /etc/cni/net.d/* -rf
+
+ step2, copy /etc/kubernetes/admin.conf into each nodes.
+
+ step3, copy multus-cni.conf into /etc/cni/net.d/
+
+ step4, restart kubelet
+ systemctl restart kubelet
+
+
+Configuring Pod with Control Plane and Data Plane
+=================================================
+
+1, Save the below following YAML to openwrt-vpn-multus.yaml.
+In this case flannle-conf network object act as the primary network.
+
+::
+ apiVersion: v1
+ kind: ReplicationController
+ metadata:
+ name: openwrtvpn1
+ spec:
+ replicas: 1
+ template:
+ metadata:
+ name: openwrtvpn1
+ labels:
+ app: openwrtvpn1
+ annotations:
+ networks: '[
+ { "name": "flannel-conf" },
+ { "name": "sriov-conf" }
+ ]'
+ spec:
+ containers:
+ - name: openwrtvpn1
+ image: "younglook/openwrt-demo:arm64"
+ imagePullPolicy: "IfNotPresent"
+ command: ["/sbin/init"]
+ securityContext:
+ capabilities:
+ add:
+ - NET_ADMIN
+ stdin: true
+ tty: true
+ ports:
+ - containerPort: 80
+ - containerPort: 4500
+ - containerPort: 500
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: openwrtvpn1
+ spec: # specification of the pod's contents
+ type: NodePort
+ selector:
+ app: openwrtvpn1
+ ports: [
+ {
+ "name": "floatingu",
+ "protocol": "UDP",
+ "port": 4500,
+ "targetPort": 4500
+ },
+ {
+ "name": "actualu",
+ "protocol": "UDP",
+ "port": 500,
+ "targetPort": 500
+ },
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ ]
+
+2, Create Pod
+
+::
+ command:
+ kubectl create -f openwrt-vpn-multus.yaml
+
+3, Get the details of the running pod from the master
+
+::
+ # kubectl get pods
+ NAME READY STATUS RESTARTS AGE
+ openwrtvpn1 1/1 Running 0 30s
+
+Verifying Pod Network
+=====================
+
+::
+ # kubectl exec openwrtvpn1 -- ip a
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+ 3: eth0@if124: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
+ link/ether 0a:58:0a:e9:40:2a brd ff:ff:ff:ff:ff:ff
+ inet 10.233.64.42/24 scope global eth0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::8e6:32ff:fed3:7645/64 scope link
+ valid_lft forever preferred_lft forever
+ 4: net0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
+ link/ether 52:54:00:d4:d2:e5 brd ff:ff:ff:ff:ff:ff
+ inet 192.168.123.2/24 scope global net0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::5054:ff:fed4:d2e5/64 scope link
+ valid_lft forever preferred_lft forever
+
+Contacts
+========
+
+Bin Lu: bin.lu@arm.com
diff --git a/docs/arm/data_plane_dpdk_deployment.rst b/docs/arm/data_plane_dpdk_deployment.rst
index 1f6317c..72e4011 100644
--- a/docs/arm/data_plane_dpdk_deployment.rst
+++ b/docs/arm/data_plane_dpdk_deployment.rst
@@ -114,33 +114,35 @@ Configuring Pod with Control Plane and Data Plane with DPDK Acceration
1, Save the below following YAML to dpdk.yaml.
::
- apiVersion: v1
- kind: Pod
- metadata:
- name: dpdk
- spec:
- nodeSelector:
- beta.kubernetes.io/arch: arm64
- containers:
- - name: dpdk
- image: younglook/dpdk:arm64
- command: [ "bash", "-c", "/usr/bin/l2fwd --huge-unlink -l 6-7 -n 4 --file-prefix=container -- -p 3" ]
- stdin: true
- tty: true
- securityContext:
- privileged: true
- volumeMounts:
- - mountPath: /dev/vfio
- name: vfio
- - mountPath: /mnt/huge
- name: huge
- volumes:
- - name: vfio
- hostPath:
- path: /dev/vfio
- - name: huge
- hostPath:
- path: /mnt/huge
+ .. code-block:: yaml
+
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: dpdk
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/arch: arm64
+ containers:
+ - name: dpdk
+ image: younglook/dpdk:arm64
+ command: [ "bash", "-c", "/usr/bin/l2fwd --huge-unlink -l 6-7 -n 4 --file-prefix=container -- -p 3" ]
+ stdin: true
+ tty: true
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /dev/vfio
+ name: vfio
+ - mountPath: /mnt/huge
+ name: huge
+ volumes:
+ - name: vfio
+ hostPath:
+ path: /dev/vfio
+ - name: huge
+ hostPath:
+ path: /mnt/huge
2, Create Pod
diff --git a/docs/arm/data_plane_sriov_pf_deployment.rst b/docs/arm/data_plane_sriov_pf_deployment.rst
index 7cbd4d7..946d81f 100644
--- a/docs/arm/data_plane_sriov_pf_deployment.rst
+++ b/docs/arm/data_plane_sriov_pf_deployment.rst
@@ -16,14 +16,14 @@ This document gives a brief introduction on how to deploy SRIOV CNI with PF mode
Introduction
============
-.. _sriov_cni: https://github.com/hustcat/sriov-cni
-.. _Flannel: https://github.com/coreos/flannel
-.. _Multus: https://github.com/Intel-Corp/multus-cni
-.. _cni: https://github.com/containernetworking/cni
-.. _kubeadm: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
-.. _k8s-crd: https://kubernetes.io/docs/concepts/api-extension/custom-resources/
-.. _arm64: https://github.com/kubernetes/website/pull/6511
-.. _files: https://github.com/kubernetes/website/pull/6511/files
+.. _sriov_cni: https://github.com/hustcat/sriov-cni
+.. _Flannel: https://github.com/coreos/flannel
+.. _Multus: https://github.com/Intel-Corp/multus-cni
+.. _cni-description: https://github.com/containernetworking/cni
+.. _kubeadm: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
+.. _k8s-crd: https://kubernetes.io/docs/concepts/api-extension/custom-resources/
+.. _arm64: https://github.com/kubernetes/website/pull/6511
+.. _files: https://github.com/kubernetes/website/pull/6511/files
As we know, in some cases we need to deploy multiple network interfaces
@@ -79,20 +79,22 @@ Please make sure that rbac was added for Kubernetes cluster.
here we name it as rbac.yaml:
::
- apiVersion: rbac.authorization.k8s.io/v1beta1
- kind: ClusterRoleBinding
- metadata:
- name: fabric8-rbac
- subjects:
- - kind: ServiceAccount
- # Reference to upper's `metadata.name`
- name: default
- # Reference to upper's `metadata.namespace`
- namespace: default
- roleRef:
- kind: ClusterRole
- name: cluster-admin
- apiGroup: rbac.authorization.k8s.io
+ .. code-block:: yaml
+
+ apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+ name: fabric8-rbac
+ subjects:
+ - kind: ServiceAccount
+ # Reference to upper's `metadata.name`
+ name: default
+ # Reference to upper's `metadata.namespace`
+ namespace: default
+ roleRef:
+ kind: ClusterRole
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
command:
@@ -105,28 +107,30 @@ Please make sure that CRD was added for Kubernetes cluster.
Here we name it as crdnetwork.yaml:
::
- apiVersion: apiextensions.k8s.io/v1beta1
- kind: CustomResourceDefinition
- metadata:
- # name must match the spec fields below, and be in the form: <plural>.<group>
- name: networks.kubernetes.com
- spec:
- # group name to use for REST API: /apis/<group>/<version>
- group: kubernetes.com
- # version name to use for REST API: /apis/<group>/<version>
- version: v1
- # either Namespaced or Cluster
- scope: Namespaced
- names:
- # plural name to be used in the URL: /apis/<group>/<version>/<plural>
- plural: networks
- # singular name to be used as an alias on the CLI and for display
- singular: network
- # kind is normally the CamelCased singular type. Your resource manifests use this.
- kind: Network
- # shortNames allow shorter string to match your resource on the CLI
- shortNames:
- - net
+ .. code-block:: yaml
+
+ apiVersion: apiextensions.k8s.io/v1beta1
+ kind: CustomResourceDefinition
+ metadata:
+ # name must match the spec fields below, and be in the form: <plural>.<group>
+ name: networks.kubernetes.com
+ spec:
+ # group name to use for REST API: /apis/<group>/<version>
+ group: kubernetes.com
+ # version name to use for REST API: /apis/<group>/<version>
+ version: v1
+ # either Namespaced or Cluster
+ scope: Namespaced
+ names:
+ # plural name to be used in the URL: /apis/<group>/<version>/<plural>
+ plural: networks
+ # singular name to be used as an alias on the CLI and for display
+ singular: network
+ # kind is normally the CamelCased singular type. Your resource manifests use this.
+ kind: Network
+ # shortNames allow shorter string to match your resource on the CLI
+ shortNames:
+ - net
command:
@@ -139,19 +143,21 @@ Create flannel network as control plane.
Here we name it as flannel-network.yaml:
::
- apiVersion: "kubernetes.com/v1"
- kind: Network
- metadata:
- name: flannel-conf
- plugin: flannel
- args: '[
- {
- "masterplugin": true,
- "delegate": {
- "isDefaultGateway": true
- }
- }
- ]'
+ .. code-block:: yaml
+
+ apiVersion: "kubernetes.com/v1"
+ kind: Network
+ metadata:
+ name: flannel-conf
+ plugin: flannel
+ args: '[
+ {
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+ ]'
command:
@@ -164,27 +170,29 @@ Create sriov network with PF mode as data plane.
Here we name it as sriov-network.yaml:
::
- apiVersion: "kubernetes.com/v1"
- kind: Network
- metadata:
- name: sriov-conf
- plugin: sriov
- args: '[
- {
- "master": "eth1",
- "pfOnly": true,
- "ipam": {
- "type": "host-local",
- "subnet": "192.168.123.0/24",
- "rangeStart": "192.168.123.2",
- "rangeEnd": "192.168.123.10",
- "routes": [
- { "dst": "0.0.0.0/0" }
- ],
- "gateway": "192.168.123.1"
- }
- }
- ]'
+ .. code-block:: yaml
+
+ apiVersion: "kubernetes.com/v1"
+ kind: Network
+ metadata:
+ name: sriov-conf
+ plugin: sriov
+ args: '[
+ {
+ "master": "eth1",
+ "pfOnly": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "192.168.123.0/24",
+ "rangeStart": "192.168.123.2",
+ "rangeEnd": "192.168.123.10",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ],
+ "gateway": "192.168.123.1"
+ }
+ }
+ ]'
command:
@@ -194,8 +202,8 @@ command:
CNI Installation
================
.. _CNI: https://github.com/containernetworking/plugins
-Firstly, we should deploy all CNI plugins. The build process is following:
+Firstly, we should deploy all CNI plugins. The build process is following:
::
git clone https://github.com/containernetworking/plugins.git
@@ -219,6 +227,7 @@ we should put the Multus CNI binary to /opt/cni/bin/ where the Flannel CNI and S
CNIs are put.
.. _SRIOV: https://github.com/hustcat/sriov-cni
+
The build process of it is as:
::
@@ -233,18 +242,20 @@ The following multus CNI configuration is located in /etc/cni/net.d/, here we na
as multus-cni.conf:
::
- {
- "name": "minion-cni-network",
- "type": "multus",
- "kubeconfig": "/etc/kubernetes/admin.conf",
- "delegates": [{
- "type": "flannel",
- "masterplugin": true,
- "delegate": {
- "isDefaultGateway": true
- }
- }]
- }
+ .. code-block:: json
+
+ {
+ "name": "minion-cni-network",
+ "type": "multus",
+ "kubeconfig": "/etc/kubernetes/admin.conf",
+ "delegates": [{
+ "type": "flannel",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }]
+ }
command:
@@ -267,22 +278,24 @@ Configuring Pod with Control Plane and Data Plane
In this case flannle-conf network object act as the primary network.
::
- apiVersion: v1
- kind: Pod
- metadata:
- name: pod-sriov
- annotations:
- networks: '[
- { "name": "flannel-conf" },
- { "name": "sriov-conf" }
- ]'
- spec: # specification of the pod's contents
- containers:
- - name: pod-sriov
- image: "busybox"
- command: ["top"]
- stdin: true
- tty: true
+ .. code-block:: yaml
+
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: pod-sriov
+ annotations:
+ networks: '[
+ { "name": "flannel-conf" },
+ { "name": "sriov-conf" }
+ ]'
+ spec: # specification of the pod's contents
+ containers:
+ - name: pod-sriov
+ image: "busybox"
+ command: ["top"]
+ stdin: true
+ tty: true
2, Create Pod
@@ -301,25 +314,27 @@ Verifying Pod Network
=====================
::
- # kubectl exec pod-sriov -- ip a
- 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- inet 127.0.0.1/8 scope host lo
- valid_lft forever preferred_lft forever
- inet6 ::1/128 scope host
- valid_lft forever preferred_lft forever
- 3: eth0@if124: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
- link/ether 0a:58:0a:e9:40:2a brd ff:ff:ff:ff:ff:ff
- inet 10.233.64.42/24 scope global eth0
- valid_lft forever preferred_lft forever
- inet6 fe80::8e6:32ff:fed3:7645/64 scope link
- valid_lft forever preferred_lft forever
- 4: net0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
- link/ether 52:54:00:d4:d2:e5 brd ff:ff:ff:ff:ff:ff
- inet 192.168.123.2/24 scope global net0
- valid_lft forever preferred_lft forever
- inet6 fe80::5054:ff:fed4:d2e5/64 scope link
- valid_lft forever preferred_lft forever
+ .. code-block:: bash
+
+ # kubectl exec pod-sriov -- ip a
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+ 3: eth0@if124: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
+ link/ether 0a:58:0a:e9:40:2a brd ff:ff:ff:ff:ff:ff
+ inet 10.233.64.42/24 scope global eth0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::8e6:32ff:fed3:7645/64 scope link
+ valid_lft forever preferred_lft forever
+ 4: net0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
+ link/ether 52:54:00:d4:d2:e5 brd ff:ff:ff:ff:ff:ff
+ inet 192.168.123.2/24 scope global net0
+ valid_lft forever preferred_lft forever
+ inet6 fe80::5054:ff:fed4:d2e5/64 scope link
+ valid_lft forever preferred_lft forever
Contacts
========
diff --git a/docs/arm/multi_flannel_intfs_deployment.rst b/docs/arm/multi_flannel_intfs_deployment.rst
index 07c8ad7..65e643d 100644
--- a/docs/arm/multi_flannel_intfs_deployment.rst
+++ b/docs/arm/multi_flannel_intfs_deployment.rst
@@ -55,10 +55,10 @@ which uses Flannel as the networking backend. The related Flannel deployment fil
image to start the Flannel service.
.. image:: images/multi_flannel_intfs.PNG
- :alt: 2 Flannel interfaces deployment scenario
- :figclass: align-center
+ :width: 800px
+ :alt: 2 Flannel interfaces deployment scenario
- Fig 1. Multiple Flannel interfaces deployment architecture
+Fig 1. Multiple Flannel interfaces deployment architecture
.. _Etcd: https://coreos.com/etcd/
@@ -84,7 +84,7 @@ kube-flannel.yml. Here we give a revised version of this yaml file to start 2 Fl
.. include:: files/kube-2flannels.yml
:literal:
- kube-2flannels.yml
+kube-2flannels.yml
ConfigMap Added
@@ -94,14 +94,16 @@ To start the 2nd Flannel container process, we add a new ConfigMap named kube-fl
includes a new net-conf.json from the 1st:
::
- net-conf.json: |
- {
- "Network": "10.3.0.0/16",
- "Backend": {
- "Type": "udp",
- "Port": 8286
+ .. code-block:: json
+
+ net-conf.json: |
+ {
+ "Network": "10.3.0.0/16",
+ "Backend": {
+ "Type": "udp",
+ "Port": 8286
+ }
}
- }
2nd Flannel Container Added
@@ -112,20 +114,24 @@ The default Flanneld's UDP listen port is 8285, we set the 2nd Flanneld to liste
For the 2nd Flannel container, we use the command as:
::
- - name: kube-flannel2
- image: quay.io/coreos/flannel:v0.8.0-arm64
- command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--subnet-file=/run/flannel/subnet2.env" ]
+ .. code-block:: yaml
+
+ - name: kube-flannel2
+ image: quay.io/coreos/flannel:v0.8.0-arm64
+ command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--subnet-file=/run/flannel/subnet2.env" ]
which outputs the subnet file to /run/flannel/subnet2.env for the 2nd Flannel CNI to use.
And mount the 2nd Flannel ConfigMap to /etc/kube-flannel/ for the 2nd Flanneld container process:
::
- volumeMounts:
- - name: run
- mountPath: /run
- - name: flannel-cfg2
- mountPath: /etc/kube-flannel/
+ .. code-block:: yaml
+
+ volumeMounts:
+ - name: run
+ mountPath: /run
+ - name: flannel-cfg2
+ mountPath: /etc/kube-flannel/
CNI Configuration
@@ -148,33 +154,35 @@ The following CNI configuration sample for 2 Flannel interfaces is located in /e
as 10-2flannels.conf:
::
- {
- "name": "flannel-networks",
- "type": "multus",
- "delegates": [
- {
- "type": "flannel",
- "name": "flannel.2",
- "subnetFile": "/run/flannel/subnet2.env",
- "dataDir": "/var/lib/cni/flannel/2",
- "delegate": {
- "bridge": "kbr1",
- "isDefaultGateway": false
- }
- },
- {
- "type": "flannel",
- "name": "flannel.1",
- "subnetFile": "/run/flannel/subnet.env",
- "dataDir": "/var/lib/cni/flannel",
- "masterplugin": true,
- "delegate": {
- "bridge": "kbr0",
- "isDefaultGateway": true
- }
- }
- ]
- }
+ .. code-block:: json
+
+ {
+ "name": "flannel-networks",
+ "type": "multus",
+ "delegates": [
+ {
+ "type": "flannel",
+ "name": "flannel.2",
+ "subnetFile": "/run/flannel/subnet2.env",
+ "dataDir": "/var/lib/cni/flannel/2",
+ "delegate": {
+ "bridge": "kbr1",
+ "isDefaultGateway": false
+ }
+ },
+ {
+ "type": "flannel",
+ "name": "flannel.1",
+ "subnetFile": "/run/flannel/subnet.env",
+ "dataDir": "/var/lib/cni/flannel",
+ "masterplugin": true,
+ "delegate": {
+ "bridge": "kbr0",
+ "isDefaultGateway": true
+ }
+ }
+ ]
+ }
For the 2nd Flannel CNI, it will use the subnet file /run/flannel/subnet2.env instead of the default /run/flannel/subnet.env,
which is generated by the 2nd Flanneld process, and the subnet data would be output to the directory:
@@ -228,32 +236,36 @@ kube-flannel.yml. For Flanneld to use the etcd backend, we could change the cont
backend:
::
- ...
- containers:
- - name: kube-flannel
- image: quay.io/coreos/flannel:v0.8.0-arm64
- command: [ "/opt/bin/flanneld", "--ip-masq", "--etcd-endpoints=http://ETCD_CLUSTER_IP1:2379", "--etcd-prefix=/coreos.com/network" ]
- securityContext:
- privileged: true
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: run
- mountPath: /run
- - name: flannel-cfg
- mountPath: /etc/kube-flannel/
+ .. code-block:: yaml
+
+ ...
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.8.0-arm64
+ command: [ "/opt/bin/flanneld", "--ip-masq", "--etcd-endpoints=http://ETCD_CLUSTER_IP1:2379", "--etcd-prefix=/coreos.com/network" ]
+ securityContext:
+ privileged: true
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
Here as we don't use the "--kube-subnet-mgr" option, the last 2 lines of
::
- - name: flannel-cfg
+ .. code-block:: yaml
+
+ - name: flannel-cfg
mountPath: /etc/kube-flannel/
can be ignored.
@@ -262,24 +274,26 @@ To start the 2nd Flanneld process, we can add the 2nd Flanneld container section
the 1st Flanneld container:
::
- containers:
- - name: kube-flannel2
- image: quay.io/coreos/flannel:v0.8.0-arm64
- command: [ "/opt/bin/flanneld", "--ip-masq", "--etcd-endpoints=http://ETCD_CLUSTER_IP1:2379", "--etcd-prefix=/coreos.com/network2", "--subnet-file=/run/flannel/subnet2.env" ]
- securityContext:
- privileged: true
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: run
- mountPath: /run
+ .. code-block:: yaml
+
+ containers:
+ - name: kube-flannel2
+ image: quay.io/coreos/flannel:v0.8.0-arm64
+ command: [ "/opt/bin/flanneld", "--ip-masq", "--etcd-endpoints=http://ETCD_CLUSTER_IP1:2379", "--etcd-prefix=/coreos.com/network2", "--subnet-file=/run/flannel/subnet2.env" ]
+ securityContext:
+ privileged: true
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run
The option "-subnet-file" for the 2nd Flanneld is to output a subnet file for the 2nd Flannel subnet configuration
of the Flannel CNI which is called by Multus CNI.
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..3c4453e
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import *
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..f56ea02
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: Container4NFV
diff --git a/docs/development/gapanalysis/gap-analysis-architecture-options.rst b/docs/development/gapanalysis/gap-analysis-architecture-options.rst
index c629dfe..4d91c09 100644
--- a/docs/development/gapanalysis/gap-analysis-architecture-options.rst
+++ b/docs/development/gapanalysis/gap-analysis-architecture-options.rst
@@ -3,71 +3,8 @@
.. (c) Gergely Csatari (Nokia)
==================================
-OpenRetriever architecture options
+Container4NFV architecture options
==================================
-1 Architecture options to support only containers on bare metal
-...............................................................
-To support containers on bare metal without the support of VM-s only a single
-VIM is needed.
-This architecture option is targeted by OpenRetriever in OPNFV Euphrates, and
-this architecture option is considered in the gap analyzis against
-:doc:`OpenStack <gap-analysis-openstack>` and
-:doc:`Kubernetes <gap-analysis-kubernetes-v1.5>`.
-Examples: Kubernetes, OpenStack with Zun_ and Kuryr_, which as a side effect
-also supports VM-s.
-
-2 Architecture options to support containers and VM-s
-.....................................................
-There are different architecture options to support container based and VM based
-VNF-s in OPNFV. This section provides a list of these options with a brief
-description and examples.
-In the descriptions providing the same API means, that the same set of API-s are
-used (like the OpenStack_ API-s or the Kubernetes_ API), integrted networks mean,
-that the network connections of the workloads can be connected without leaving
-the domain of the VIM and shared hardware resources mean that it is possible to
-start a workload VM and a workload container on the same physical host.
-
-2.1 Separate VIM-s
-==================
-There is a separate VIM for VM-s and a separate for containers, they use
-different hardware pools, they provide different API-s and their networks are
-not integrated.
-Examples: A separate OpenStack instance for the VM-s and a separate Kubernetes_
-instance for the containers.
-
-2.2 Single VIM
-==============
-One VIM supports both VM-s and containers using the same hardware pool, with
-the same API and with integrated networking solution.
-Examples: OpenStack with Zun_ and Kuryr_ or Kubernetes_ with Kubevirt_, Virtlet_ or
-similar.
-
-2.3 Combined VIM-s
-==================
-There are two VIM-s from API perspective, but usually the VIM-s share hardware
-pools on some level. This option have suboptions.
-
-2.3.1 Container VIM on VM VIM
------------------------------
-A container VIM is deployed on top of resources managed by a VM VIM, they share
-the same hardware pool, but they have separate domains in the pool, they provide
-separate API-s and there are possibilities to integrate their networks.
-Example: A Kubernetes_ is deployed into VM-s or bare metal hosts into an
-OpenStack deployment optionally with Magnum. Kuryr_ integrates the networks on
-some level.
-
-2.3.2 VM VIM on Container VIM
------------------------------
-A VM VIM is deployed on top of resources managed by a container VIM, they do not
-share the same hardware pool, provide different API and do not have integrated
-networks.
-Example: `Kolla Kubernetes <https://github.com/openstack/kolla-kubernetes>`_ or
-`OpenStack Helm <https://wiki.openstack.org/wiki/Openstack-helm>`_.
-
-.. _Kubernetes: http://kubernetes.io/
-.. _Kubevirt: https://github.com/kubevirt/
-.. _Kuryr: https://docs.openstack.org/developer/kuryr/
-.. _OpenStack: https://www.openstack.org/
-.. _Virtlet: https://github.com/Mirantis/virtlet
-.. _Zun: https://wiki.openstack.org/wiki/Zun
+Analysis of the architecture options was moved to the
+`Container4NFV wiki <https://wiki.opnfv.org/display/OpenRetriever/Analyzis+of+architecture+options>`_. \ No newline at end of file
diff --git a/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst b/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst
index f8fb85a..367237d 100644
--- a/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst
+++ b/docs/development/gapanalysis/gap-analysis-kubernetes-v1.5.rst
@@ -3,10 +3,10 @@
.. (c) Xuan Jia (China Mobile)
================================================
-OpenRetriever Gap Analysis with Kubernetes v1.5
+Container4NFV Gap Analysis with Kubernetes v1.5
================================================
-This section provides users with OpenRetriever gap analysis regarding feature
+This section provides users with Container4NFV gap analysis regarding feature
requirement with Kubernetes Official Release. The following table lists the use
cases / feature requirements of container integrated functionality, and its gap
analysis with Kubernetes Official Release.
diff --git a/docs/development/gapanalysis/gap-analysis-openstack.rst b/docs/development/gapanalysis/gap-analysis-openstack.rst
index a7357aa..5840f1d 100644
--- a/docs/development/gapanalysis/gap-analysis-openstack.rst
+++ b/docs/development/gapanalysis/gap-analysis-openstack.rst
@@ -3,9 +3,9 @@
.. (c) Xuan Jia (China Mobile), Gergely Csatari (Nokia)
=========================================
-OpenRetriever Gap Analysis with OpenStack
+Container4NFV Gap Analysis with OpenStack
=========================================
-This section provides a gap analyzis between the targets of OpenRetriever for
+This section provides a gap analysis between the targets of Container4NFV for
release Euphrates (E) or later and the features provided by OpenStack in release
Ocata. As the OPNFV and OpenStack releases tend to change over time this
analyzis is planned to be countinously updated.
@@ -47,7 +47,7 @@ integrated functionality, and its gap analysis with OpenStack.
|Kuryr_ needs to support MACVLAN and IPVLAN |Kuryr_ |Using MACVLAN or IPVLAN could provide better network performance. |Open |
| | |It is planned for Ocata. | |
+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+----------------+
- |Kuryr_ Kubernetes_ integration is needed |Kuryr_ |It is done in the frame of OpenRetriever. |Targeted to |
+ |Kuryr_ Kubernetes_ integration is needed |Kuryr_ |It is done in the frame of Container4NFV. |Targeted to |
| | | |OPNFV release E |
| | | |/OpenStack Ocata|
+-----------------------------------------------------------+-------------------+--------------------------------------------------------------------+----------------+
diff --git a/docs/development/gapanalysis/gap-analysis-opnfv-installer.rst b/docs/development/gapanalysis/gap-analysis-opnfv-installer.rst
index f1c047b..ecfa47e 100644
--- a/docs/development/gapanalysis/gap-analysis-opnfv-installer.rst
+++ b/docs/development/gapanalysis/gap-analysis-opnfv-installer.rst
@@ -3,9 +3,9 @@
.. (c) Xuan Jia (China Mobile)
===============================================
-OpenRetriever Gap Analysis with OPNFV Installer
+Container4NFV Gap Analysis with OPNFV Installer
===============================================
-This section provides users with OpenRetriever gap analysis regarding feature
+This section provides users with Container4NFV gap analysis regarding feature
requirement with OPNFV Installer in Danube Official Release. The following
table lists the use cases / feature requirements of container integrated
functionality, and its gap analysis with OPNFV Installer in Danube Official
diff --git a/docs/development/gapanalysis/index.rst b/docs/development/gapanalysis/index.rst
index 9b22674..dc390d9 100644
--- a/docs/development/gapanalysis/index.rst
+++ b/docs/development/gapanalysis/index.rst
@@ -3,13 +3,13 @@
.. (c) Xuan Jia (China Mobile)
==========================
-OpenRetriever Gap Analysis
+Container4NFV Gap Analysis
==========================
-:Project: OpenRetriever, https://wiki.opnfv.org/display/openretriever
+:Project: Container4NFV, https://wiki.opnfv.org/display/OpenRetriever/Container4NFV
-:Editors: Xuan Jia (China Mobile)
-:Authors: Xuan Jia (China Mobile)
+:Editors: Xuan Jia (China Mobile), Gergely Csatari (Nokia)
+:Authors: Container4NFV team
:Abstract: This document provides the users with top-down gap analysis regarding
OpenRetriever feature requirements with OPNFV Installer, OpenStack
diff --git a/docs/development/ngvsrequirements/ngvs-requirements-document.rst b/docs/development/ngvsrequirements/ngvs-requirements-document.rst
index d2c775e..793708f 100644
--- a/docs/development/ngvsrequirements/ngvs-requirements-document.rst
+++ b/docs/development/ngvsrequirements/ngvs-requirements-document.rst
@@ -2,7 +2,7 @@
.. License.http://creativecommons.org/licenses/by/4.0
.. (c) Xuan Jia (China Mobile)
-==========================================================================
+===========================================================================
OpenRetriever Next Gen VIM & Edge Computing Scheduler Requirements Document
===========================================================================
@@ -44,15 +44,15 @@ At a high level, we believe the VIM scheduler must:
- Support legacy and event-driven scheduling
- By legacy scheduling we mean scheduling without any trigger (see above)
-i.e. the current technique used by schedulers such as OpenStack Nova.
+ i.e. the current technique used by schedulers such as OpenStack Nova.
- By event-driven scheduling we mean scheduling with a trigger (see above).
-We do not mean that the unikernel or container that is going to run the VNF is
-already running . The instance is started and torn-down in response to traffic.
-The two step process is transparent to the user.
+ We do not mean that the unikernel or container that is going to run the VNF is
+ already running. The instance is started and torn-down in response to traffic.
+ The two step process is transparent to the user.
- More specialized higher level schedulers and orchestration systems may be
-run on top e.g. FaaS (similar to AWS Lambda) etc.
+ run on top e.g. FaaS (similar to AWS Lambda) etc.
+----------------------------------------------------------------------------------------+
| Serverless vs. FaaS vs. Event-Driven Terminology |
@@ -257,6 +257,7 @@ Multiple compute types
| | - Support shared storage (e.g. OpenStack |
| | Cinder, K8s volumes etc.) |
+----------------------------------------+-----------------------------------------------+
+
.. [1]
Intel EPA includes DPDK, SR-IOV, CPU and NUMA pinning, Huge Pages
etc.
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..30fe35b
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,17 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+.. _container4nfv:
+
+=============
+Container4NFV
+=============
+
+.. toctree::
+ :maxdepth: 1
+
+ development/gapanalysis/index
+ development/nvgsrequirements/index
+ release/release-notes/index
+ release/userguide/index
diff --git a/docs/release/index.rst b/docs/release/index.rst
new file mode 100644
index 0000000..e09f09f
--- /dev/null
+++ b/docs/release/index.rst
@@ -0,0 +1,15 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+.. _container4nfv-release:
+
+=====================
+Release Documentation
+=====================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 1
+
+ scenarios/index
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index c6c67b7..4325454 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -2,16 +2,11 @@
.. License. http://creativecommons.org/licenses/by/4.0
.. (c) Xuan Jia (China Mobile)
-==========================
-Container4NFV Release Notes
-==========================
-
-:Project: OpenRetriever, https://wiki.opnfv.org/display/openretriever
+.. _container4nfv-releasenotes:
-:Editors: Xuan Jia (China Mobile)
-:Authors: Xuan Jia (China Mobile)
-
-:Abstract: Container4NFV Release Notes.
+===========================
+Container4NFV Release Notes
+===========================
.. toctree::
:maxdepth: 1
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 51711f8..39c95a2 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -9,3 +9,19 @@ Container4NFV E release Notes
2. Container architecture options
3. Joid could support Kubernetes
4. Using vagrant tool to setup an env with DPDK enabled.
+
+==================================
+Container4NFV F release Notes
+==================================
+1. Enable Multus in Kubernetes
+2. Enable SR-IOV in Kubernetes
+3. Support ARM platform
+
+
+==================================
+Container4NFV G release Notes
+==================================
+1. Enable Virtlet in Kubernetes
+2. Enable Kata in Kubernetes
+3. Enable VPP in Kubernetes
+4. Enable Vagrant tools.
diff --git a/docs/release/scenarios/k8-nosdn-virtlet-noha/index.rst b/docs/release/scenarios/k8-nosdn-virtlet-noha/index.rst
new file mode 100644
index 0000000..ba7c54e
--- /dev/null
+++ b/docs/release/scenarios/k8-nosdn-virtlet-noha/index.rst
@@ -0,0 +1,18 @@
+.. _k8-nosdn-virtlet-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+==============================================
+k8-nosdn-virtlet-noha overview and description
+==============================================
+.. This document will be used to provide a description of the scenario for an end user.
+.. You should explain the purpose of the scenario, the types of capabilities provided and
+.. the unique components that make up the scenario including how they are used.
+
+.. toctree::
+ :maxdepth: 3
+
+ ./scenario.description.rst
+
diff --git a/docs/release/userguide/virlet.rst b/docs/release/scenarios/k8-nosdn-virtlet-noha/scenario.description.rst
index 60902db..60902db 100644
--- a/docs/release/userguide/virlet.rst
+++ b/docs/release/scenarios/k8-nosdn-virtlet-noha/scenario.description.rst
diff --git a/docs/release/scenarios/k8-ovn-kata-noha/index.rst b/docs/release/scenarios/k8-ovn-kata-noha/index.rst
new file mode 100644
index 0000000..fe2cf42
--- /dev/null
+++ b/docs/release/scenarios/k8-ovn-kata-noha/index.rst
@@ -0,0 +1,18 @@
+.. _k8-ovn-kata-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) <optionally add copywriters name>
+
+=========================================
+k8-ovn-kata-noha overview and description
+=========================================
+.. This document will be used to provide a description of the scenario for an end user.
+.. You should explain the purpose of the scenario, the types of capabilities provided and
+.. the unique components that make up the scenario including how they are used.
+
+.. toctree::
+ :maxdepth: 3
+
+ ./scenario.description.rst
+
diff --git a/docs/release/userguide/kata.rst b/docs/release/scenarios/k8-ovn-kata-noha/scenario.description.rst
index c7e6340..c7e6340 100644
--- a/docs/release/userguide/kata.rst
+++ b/docs/release/scenarios/k8-ovn-kata-noha/scenario.description.rst
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index cf244e1..f2149b0 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -2,14 +2,11 @@
.. License. http://creativecommons.org/licenses/by/4.0
.. (c) Xuan Jia (China Mobile)
-==========================
-Container4NFV User Guide
-==========================
-
-:Project: Container4NFV, https://wiki.opnfv.org/display/openretriever
+.. _container4nfv-userguide:
-:Editors: Xuan Jia (China Mobile)
-:Authors: Xuan Jia (China Mobile)
+========================
+Container4NFV User Guide
+========================
.. toctree::
:maxdepth: 3
diff --git a/docs/release/userguide/scenario.rst b/docs/release/userguide/scenario.rst
index 87f7e00..32200d6 100644
--- a/docs/release/userguide/scenario.rst
+++ b/docs/release/userguide/scenario.rst
@@ -3,7 +3,7 @@
.. (c) Xuan Jia (China Mobile)
Senario:
-==========================
+========
k8-nosdn-nofeature-noha
--------------------------
@@ -13,7 +13,8 @@ https://build.opnfv.org/ci/job/joid-k8-nosdn-nofeature-noha-baremetal-daily-euph
k8-nosdn-lb-noha
--------------------------
+----------------
+
Using Joid to deploy Kubernetes in bare metal machine with load balance enabled
https://build.opnfv.org/ci/job/joid-k8-nosdn-lb-noha-baremetal-daily-euphrates/
@@ -21,15 +22,17 @@ https://build.opnfv.org/ci/job/joid-k8-nosdn-lb-noha-baremetal-daily-euphrates/
YardStick test Cases
-==========================
+====================
opnfv_yardstick_tc080
---------------------------
+---------------------
+
measure network latency between containers in k8s using ping
https://git.opnfv.org/yardstick/tree/tests/opnfv/test_cases/opnfv_yardstick_tc080.yaml
- opnfv_yardstick_tc081
---------------------------
+opnfv_yardstick_tc081
+---------------------
+
measure network latency between container and VM using ping
https://git.opnfv.org/yardstick/tree/tests/opnfv/test_cases/opnfv_yardstick_tc081.yaml
diff --git a/docs/release/userguide/snort.rst b/docs/release/userguide/snort.rst
new file mode 100644
index 0000000..9bb6b3b
--- /dev/null
+++ b/docs/release/userguide/snort.rst
@@ -0,0 +1,33 @@
+================
+ Snort
+================
+
+--------------
+What is Snort?
+--------------
+
+`Snort <https://www.snort.org/>`_ is an open source network intrusion prevention system, capable
+of performing real-time traffic analysis and packet logging on IP
+networks. It can perform protocol analysis, content searching/matching,
+and can be used to detect a variety of attacks and probes, such as buffer
+overflows, stealth port scans, CGI attacks, SMB probes, OS fingerprinting
+attempts, and much more.
+
+-------------------------
+What can I do with Snort?
+-------------------------
+
+Snort has three primary uses: It can be used as a straight packet sniffer
+like tcpdump, a packet logger (useful for network traffic debugging, etc),
+or as a full blown network intrusion prevention system.
+
+----------------
+How Snort works?
+----------------
+
+Snort works with rules. Rules are a different methodology for performing
+detection, which bring the advantage of 0-day detection to the table.
+Unlike signatures, rules are based on detecting the actual vulnerability,
+not an exploit or a unique piece of data. Developing a rule requires an
+acute understanding of how the vulnerability actually works.
+
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/src/arm/cni-deploy/.gitignore b/src/arm/cni-deploy/.gitignore
new file mode 100644
index 0000000..a8b42eb
--- /dev/null
+++ b/src/arm/cni-deploy/.gitignore
@@ -0,0 +1 @@
+*.retry
diff --git a/src/arm/cni-deploy/deploy.yml b/src/arm/cni-deploy/deploy.yml
new file mode 100644
index 0000000..c54353a
--- /dev/null
+++ b/src/arm/cni-deploy/deploy.yml
@@ -0,0 +1,32 @@
+---
+- name: Fixup default flannel
+ hosts: kube-master
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: flannel, tags: [flannel]}
+
+- name: Deploy Multus CNI
+ hosts: all
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: multus, tags: [multus]}
+
+- name: Deploy SRIOV CNI
+ hosts: all
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: sriov, tags: [sriov]}
+
+- name: Deploy Vhostuser CNI and VPP
+ hosts: all
+ gather_facts: "yes"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: vhost-vpp, tags: [vhost-vpp]}
diff --git a/src/arm/cni-deploy/inventory/inventory.cfg b/src/arm/cni-deploy/inventory/inventory.cfg
new file mode 100644
index 0000000..cd8bb25
--- /dev/null
+++ b/src/arm/cni-deploy/inventory/inventory.cfg
@@ -0,0 +1,18 @@
+# compass-tasks: /opt/kargo_k8s/inventory/inventory.cfg
+
+[all]
+host2 ansible_ssh_host=10.1.0.51 ansible_ssh_pass=root ansible_user=root
+host1 ansible_ssh_host=10.1.0.50 ansible_ssh_pass=root ansible_user=root
+
+[kube-master]
+host1
+
+[etcd]
+host1
+
+[kube-node]
+host2
+
+[k8s-cluster:children]
+kube-node
+kube-master
diff --git a/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml
new file mode 100644
index 0000000..a99983b
--- /dev/null
+++ b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml
@@ -0,0 +1,86 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kube-flannel
+ namespace: "kube-system"
+ labels:
+ tier: node
+ k8s-app: flannel
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ k8s-app: flannel
+ spec:
+ serviceAccountName: flannel
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.9.1-arm64
+ imagePullPolicy: IfNotPresent
+ resources:
+ limits:
+ cpu: 300m
+ memory: 500M
+ requests:
+ cpu: 150m
+ memory: 64M
+ command: ["/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"]
+ securityContext:
+ privileged: true
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ # - name: install-cni
+ # image: linaro/flannel-cni-arm64:v0.3.0
+ # command: ["/install-cni.sh"]
+ # env:
+ # # The CNI network config to install on each node.
+ # - name: CNI_NETWORK_CONFIG
+ # valueFrom:
+ # configMapKeyRef:
+ # name: kube-flannel-cfg
+ # key: cni-conf.json
+ # - name: CNI_CONF_NAME
+ # value: "10-flannel.conflist"
+ # volumeMounts:
+ # - name: cni
+ # mountPath: /host/etc/cni/net.d
+ # - name: host-cni-bin
+ # mountPath: /host/opt/cni/bin/
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ volumes:
+ - name: run
+ hostPath:
+ path: /run
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+ # - name: host-cni-bin
+ # hostPath:
+ # path: /opt/cni/bin
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 20%
+ type: RollingUpdate
diff --git a/src/arm/cni-deploy/roles/flannel/tasks/main.yml b/src/arm/cni-deploy/roles/flannel/tasks/main.yml
new file mode 100644
index 0000000..4f1a910
--- /dev/null
+++ b/src/arm/cni-deploy/roles/flannel/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Copy flannel daemonset file
+ copy:
+ src: cni-flannel-ds.yml
+ dest: /tmp/cni-flannel-ds.yml
+
+- name: Apply flannel daemonset
+ shell: kubectl apply -f /tmp/cni-flannel-ds.yml
+ ignore_errors: "yes"
+
+- name: Sleep 10 seconds
+ wait_for: timeout=10
diff --git a/src/arm/cni-deploy/roles/multus/files/10-multus.conf b/src/arm/cni-deploy/roles/multus/files/10-multus.conf
new file mode 100644
index 0000000..3726413
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/10-multus.conf
@@ -0,0 +1,13 @@
+{
+ "name": "multus-cni-network",
+ "type": "multus",
+ "kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
+ "delegates": [{
+ "type": "flannel",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }]
+}
+
diff --git a/src/arm/cni-deploy/roles/multus/files/clusterrole.yml b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml
new file mode 100644
index 0000000..fb056d4
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml
@@ -0,0 +1,16 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: multus-crd-overpowered
+rules:
+ - apiGroups:
+ - '*'
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - nonResourceURLs:
+ - '*'
+ verbs:
+ - '*'
diff --git a/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml
new file mode 100644
index 0000000..9aefdb8
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml
@@ -0,0 +1,15 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networks.kubernetes.com
+spec:
+ group: kubernetes.com
+ version: v1
+ scope: Namespaced
+ names:
+ plural: networks
+ singular: network
+ kind: Network
+ shortNames:
+ - net
diff --git a/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml
new file mode 100644
index 0000000..bd7891d
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml
@@ -0,0 +1,13 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: flannel-networkobj
+plugin: flannel
+args: '[
+ {
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/multus/handlers/main.yml b/src/arm/cni-deploy/roles/multus/handlers/main.yml
new file mode 100644
index 0000000..8474d34
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: Restart kubelet
+ service:
+ name: kubelet
+ state: restarted
diff --git a/src/arm/cni-deploy/roles/multus/tasks/crd.yml b/src/arm/cni-deploy/roles/multus/tasks/crd.yml
new file mode 100644
index 0000000..cacf98a
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/tasks/crd.yml
@@ -0,0 +1,44 @@
+---
+- name: Copy yaml files
+ copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - clusterrole.yml
+ - crdnetwork.yml
+ - flannel-obj.yml
+
+- name: Copy macvlan template
+ template:
+ src: macvlan-obj.yml.j2
+ dest: /tmp/macvlan-obj.yml
+
+- name: Copy Multus testpod template
+ template:
+ src: multus-testpod.yml.j2
+ dest: /root/multus-testpod.yml
+
+- name: Create cluster role
+ shell: kubectl apply -f /tmp/clusterrole.yml
+
+- name: Check if role binding is created
+ shell: kubectl get clusterrolebinding multus-node-{{ item }}
+ register: check_rb
+ ignore_errors: "yes"
+ with_items: "{{ groups['all'] }}"
+
+- name: Create role binding
+ shell: >
+ kubectl create clusterrolebinding multus-node-{{ item }}
+ --clusterrole=multus-crd-overpowered
+ --user=system:node:{{ item }}
+ when: check_rb is failed
+ with_items: "{{ groups['all'] }}"
+
+- name: Create network CRD
+ shell: kubectl apply -f /tmp/crdnetwork.yml
+
+- name: Create flannel and macvlan network objects
+ shell: >
+ kubectl apply -f /tmp/flannel-obj.yml &&
+ kubectl apply -f /tmp/macvlan-obj.yml
diff --git a/src/arm/cni-deploy/roles/multus/tasks/main.yml b/src/arm/cni-deploy/roles/multus/tasks/main.yml
new file mode 100644
index 0000000..a200215
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Build Multus CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ multus_repo }} multus_cni && cd multus_cni &&
+ git checkout {{ multus_commit }} && ./build && cp bin/multus /opt/cni/bin/"
+ args:
+ creates: /opt/cni/bin/multus
+
+- name: Remove default CNI configuration
+ shell: rm -f /etc/cni/net.d/*
+ args:
+ warn: "no"
+
+- name: Set Multus as default CNI
+ copy:
+ src: 10-multus.conf
+ dest: /etc/cni/net.d/
+ notify:
+ - Restart kubelet
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
diff --git a/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2
new file mode 100644
index 0000000..b5a549f
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: macvlan-networkobj
+plugin: macvlan
+args: '[
+ {
+ "master": "{{ macvlan_master }}",
+ "mode": "vepa",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ macvlan_subnet }}",
+ "rangeStart": "{{ macvlan_range_start }}",
+ "rangeEnd": "{{ macvlan_range_end }}",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ],
+ "gateway": "{{ macvlan_gateway }}"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2
new file mode 100644
index 0000000..4884846
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: multus-test
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "macvlan-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: multus-test
+ image: "busybox"
+ command: ["sleep", "100d"]
+ stdin: true
+ tty: true
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
diff --git a/src/arm/cni-deploy/roles/sriov/tasks/crd.yml b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml
new file mode 100644
index 0000000..5cc7892
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml
@@ -0,0 +1,13 @@
+---
+- name: Copy SRIOV template
+ template:
+ src: sriov-obj.yml.j2
+ dest: /tmp/sriov-obj.yml
+
+- name: Copy SRIOV testpod template
+ template:
+ src: sriov-testpod.yml.j2
+ dest: /root/sriov-testpod.yml
+
+- name: Create SRIOV network object
+ shell: kubectl apply -f /tmp/sriov-obj.yml
diff --git a/src/arm/cni-deploy/roles/sriov/tasks/main.yml b/src/arm/cni-deploy/roles/sriov/tasks/main.yml
new file mode 100644
index 0000000..9c190ad
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Build SRIOV CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ sriov_repo }} sriov_cni && cd sriov_cni &&
+ git checkout {{ sriov_commit }} && ./build && cp bin/sriov /opt/cni/bin/"
+ args:
+ creates: /opt/cni/bin/sriov
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2
new file mode 100644
index 0000000..6c67968
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2
@@ -0,0 +1,25 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: sriov-networkobj
+plugin: sriov
+args: '[
+ {
+ "master": "{{ sriov_master }}",
+ "pfOnly": true,
+ "if0name": "net0",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ sriov_subnet }}",
+ "rangeStart": "{{ sriov_range_start }}",
+ "rangeEnd": "{{ sriov_range_end }}",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ],
+ "gateway": "{{ sriov_gateway }}"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2
new file mode 100644
index 0000000..c1d01bc
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sriov-test
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "sriov-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: sriov-test
+ image: "busybox"
+ command: ["sleep", "100d"]
+ stdin: true
+ tty: true
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch
new file mode 100644
index 0000000..171ff4d
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
+index e320811..c1b1640 100644
+--- a/drivers/net/virtio/virtio_ethdev.c
++++ b/drivers/net/virtio/virtio_ethdev.c
+@@ -1754,6 +1754,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
+ virtqueue_notify(rxvq->vq);
+ }
+
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txvq = dev->data->tx_queues[i];
++ virtqueue_notify(txvq->vq);
++ }
++
+ PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708 b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708
new file mode 100644
index 0000000..2f83534
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708
@@ -0,0 +1,24 @@
+FROM ubuntu:xenial
+
+RUN apt-get update && \
+ apt-get install -y git make openssl libcrypto++-dev libnuma-dev && \
+ apt-get autoclean
+
+RUN git clone https://gerrit.fd.io/r/vpp -b stable/1710 /root/vpp-1710
+
+WORKDIR /root/vpp-1710
+COPY ./0001-net-virtio-ethdev.patch dpdk/dpdk-17.08_patches/0001-net-virtio-ethdev.patch
+RUN sed -i "s/sudo -E //g" Makefile
+RUN make UNATTENDED=yes install-dep
+
+WORKDIR /root/vpp-1710/build-root
+RUN ./bootstrap.sh
+RUN make PLATFORM=vpp TAG=vpp_debug vpp-install
+RUN mkdir -p /etc/vpp && \
+ cp /root/vpp-1710/src/vpp/conf/startup.conf /etc/vpp/startup.conf && \
+ cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin && \
+ ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins
+RUN groupadd vpp
+
+ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/dpdk/bin"
+ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh
new file mode 100755
index 0000000..15b0d27
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -x
+
+cid=`sed -ne '/hostname/p' /proc/1/task/1/mountinfo | awk -F '/' '{print $6}'`
+cid_s=${cid:0:12}
+filename=${cid_s}-net1.json
+ifstring=`cat /vhost-user-net-plugin/${cid}/${cid_s}-net1.json | awk -F ',' '{print $4}'`
+ifmac=`echo ${ifstring} | awk -F '\"' '{print $4}'`
+
+ipstr=$(cat /vhost-user-net-plugin/${cid}/${cid_s}-net1-ip4.conf |grep "ipAddr")
+ipaddr=$(echo $ipstr | awk -F '\"' '{print $4}')
+ipaddr1=$(echo $ipaddr | cut -d / -f 1)
+
+vdev_str="vdev virtio_user0,path=/vhost-user-net-plugin/$cid/$cid_s-net1,mac=$ifmac"
+
+sed -i.bak '/# dpdk/a\dpdk \{' /etc/vpp/startup.conf
+sed -i.bak "/# vdev eth_bond1,mode=1/a\\$vdev_str" /etc/vpp/startup.conf
+sed -i.bak '/# socket-mem/a\\}' /etc/vpp/startup.conf
+
+vpp -c /etc/vpp/startup.conf &
+
+sleep 40
+
+vppctl set int state VirtioUser0/0/0 up
+vppctl set int ip address VirtioUser0/0/0 ${ipaddr1}/24
+vppctl show int
+vppctl show int address
+
+echo ${ipaddr1} > /vhost-user-net-plugin/$(hostname)
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf
new file mode 100644
index 0000000..ae86e38
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf
@@ -0,0 +1,21 @@
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen /run/vpp/cli.sock
+ gid vpp
+}
+api-trace {
+ on
+}
+api-segment {
+ gid vpp
+}
+cpu {
+ main-core 1
+ corelist-workers 2-3
+ workers 2
+}
+dpdk {
+ uio-driver vfio-pci
+}
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml
new file mode 100644
index 0000000..1e9bc66
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml
@@ -0,0 +1,28 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: vhostuser-networkobj
+plugin: vhostuser
+args: '[
+ {
+ "type": "vhostuser",
+ "name": "vhostuser-network",
+ "if0name": "net1",
+ "vhost": {
+ "vhost_tool": "/opt/cni/bin/vpp-config.py"
+ },
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.56.217.0/24",
+ "rangeStart": "10.56.217.131",
+ "rangeEnd": "10.56.217.190",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ],
+ "gateway": "10.56.217.1"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml
new file mode 100644
index 0000000..ad36c90
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml
@@ -0,0 +1,13 @@
+---
+- name: Copy Vhostuser yaml
+ copy:
+ src: vhostuser-obj.yml
+ dest: /tmp/vhostuser-obj.yml
+
+- name: Copy VPP testpod template
+ template:
+ src: vpp-testpod.yml.j2
+ dest: /root/vpp-testpod.yml
+
+- name: Create Vhostuser network object
+ shell: kubectl apply -f /tmp/vhostuser-obj.yml
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml
new file mode 100644
index 0000000..df890ea
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Build Vhostuser CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ vhostuser_repo }} vhostuser_cni && cd vhostuser_cni
+ && git checkout {{ vhostuser_commit }} && ./build
+ && cp bin/vhostuser /opt/cni/bin/
+ && cp tests/vpp-config-debug.py /opt/cni/bin/vpp-config.py"
+ args:
+ creates: /opt/cni/bin/vhostuser
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
+
+- name: Import VPP task
+ import_tasks: vpp.yml
+ when: inventory_hostname in groups["kube-node"]
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml
new file mode 100644
index 0000000..7f5be05
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml
@@ -0,0 +1,47 @@
+---
+- name: Create dest directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /tmp/vpp1710/
+ - /var/lib/cni/vhostuser/
+ - /etc/vpp/
+
+- name: Copy VPP files
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - {src: "Dockerfile.vpp1710-dpdk1708", dest: "/tmp/vpp1710/Dockerfile"}
+ - {src: "0001-net-virtio-ethdev.patch", dest: "/tmp/vpp1710/0001-net-virtio-ethdev.patch"}
+ - {src: "setvpp.sh", dest: "/var/lib/cni/vhostuser/setvpp.sh"}
+ - {src: "startup.conf", dest: "/etc/vpp/startup.conf"}
+
+- name: Check if VPP image exists
+ shell: docker inspect --type=image vpp-1710:virtio-patched > /dev/null 2>&1
+ ignore_errors: "yes"
+ register: check_vpp
+
+- name: Building VPP container. Be patient...
+ shell: docker build -t vpp-1710:virtio-patched --network host .
+ args:
+ chdir: /tmp/vpp1710/
+ when: check_vpp is failed
+
+- name: Copy VPP binaries to host
+ shell: >
+ docker run --rm -v /root/vpp-1710/build-root:/root/vpp-host vpp-1710:virtio-patched
+ /bin/cp -a /root/vpp-1710/build-root/install-vpp_debug-native /root/vpp-host
+ && /bin/cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin
+ && /bin/rm -rf /usr/lib/vpp_plugins
+ && ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins
+ && (groupadd vpp || true)
+
+- name: Copy libcrypto.so.1.0.0 for CentOS
+ shell: >
+ docker run --rm -v /usr/lib64:/root/lib64-centos vpp-1710:virtio-patched
+ /bin/cp /lib/aarch64-linux-gnu/libcrypto.so.1.0.0 /root/lib64-centos/
+ args:
+ creates: /usr/lib64/libcrypto.so.1.0.0
+ when: ansible_os_family == "RedHat"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2 b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2
new file mode 100644
index 0000000..2efd4e0
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2
@@ -0,0 +1,68 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vpp-test1
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "vhostuser-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: vpp-test1
+ image: vpp-1710:virtio-patched
+ imagePullPolicy: "Never"
+ stdin: true
+ terminationMessagePath: /dev/termination-log
+ tty: true
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /vhost-user-net-plugin
+ name: vhost-user-net-plugin
+ - mountPath: /mnt/huge
+ name: huge
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
+ volumes:
+ - name: vhost-user-net-plugin
+ hostPath:
+ path: /var/lib/cni/vhostuser
+ - name: huge
+ hostPath:
+ path: /mnt/huge
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vpp-test2
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "vhostuser-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: vpp-test2
+ image: vpp-1710:virtio-patched
+ imagePullPolicy: "Never"
+ stdin: true
+ terminationMessagePath: /dev/termination-log
+ tty: true
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /vhost-user-net-plugin
+ name: vhost-user-net-plugin
+ - mountPath: /mnt/huge
+ name: huge
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
+ volumes:
+ - name: vhost-user-net-plugin
+ hostPath:
+ path: /var/lib/cni/vhostuser
+ - name: huge
+ hostPath:
+ path: /mnt/huge
diff --git a/src/arm/cni-deploy/vars/global b/src/arm/cni-deploy/vars/global
new file mode 100644
index 0000000..35d76b4
--- /dev/null
+++ b/src/arm/cni-deploy/vars/global
@@ -0,0 +1,20 @@
+multus_repo: https://github.com/Intel-Corp/multus-cni
+multus_commit: 61959e04
+
+sriov_repo: https://github.com/hustcat/sriov-cni
+sriov_commit: 8b7ed984
+
+vhostuser_repo: https://github.com/yibo-cai/vhost-user-net-plugin
+vhostuser_commit: e8dc9d8e
+
+macvlan_master: eth2
+macvlan_subnet: 192.168.166.0/24
+macvlan_range_start: 192.168.166.11
+macvlan_range_end: 192.168.166.30
+macvlan_gateway: 192.168.166.1
+
+sriov_master: eth2
+sriov_subnet: 192.168.166.0/24
+sriov_range_start: 192.168.166.31
+sriov_range_end: 192.168.166.50
+sriov_gateway: 192.168.166.1
diff --git a/src/arm/edge/gateway/MACCHIATObin/README.rst b/src/arm/edge/gateway/MACCHIATObin/README.rst
new file mode 100644
index 0000000..2082e5a
--- /dev/null
+++ b/src/arm/edge/gateway/MACCHIATObin/README.rst
@@ -0,0 +1,70 @@
+=================================================================
+Linux Kernel Build Guide on MACCHIATObin for Edge Infrastructure
+=================================================================
+
+The Marvell MACCHIATObin is a family of cost-effective and high-performance networking community boards targeting ARM 64-bit high-end networking and storage applications.
+With an offering that includes fully open-source software such as U-Boot, Linux, ODP and DPDK, the Marvell MACCHIATObin boards are optimal platforms for community developers and Independent Software Vendors (ISVs) to develop networking and storage applications.
+The default kernel configuration provided by Marvell does not meet the container's system requirements.
+We provide a kernel configuration file that has been verified on the MACCHIATObin board for developers to use, as well as a verified kernel image for the edge infrastructure deployment.
+
+
+Build From Source
+=================
+
+The procedure to build the kernel from source is almost the same as on other ARM64 platforms, but there are still some points you need to pay attention to on the MACCHIATObin board.
+
+Download Kernel Source::
+
+ mkdir -p ~/kernel/4.14.22
+ cd ~/kernel/4.14.22
+ git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell .
+ git checkout linux-4.14.22-armada-18.09
+
+Download MUSDK Package
+Marvell User-Space SDK (MUSDK) is a lightweight user-space I/O driver for Marvell's Embedded Networking SoCs. The MUSDK library provides simple and direct access to Marvell's SoC blocks for networking applications and networking infrastructure::
+
+ mkdir -p ~/musdk
+ git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell .
+ git checkout musdk-armada-18.09
+
+Patch Kernel
+Linux Kernel needs to be patched and built in order to run MUSDK on the MACCHIATObin board::
+
+ cd ~/kernel/4.14.22/
+ git am ~/musdk/patches/linux-4.14/*.patch
+
+Build & Install
+First, replace the default kernel configuration file with defconfig-mcbin-edge::
+
+ cp defconfig-mcbin-edge ~/kernel/4.14.22/arch/arm64/configs/mvebu_v8_lsp_defconfig
+
+and then compile the kernel::
+
+ export ARCH=arm64
+ make mvebu_v8_lsp_defconfig
+ make -j$(($(nproc)+1))
+
+ make modules_install
+ cp ./arch/arm64/boot/Image /boot/
+ cp ./arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtb /boot/
+
+A script is provided to facilitate the build of the kernel image; the developer needs to run it with root privileges::
+
+ ./setup-macbin-kernel.sh
+
+Quick Deployment
+================
+
+The image files in the compressed package can also be used to quickly set up the edge system; to do so, execute the following instructions::
+ git clone https://github.com/Jianlin-lv/Kernel-for-Edge-System.git
+ tar zxvf mcbin-double-shot-linux-4.14.22.tar.gz
+ cd mcbin-double-shot-linux-4.14.22
+ cp Image /boot/Image
+ cp armada-8040-mcbin.dtb /boot/armada-8040-mcbin.dtb
+ cp -rf ./lib/modules/4.14.22-armada-18.09.3-ge9aff6a-dirty/ /lib/modules/
+
+Other
+=====
+Marvell provides guidance on the build toolchain, file system and bootloader, which can be found at the link below:
+http://wiki.macchiatobin.net/tiki-index.php?page=Wiki+Home
+
diff --git a/src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge b/src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge
new file mode 100644
index 0000000..f1a26d6
--- /dev/null
+++ b/src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge
@@ -0,0 +1,590 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_VMAP_STACK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_BLK_DEV_THROTTLING=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_IPV6_SIT is not set
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_RT=y
+CONFIG_NFT_NUMGEN=y
+CONFIG_NFT_CT=y
+CONFIG_NFT_SET_HASH=y
+CONFIG_NFT_COUNTER=y
+CONFIG_NFT_LOG=y
+CONFIG_NFT_LIMIT=y
+CONFIG_NFT_MASQ=y
+CONFIG_NFT_REDIR=y
+CONFIG_NFT_NAT=y
+CONFIG_NFT_OBJREF=y
+CONFIG_NFT_QUEUE=y
+CONFIG_NFT_QUOTA=y
+CONFIG_NFT_REJECT=y
+CONFIG_NFT_HASH=y
+CONFIG_NETFILTER_XT_SET=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DCCP=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_IPVS=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_NFACCT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_REALM=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SCTP=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_IP_SET=y
+CONFIG_IP_SET_BITMAP_IP=y
+CONFIG_IP_SET_BITMAP_IPMAC=y
+CONFIG_IP_SET_BITMAP_PORT=y
+CONFIG_IP_SET_HASH_IP=y
+CONFIG_IP_SET_HASH_IPMARK=y
+CONFIG_IP_SET_HASH_IPPORT=y
+CONFIG_IP_SET_HASH_IPPORTIP=y
+CONFIG_IP_SET_HASH_IPPORTNET=y
+CONFIG_IP_SET_HASH_IPMAC=y
+CONFIG_IP_SET_HASH_MAC=y
+CONFIG_IP_SET_HASH_NETPORTNET=y
+CONFIG_IP_SET_HASH_NET=y
+CONFIG_IP_SET_HASH_NETNET=y
+CONFIG_IP_SET_HASH_NETPORT=y
+CONFIG_IP_SET_HASH_NETIFACE=y
+CONFIG_IP_SET_LIST_SET=y
+CONFIG_IP_VS=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=y
+CONFIG_IP_VS_WRR=y
+CONFIG_IP_VS_LC=y
+CONFIG_IP_VS_WLC=y
+CONFIG_IP_VS_FO=y
+CONFIG_IP_VS_OVF=y
+CONFIG_IP_VS_LBLC=y
+CONFIG_IP_VS_LBLCR=y
+CONFIG_IP_VS_DH=y
+CONFIG_IP_VS_SH=y
+CONFIG_IP_VS_SED=y
+CONFIG_IP_VS_NQ=y
+CONFIG_IP_VS_FTP=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_SOCKET_IPV4=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_CHAIN_ROUTE_IPV4=y
+CONFIG_NFT_DUP_IPV4=y
+CONFIG_NFT_FIB_IPV4=y
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NFT_CHAIN_NAT_IPV4=y
+CONFIG_NFT_MASQ_IPV4=y
+CONFIG_NFT_REDIR_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_NET_DSA=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BPF_JIT=y
+CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_BRCMSTB_GISB_ARB=y
+CONFIG_VEXPRESS_CONFIG=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_SRAM=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID456=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_THIN_PROVISIONING=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_IPVLAN=y
+CONFIG_VXLAN=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+CONFIG_NET_DSA_MV88E6XXX=y
+CONFIG_AMD_XGBE=y
+CONFIG_MACB=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+CONFIG_IXGBEVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MARVELL_10G_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SR9800=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_USB_NET_MCS7830=y
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_CAVIUM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_I2C_SLAVE=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_RESET_BRCMSTB=y
+CONFIG_POWER_RESET_VEXPRESS=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+# CONFIG_RC_CORE is not set
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_MV_UDC=y
+CONFIG_USB_MV_U3D=y
+CONFIG_USB_SNP_UDC_PLAT=y
+CONFIG_USB_BDC_UDC=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_DMADEVICES=y
+CONFIG_MV_XOR=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VFIO_PLATFORM=y
+CONFIG_VFIO_PLATFORM_XHCI_RESET=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_STAGING=y
+CONFIG_COMMON_CLK_VERSATILE=y
+CONFIG_CLK_SP810=y
+CONFIG_CLK_VEXPRESS_OSC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_HWSPINLOCK=y
+CONFIG_ARM_TIMER_SP804=y
+CONFIG_MAILBOX=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_PCC=y
+CONFIG_BCM_FLEXRM_MBOX=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_SAMSUNG_USB2=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_SOURCE_AXIM=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_SAFEXCEL=m
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA512_ARM64=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=y
+CONFIG_CRYPTO_CRC32_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_CHACHA20_NEON=y
+CONFIG_CRYPTO_AES_ARM64_BS=y
diff --git a/src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh b/src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh
new file mode 100644
index 0000000..c38ca9b
--- /dev/null
+++ b/src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+##################################################################
+#Set up linux kernel on MACCHIATObin for Edge Infrastructure #
+#This script does not support cross-compilation                 #
+##################################################################
+
+# Hardcoded Paths
+export ROOTDIR=${PWD}
+
+# Hardcoded Build_param
+export ARCH=arm64
+
+# Parameter Overridable Paths
+export KDIR=${ROOTDIR}/kernel/4.14.22
+export MUSDK_PATH=${ROOTDIR}/musdk
+export DECONFIG_MCBIN=${ROOTDIR}/defconfig-mcbin-edge
+
+echo -e "Please run shell script as root!"
+
+# Check file defconfig-mcbin-edge
+if [ ! -f "$DECONFIG_MCBIN" ]; then
+ echo -e "\tPlease copy defconfig-mcbin-edge to current directory!"
+ exit 1
+fi
+
+
+# Download Kernel Source
+echo -e "Download marvell linux 18.09..."
+mkdir -p $KDIR
+cd $KDIR
+#touch kernle-test
+git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell .
+git checkout linux-4.14.22-armada-18.09
+cd $ROOTDIR
+
+# Download MUSDK Package
+echo -e "Download MUSDK package 18.09..."
+mkdir -p $MUSDK_PATH
+cd $MUSDK_PATH
+#touch musdk-test
+git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell .
+git checkout musdk-armada-18.09
+cd $ROOTDIR
+
+#Patch kernel
+cd $KDIR
+echo -e "Patch kernel..."
+#touch patch_kernel
+git am $MUSDK_PATH/patches/linux-4.14/*.patch
+
+# Check file defconfig-mcbin-edge
+if [ ! -f "$DECONFIG_MCBIN" ]; then
+ echo -e "\tPlease copy defconfig-mcbin-edge to $ROOTDIR!"
+ exit 1
+fi
+
+
+# Build Kernel
+echo -e "Backup mvebu_v8_lsp_defconfig"
+mv $KDIR/arch/arm64/configs/mvebu_v8_lsp_defconfig $KDIR/arch/arm64/configs/mvebu_v8_lsp_defconfig.bac
+echo -e "Replace kernel config with defconfig-mcbin-edge"
+cp $DECONFIG_MCBIN $KDIR/arch/arm64/configs/mvebu_v8_lsp_defconfig
+echo -e "Build Kernel..."
+make mvebu_v8_lsp_defconfig
+make -j$(($(nproc)+1))
+
+#Install Kernel
+echo -e "Install Kernel..."
+make modules_install
+cp ./arch/arm64/boot/Image /boot/
+cp ./arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtb /boot/
+
+echo -e "Success! Please reboot!"
+
diff --git a/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh b/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh
new file mode 100755
index 0000000..941b917
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -e
+
+cd ../cni-deploy
+
+DEPLOY_SCENARIO="k8-vpp-nofeature-noha"
+
+export ANSIBLE_HOST_KEY_CHECKING=False
+
+virtualenv .venv
+source .venv/bin/activate
+pip install ansible==2.6.1
+
+#deploy flannel, multus
+ansible-playbook -i inventory/inventory.cfg deploy.yml --tags flannel,multus
+#deploy vhost-vpp
+ansible-playbook -i inventory/inventory.cfg deploy.yml --tags vhost-vpp
diff --git a/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh b/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh
new file mode 100755
index 0000000..fa7aa53
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -e
+
+
+sudo apt-get install -y docker.io libvirt-bin virt-manager qemu qemu-efi
+
+WORKSPACE=`pwd`
+if [ ! -d "$WORKSPACE/compass4nfv" ]; then
+ git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+fi
+
+#rm -rf compass4nfv
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+
+cd compass4nfv
+
+COMPASS_WORK_DIR=$WORKSPACE/../compass-work
+mkdir -p $COMPASS_WORK_DIR
+ln -s $COMPASS_WORK_DIR work
+
+sudo docker rm -f `docker ps | grep compass | cut -f1 -d' '` || true
+
+curl -s http://people.linaro.org/~yibo.cai/compass/compass4nfv-arm64-fixup.sh | bash || true
+
+./build.sh
diff --git a/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh b/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh
new file mode 100755
index 0000000..21082b3
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -e
+
+cd compass4nfv
+
+
+export ADAPTER_OS_PATTERN='(?i)CentOS-7.*arm.*'
+export OS_VERSION="centos7"
+export KUBERNETES_VERSION="v1.9.1"
+
+
+#For virtual environment:
+export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
+export NETWORK="deploy/conf/vm_environment/network.yml"
+export VIRT_NUMBER=2 VIRT_CPUS=8 VIRT_MEM=8192 VIRT_DISK=50G
+
+./deploy.sh
diff --git a/src/arm/kubernetes_vpp_vhostuser/setup.sh b/src/arm/kubernetes_vpp_vhostuser/setup.sh
new file mode 100755
index 0000000..ae30803
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/setup.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+echo "Now build:"
+./k8s-build.sh
+
+sleep 2
+echo "Now deploy VMs:"
+./k8s-deploy.sh
+
+sleep 2
+echo "Now deploy vpp_vhostuser:"
+./deploy-cni.sh
diff --git a/src/arm/openwrt_demo/1_buildimage/Dockerfile b/src/arm/openwrt_demo/1_buildimage/Dockerfile
new file mode 100644
index 0000000..5b6fc22
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/Dockerfile
@@ -0,0 +1,22 @@
+FROM openwrt/build/base
+
+ADD resources /root/resources
+
+RUN mkdir -p /root/certs/keys \
+ && mv /root/resources/keys/* /root/certs/keys/ \
+ && mv /root/certs/keys/vpn-server-cert.pem /etc/ipsec.d/certs/ \
+ && mv /root/certs/keys/vpn-server-key.pem /etc/ipsec.d/private/ \
+ && mv /root/resources/strongswan/* /etc/strongswan.d/ \
+ && mv /root/resources/ipsec/* /etc/ \
+ && mv /root/resources/config/firewall /etc/config/ \
+ && mv /root/resources/config/network /etc/config/ \
+ && mv /root/resources/config/uhttpd /etc/config/ \
+ && mv /root/resources/config/firewall.user /etc/ \
+ && mv /root/resources/bin/* /etc/init.d/ \
+ && ln -s /etc/init.d/getips /etc/rc.d/S20getips \
+ && ln -s /etc/init.d/getips /etc/rc.d/K90getips \
+ && ln -s /etc/init.d/setroutes /etc/rc.d/S99setroutes \
+ && ln -s /etc/init.d/setroutes /etc/rc.d/K99setroutes \
+ && rm -rf /root/resources/
+
+USER root
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/bin/getips b/src/arm/openwrt_demo/1_buildimage/resources/bin/getips
new file mode 100644
index 0000000..3c68e95
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/bin/getips
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+nwfn='/etc/config/network'
+gwPost=".1"
+nwPost=".0"
+
+ethname='eth0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+dirtyIp=$(grep ipaddr $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==1')
+dirtyGw=$(grep gateway $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==1')
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expNetPrefix$gwPost
+sed -i "s/$dirtyIp/$ipeth/g" $nwfn
+sed -i "s/$dirtyGw/$expGw/g" $nwfn
+
+
+ethname='net0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+dirtyIp=$(grep ipaddr $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==2')
+dirtyGw=$(grep gateway $nwfn | grep -v "127.0.0.1" | awk '{print $3}' | sed "s/'//g" | awk 'NR==2')
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expNetPrefix$gwPost
+sed -i "s/$dirtyIp/$ipeth/g" $nwfn
+sed -i "s/$dirtyGw/$expGw/g" $nwfn
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes b/src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes
new file mode 100644
index 0000000..540a235
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/bin/setroutes
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+nwfn='/etc/config/network'
+gwPost=".1"
+nwPost=".0"
+maskPost="/16"
+defaultgw="0.0.0.0/0"
+
+ethname='eth0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+expGwPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expGwPrefix$gwPost
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2)
+expNet=$expNetPrefix$nwPost$nwPost$maskPost
+echo "$expNet, $expGw, $ethname"
+ip route add $expNet via $expGw dev $ethname
+
+
+ethname='net0'
+ipeth=$(ifconfig $ethname |grep "inet addr" | cut -d: -f2 | awk '{print $1}')
+expGwPrefix=$(echo $ipeth | cut -d. -f 1,2,3)
+expGw=$expGwPrefix$gwPost
+expNetPrefix=$(echo $ipeth | cut -d. -f 1,2)
+expNet=$expNetPrefix$nwPost$nwPost$maskPost
+ip route add $expNet via $expGw dev $ethname
+ip route add $defaultgw via $expGw
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/firewall b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall
new file mode 100644
index 0000000..faa8851
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall
@@ -0,0 +1,149 @@
+
+config rule
+ option name '-testcustomer'
+ option src '*'
+ option src_ip '192.168.10.1/32'
+ option dest '*'
+ option dest_ip '151.101.0.0/16'
+ option target 'REJECT'
+
+config rule
+ option name 'Allow-DHCP-Renew'
+ option src 'wan'
+ option proto 'udp'
+ option dest_port '68'
+ option target 'ACCEPT'
+ option family 'ipv4'
+
+config rule
+ option name 'Allow-Ping'
+ option src 'wan'
+ option proto 'icmp'
+ option icmp_type 'echo-request'
+ option family 'ipv4'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-IGMP'
+ option src 'wan'
+ option proto 'igmp'
+ option family 'ipv4'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-DHCPv6'
+ option src 'wan'
+ option proto 'udp'
+ option src_ip 'fc00::/6'
+ option dest_ip 'fc00::/6'
+ option dest_port '546'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-MLD'
+ option src 'wan'
+ option proto 'icmp'
+ option src_ip 'fe80::/10'
+ list icmp_type '130/0'
+ list icmp_type '131/0'
+ list icmp_type '132/0'
+ list icmp_type '143/0'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-ICMPv6-Input'
+ option src 'wan'
+ option proto 'icmp'
+ list icmp_type 'echo-request'
+ list icmp_type 'echo-reply'
+ list icmp_type 'destination-unreachable'
+ list icmp_type 'packet-too-big'
+ list icmp_type 'time-exceeded'
+ list icmp_type 'bad-header'
+ list icmp_type 'unknown-header-type'
+ list icmp_type 'router-solicitation'
+ list icmp_type 'neighbour-solicitation'
+ list icmp_type 'router-advertisement'
+ list icmp_type 'neighbour-advertisement'
+ option limit '1000/sec'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option name 'Allow-ICMPv6-Forward'
+ option src 'wan'
+ option dest '*'
+ option proto 'icmp'
+ list icmp_type 'echo-request'
+ list icmp_type 'echo-reply'
+ list icmp_type 'destination-unreachable'
+ list icmp_type 'packet-too-big'
+ list icmp_type 'time-exceeded'
+ list icmp_type 'bad-header'
+ list icmp_type 'unknown-header-type'
+ option limit '1000/sec'
+ option family 'ipv6'
+ option target 'ACCEPT'
+
+config rule
+ option target 'ACCEPT'
+ option src 'lan'
+ option proto 'esp'
+ option src_ip '192.168.10.0/24'
+ option dest '*'
+ option name 'ipsecin'
+
+config rule
+ option target 'ACCEPT'
+ option proto 'esp'
+ option src '*'
+ option dest 'lan'
+ option dest_ip '192.168.10.0/24'
+ option name 'ipsecout'
+
+config rule
+ option target 'ACCEPT'
+ option proto 'udp'
+ option src 'lan'
+ option dest_port '500'
+ option name 'ipsec'
+
+config rule
+ option target 'ACCEPT'
+ option name '-ipsecnat'
+ option proto 'udp'
+ option src 'lan'
+ option dest_port '4500'
+
+config defaults
+ option syn_flood '1'
+ option input 'ACCEPT'
+ option output 'ACCEPT'
+ option forward 'REJECT'
+
+config zone
+ option name 'lan'
+ list network 'lan'
+ option input 'ACCEPT'
+ option output 'ACCEPT'
+ option forward 'ACCEPT'
+
+config zone
+ option name 'wan'
+ list network 'wan'
+ list network 'wan6'
+ option input 'REJECT'
+ option output 'ACCEPT'
+ option forward 'REJECT'
+ option masq '1'
+ option mtu_fix '1'
+
+config forwarding
+ option src 'lan'
+ option dest 'wan'
+
+config include
+ option path '/etc/firewall.user'
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user
new file mode 100644
index 0000000..ab61136
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/firewall.user
@@ -0,0 +1,9 @@
+# This file is interpreted as shell script.
+# Put your custom iptables rules here, they will
+# be executed with each firewall (re-)start.
+
+# Internal uci firewall chains are flushed and recreated on reload, so
+# put custom rules into the root chains e.g. INPUT or FORWARD or into the
+# special user chains, e.g. input_wan_rule or postrouting_lan_rule.
+iptables -t nat -A POSTROUTING -s 192.168.10.0/24 -o eth0 -m policy --pol ipsec --dir out -j ACCEPT
+iptables -t nat -A POSTROUTING -s 192.168.10.0/24 -o eth0 -j MASQUERADE
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/network b/src/arm/openwrt_demo/1_buildimage/resources/config/network
new file mode 100644
index 0000000..eef18e8
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/network
@@ -0,0 +1,27 @@
+
+config interface 'loopback'
+ option ifname 'lo'
+ option proto 'static'
+ option ipaddr '127.0.0.1'
+ option netmask '255.0.0.0'
+
+config globals 'globals'
+ option ula_prefix 'fd5f:b3f4:4633::/48'
+
+config interface 'lan'
+ option ifname 'eth0'
+ option proto 'static'
+ option ipaddr '10.244.1.42'
+ option netmask '255.255.255.0'
+ option gateway '10.244.1.1'
+
+config interface 'wan'
+ option ifname 'net0'
+ option proto 'dhcp'
+
+config route 'r6'
+ option interface 'eth0'
+ option target '10.244.0.0'
+ option netmask '255.255.0.0'
+ option gateway '10.244.1.1'
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd b/src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd
new file mode 100644
index 0000000..fe0691d
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/config/uhttpd
@@ -0,0 +1,24 @@
+
+config uhttpd 'main'
+ list listen_http '0.0.0.0:80'
+ option redirect_https '1'
+ option home '/www'
+ option rfc1918_filter '1'
+ option max_requests '3'
+ option max_connections '100'
+ option cert '/etc/uhttpd.crt'
+ option key '/etc/uhttpd.key'
+ option cgi_prefix '/cgi-bin'
+ option script_timeout '60'
+ option network_timeout '30'
+ option http_keepalive '20'
+ option tcp_keepalive '1'
+ option ubus_prefix '/ubus'
+
+config cert 'px5g'
+ option days '730'
+ option bits '2048'
+ option country 'ZZ'
+ option state 'Somewhere'
+ option location 'Unknown'
+ option commonname 'OpenWrt'
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf
new file mode 100644
index 0000000..9310276
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.conf
@@ -0,0 +1,29 @@
+config setup
+ charondebug="ike 1, knl 1, cfg 0"
+ uniqueids=no
+
+conn ikev2-vpn
+ auto=add
+ compress=no
+ type=tunnel
+ keyexchange=ikev2
+ fragmentation=yes
+ forceencaps=yes
+ ike=aes256-sha1-modp1024,3des-sha1-modp1024!
+ esp=aes256-sha1,3des-sha1!
+ dpdaction=clear
+ dpddelay=300s
+ rekey=no
+ left=%any
+ leftid=testvpn
+ leftcert=/etc/ipsec.d/certs/vpn-server-cert.pem
+ leftsendcert=always
+ leftsubnet=0.0.0.0/0
+ right=%any
+ rightid=%any
+ rightauth=eap-mschapv2
+ rightdns=8.8.8.8,8.8.4.4
+ rightsourceip=192.168.10.0/24
+ rightsendcert=never
+ eap_identity=%identity
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets
new file mode 100644
index 0000000..da553b7
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/ipsec/ipsec.secrets
@@ -0,0 +1,5 @@
+# /etc/ipsec.secrets - strongSwan IPsec secrets file
+testvpn : RSA "/etc/ipsec.d/private/vpn-server-key.pem"
+test %any% : EAP "arm"
+test2 %any% : EAP "arm"
+test3 %any% : EAP "arm"
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem
new file mode 100644
index 0000000..f1b654d
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-ca.pem
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFLDCCAxSgAwIBAgIIPOALX6JnyDUwDQYJKoZIhvcNAQEMBQAwNDELMAkGA1UE
+BhMCQ04xEzARBgNVBAoTClZQTiBTZXJ2ZXIxEDAOBgNVBAMTB3Rlc3R2cG4wHhcN
+MTcwNDE4MDgwNzA3WhcNMjcwNDE2MDgwNzA3WjA0MQswCQYDVQQGEwJDTjETMBEG
+A1UEChMKVlBOIFNlcnZlcjEQMA4GA1UEAxMHdGVzdHZwbjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPg9YIcsvvXz9Zqepxx+92yBf/YkPFewortfvMay
+boF6AnJtGJB0JSpSm+ZPSrCk6GtBYvQit70OCwnXXDJQ/PDdE2aal1fD6/5wHugW
+yPWgNFIW1SuzEjxiTIIb13wYuJJ9eIev5jptyqn+obTwPHlKc8NfLGy22qpwR6i+
+y2Za4nblZUWuQkMaGshRJVF0SijGuIStZ0//JyzCge4ZPLVxcvSBhN/922FmZCog
+L9HNrI+1Q32Cv97UB5N4k3U2PWgzWGJWG4GVYKePIEniMPfdwHjwcinlAZ0WD3CA
+7shA5+0DjH/NX57H+0+QE4/CGT71jBziM0cBhNRd7S80nIy/WKNaVseuVa4Z3T9F
+XkfPPl0udCSEBwSOiURgAQNh2lljL3T4okb/MueuhMiR1mQfS/NXx0zbl95R7UcJ
+CAsZKhLxJ3eE0cxX9q+VWsXKizXAyi1puIXTfM6+xU1X1/c2FZtDIarxMqoBQylC
+H/HLJLwuoiROAEqtZLFYSAHkCDG9sDQ+UeD8fogL388R12L6+JaXf7UEFawByjEZ
+MEzRkUtt1fLXl0O3HuaoQKa5Q+sKSk/IYTynvRR7znEaVnZnK3yty+yNPBYdrsR4
+N/EXq6rIWwR6vdRxAae/BlOpKN76mFlN6g5DXBvguxFgEPbvAe7Eb/Hs7v9WPUni
+i9IlAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBSO1seRVuAcyN00/9Xlp19EPxlJiDANBgkqhkiG9w0BAQwFAAOCAgEA
+sZfhybZRUOzUrQOtyLhLinUbuOtj6lF9E9lxT9Wh8lu5hN2jXj5fh2c5THWMDvns
+4cIYO+3hbFYDRdL7lrtMLQU9YwhFGXixgCqBj3PxRdsiha0xSIVbTzFe8O01HA/l
+62KsGjZ2UWpTC2FYvmszDseAjcZ+SCCeQRyNrBdZ5UPsAnq5xnf6X+9JlRxuF4OE
+H6XPQJNhtx06VRD3dWSTkyNlmAARCXJKCCG/3s35ccSDG7AnMnc6b6uU0IEQ1WE1
+csvpHDdt2ianYMbafGlcL/B3UcvkpFPQ6aAQH2x5Kx6nUWOmBwksHcF/x8UtIAKJ
+QDRAGraefyMpHgTdLqm4FRjWsf5uoRvvjwvnKb4Waz59RUgKnBv5vVKVUSFhn0rf
+gHqDN969fkzbkedokxjUzMS+nNXBaI9zhVLlBpClXBdNUMamuUlPFq5jI+YF/G8J
+kDjoi+l9D/yCmMVTNdLn2WyXVmy2kJvovB7RsquPzcXlYJksGOSPq3EJC9iAtYz7
+NSgIHG9mnBAAetKRD3OMLSwYj2UyVOOKmOodiC1xbhT8+F/3F/d4vpN2oBdkfUgJ
+LmdIoLcgPe0mamfCwYY1pSibpDXRaXkrozoZBl+6r2PFQITLNiYGbjxpdyJffHAg
+rAqFWYHrARbKNHNUsV2TfOJD4XmAAdZO7YwZ3gNmniY=
+-----END CERTIFICATE-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem
new file mode 100644
index 0000000..48056be
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/server-root-key.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJJwIBAAKCAgEA+D1ghyy+9fP1mp6nHH73bIF/9iQ8V7Ciu1+8xrJugXoCcm0Y
+kHQlKlKb5k9KsKToa0Fi9CK3vQ4LCddcMlD88N0TZpqXV8Pr/nAe6BbI9aA0UhbV
+K7MSPGJMghvXfBi4kn14h6/mOm3Kqf6htPA8eUpzw18sbLbaqnBHqL7LZlriduVl
+Ra5CQxoayFElUXRKKMa4hK1nT/8nLMKB7hk8tXFy9IGE3/3bYWZkKiAv0c2sj7VD
+fYK/3tQHk3iTdTY9aDNYYlYbgZVgp48gSeIw993AePByKeUBnRYPcIDuyEDn7QOM
+f81fnsf7T5ATj8IZPvWMHOIzRwGE1F3tLzScjL9Yo1pWx65VrhndP0VeR88+XS50
+JIQHBI6JRGABA2HaWWMvdPiiRv8y566EyJHWZB9L81fHTNuX3lHtRwkICxkqEvEn
+d4TRzFf2r5VaxcqLNcDKLWm4hdN8zr7FTVfX9zYVm0MhqvEyqgFDKUIf8cskvC6i
+JE4ASq1ksVhIAeQIMb2wND5R4Px+iAvfzxHXYvr4lpd/tQQVrAHKMRkwTNGRS23V
+8teXQ7ce5qhAprlD6wpKT8hhPKe9FHvOcRpWdmcrfK3L7I08Fh2uxHg38Rerqshb
+BHq91HEBp78GU6ko3vqYWU3qDkNcG+C7EWAQ9u8B7sRv8ezu/1Y9SeKL0iUCAwEA
+AQKCAgBNP3xMVEZQb0xcg0ZpfbEtGNdjFz+X4iWhvVcXVetBa2Bbj0t3mE0AcJiH
+AOGzOn4A8mYCptMah8Yzl8re9Yjgw0sIQM8bxqInmWhkvMJofSQK74QCh0UDeWtp
+iZRyz5aQL29Ueg5g3E2WvOBBWAjZjaucfn9qjTRamXoTLtxIy7txWE09c8625uay
+s12zjUaOjdhZoURnBnWAXj7kgwH7TISDRdK9iVe9ZYmB+mYnGaO7TKLl6cwfYUfC
+QmFQtkJBrMiyQS1qE7vyKH3ZwAOQ/naoq9o640KvSXAgiF7F/jyt6s7L7nL1DDJO
+Pf14XORSTUL+sf1W+UgGdfwbFnooTXUf/SdaWC5KTYYxUwaTXcfYvFl/UJjhDXZ3
+C2n1szJ7MF0yOlUeUsmbGGCJwCq7kwvMZk8faueiqB2R5m741EI+LekEiktySWbg
+/J16sp3/4BGHq8OAXCl0FwPvlYGygT9F2GRKzMZX9oJkEryvMp+ObO6ViJrMAah7
+1gIFTVlubN483NjWjKGh79fdcfaccGU8l66COauXp6J2aFeSC9ajEfBggUiOWGiQ
+XazTUGlzW6pDHDaQB8kBcjZru3brb2j+6tX9ipVp3hpDTpymvNvtGS3v0V0Oyfta
+qbadqiRAlo+8vaGkxkEGhoFwWJ6rRatW7qc4ZpcxNFO4scGPeQKCAQEA/dcqVDAC
+oK105KLv9T0B9iGvoJeupJLswnNZbqzxQD5HDh9imjNxowJGgUat4FtSOYJP1eA2
+gFqmEIIduRYPNNmGxaT9ctSz1sO3Qz6+2zyJ2Xk3HTEwY3QLns8beANNvwjvqgJp
+vKgFWsY0GqD8XKHzeZYdAadxi2ZjZPRi0MU04daahp6WNsWNNexnRQHVSp0w5p6x
+FwSxKmmo/1x897rZf4R7Uc9nLwom9nYaKBR2HeqR+ySXGJUIyQ7E9lBkphVLYoDq
+AlIpQ0GycwJmHkWkCgmBOXfgPsYMe/Ir8WfXUabzrOyk8wGKFp4/QJYGeKizFqip
+2tQkBAxmpiIJuwKCAQEA+loDk83z+zJY8YmOlqcBJ7obid3SAjJr69MuYJuuv985
+SQhOhwLye8xvcW77oQfW2jjP/i425IcJAqCJCgpyJebSttrXnygYETbbQUhyfDcd
+zlgFHri9Wmu5gOskEsdUsoB+NjR6m6KcDbeYuiCQKkPOc0ITG6NtwE1RTvaW36kr
+bPN2YXEfK7+tA/i2LnGA2O5z0FJ+Nlnx1pJ9rAqpkpR04tocMX0NTJ637xVtGDFX
+r8Y2RtOaQeTfjDA+7v4syeSuOIch96iR89swsC13kujtMoS3G0CuCeL569JC31I7
+A/IvG5mWTdiDfDxq9sv0s2whRJwjHi+L0Xa+QyNlnwKCAQAi3usOs6W4wvta6VND
+gkUBtfD1g8DXFOP3dncjsBhYNfX257LY8hY7SXW8DqSWPJVYFyG2hN2X1lwXyngg
+0/n0zako/5hdrQCjkTFcyILZhUB+optCpF48W1W5VEQ2wWVtx+F8nmY+J2rM5IuF
+2PWyGAFlg4yqjIEZoFApLzVf7qdsGtoRgjmqfor+jGJHZZASdvOfys8TFW7tH6S6
+p873DTERxnZWb8KCAMgHdYP0W5M6Wt4A/S7QjrCtRh0ipTqeYjB/8Ku08+p9Nco4
+6Gx03iZBxrp81Y31salHYaZNvHEk42V4LO4f/+cjYkvYKIPtEWfAxhzHVfs4nyd+
+zRA/AoIBAEDk7GB31nKazmtt2MQ8bhQ6LcFC+pkPMOJkT3VDZbzexB6mRJTCstBc
+YdbpidhoC81tRJ0Cpb//MNq5ekxcANLKTnyPpazf2706lwMJIIQKVXOTZWBdStgR
+bHh6e1NS0CWlIRIz8EQ/lmwH11MH9da+1NkTm5hieKSMZjMtwFYhp9wKD/maNRZG
+DTcmVTMcwOV6ihLKD2VPU1znhCQAb4xLZzEWkJBTdgsSaWNUDn9i6vPpUVBysV27
+UicoqmeRA1MiL/b/MFLeI1cuziQc5Q3zyuh5dm1eCr8NUvNKAYOZ8SpIsOVanpd3
+ND4T+zYWEEwiD02Vm5TLhla5jQAiQMkCggEAbH5n7lyHfMl+Mft+EI58wPG85+Ih
+592FGSpQGYw4KtBhYOpD4mfoEi4PHHB72S41LSIAIbksXrXjIlpxlRUJWha0TxvJ
++/8fwMA1GRZ87eAwo4lkx+8kBt/5ipH0w+zO8mt83HYZz7Cnl3yU3iejSk6jeIAN
+12PYo17GetZWkuNu5PgwuaiaT/Hoy6WcFbCB+U3p+s7e93Fdk8sDrUq06P9Lpkp1
+goeAxtKKgpelmpBSKyDm3biYbd+32SrCp2wiMr30K1ttDAh8rinaS44atEnG58Ep
+8XTWeLsOVo72l3mvnLGTTzrmW9rFcCXTN7zeRugl7lehINn9bkg64kz57A==
+-----END RSA PRIVATE KEY-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem
new file mode 100644
index 0000000..7edbbe1
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-cert.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFQDCCAyigAwIBAgIILxhLDcigK7IwDQYJKoZIhvcNAQEMBQAwNDELMAkGA1UE
+BhMCQ04xEzARBgNVBAoTClZQTiBTZXJ2ZXIxEDAOBgNVBAMTB3Rlc3R2cG4wHhcN
+MTcwNDE4MDgwNzE1WhcNMjIwNDE3MDgwNzE1WjA0MQswCQYDVQQGEwJDTjETMBEG
+A1UEChMKVlBOIFNlcnZlcjEQMA4GA1UEAxMHdGVzdHZwbjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAJ1m81Tj1/QJCw8rD3euk69ffLBxGh5sZ8vCn0dM
+mSXzU0xI5wv6Ss5tJsVCvesr741K3x+hgj6cdLj0UneGpSKz3ULn0+m7gACM401o
+Ms51aVEagz+O0fe9wWDZ+82xMXAw/bSvrMs34co8OofKF26WH6mPHxSkCU6edudm
+063zwQwlmvqeFhoxUvZtM65iUSQZrWuxBZkmEPfwfZz8E8v94xs40QicYl/gOoPP
+sgbzlsLQEqJAGrhC8HsMaNicr8n2Iie1PBxfhTdn/nqA4oQCrp5az28xGrjsNVXJ
+teTZTo0Nyg60bMbdR7rN5StWdDolzd/DKr8Jy3J/7xbgGHDftDnqMKLtsUPe+4Mi
+euLw3y1DkOZGt85dw05C/LbRupaZL3Yk7ehi+xPzNC6e3ssqKNjbffjtqDh3Ol3b
+5QmhBUoULWDzB9wSfwHueOFPptOK2c2pQh7U2bPcalXMwf6sCWdx3TokniLvAhxH
+8alBINZJ7ZSgA9vyH1KUzT5+5nXhPayXOXwvIEqNvig84bApCglIkO6jty1jZ79X
+Nd4TwOWuJSav4WQn3+t+5GWvrZzsuABzLruUcWTwdNA64Yw4AzwJoU6RZMbcHGPf
+bAofOtXn7H7ncrvWAahpFDmNge0GBsXSmTp01FBMEOdnnRG2b+C8dJyZpNPlr2si
+5oKJAgMBAAGjVjBUMB8GA1UdIwQYMBaAFI7Wx5FW4BzI3TT/1eWnX0Q/GUmIMBIG
+A1UdEQQLMAmCB3Rlc3R2cG4wHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFCAIC
+MA0GCSqGSIb3DQEBDAUAA4ICAQBPGakMBK6Wsc1zAwkogsKtnStU1tq1IaOAUfpN
+cANuP0n3gsAD3aFFYKHsecL2nC2YO+HgmXZNJUTlcSftVSA+roZKqS6gxEwH75xC
+ponFnqrVnzEP7mLTA4/DQGfTRcGBTY5JEr9BUZsl+sD5XeekAKQOtTq2du+1tFQU
+aJlqwv39a+D7dPGfue2jHlIC48b0HyFpL7gGPidB9QDWjKVC8ZBaf0RDqNy70Qyh
+a1iAbSAsWzHvEvwkUAVyk8+oRNwd0IPmbRyKZXLNXIqHsYmdXgfK7o+vF1Qv30rn
+U2OwFqpGLsmo7CGI9fDjWUqoGn5hJJppvvP3cjXqhgMsa/dxel9dQMs8ERIO4rkP
+YJUmH5RSZwyc1iAfikaAHFRy0zauK1sHX2DPg+xyY/FzU4bfdKQTZYEBzIgBoN4q
+fmGY2EuApH/Z4BAGk9RostQIOmXcbm0/PAZDMgCS7Ms7ONbm9y2dssuY5f2rURBh
+xsANB/D8lzTzHFOtxwgTRFuQ69SO8Q7htKK/+bGe2YhqgFi53M6FT2EDOiCPfG4t
+d437KMXyQzXSkBJYVwSM5xHvc1xMWH14YK2AZFbmCRGp9Iv5GJBd04Eb9ziU0iDi
+DtUoqjP9XWO3nf7CiJPIna6G0LXYDKjNz1vUzbmLeDnw8hSqQJbn7lp4VqF1pI0o
+taHEkA==
+-----END CERTIFICATE-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem
new file mode 100644
index 0000000..6d48ac4
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/keys/vpn-server-key.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEAnWbzVOPX9AkLDysPd66Tr198sHEaHmxny8KfR0yZJfNTTEjn
+C/pKzm0mxUK96yvvjUrfH6GCPpx0uPRSd4alIrPdQufT6buAAIzjTWgyznVpURqD
+P47R973BYNn7zbExcDD9tK+syzfhyjw6h8oXbpYfqY8fFKQJTp5252bTrfPBDCWa
++p4WGjFS9m0zrmJRJBmta7EFmSYQ9/B9nPwTy/3jGzjRCJxiX+A6g8+yBvOWwtAS
+okAauELwewxo2JyvyfYiJ7U8HF+FN2f+eoDihAKunlrPbzEauOw1Vcm15NlOjQ3K
+DrRsxt1Hus3lK1Z0OiXN38MqvwnLcn/vFuAYcN+0Oeowou2xQ977gyJ64vDfLUOQ
+5ka3zl3DTkL8ttG6lpkvdiTt6GL7E/M0Lp7eyyoo2Nt9+O2oOHc6XdvlCaEFShQt
+YPMH3BJ/Ae544U+m04rZzalCHtTZs9xqVczB/qwJZ3HdOiSeIu8CHEfxqUEg1knt
+lKAD2/IfUpTNPn7mdeE9rJc5fC8gSo2+KDzhsCkKCUiQ7qO3LWNnv1c13hPA5a4l
+Jq/hZCff637kZa+tnOy4AHMuu5RxZPB00DrhjDgDPAmhTpFkxtwcY99sCh861efs
+fudyu9YBqGkUOY2B7QYGxdKZOnTUUEwQ52edEbZv4Lx0nJmk0+WvayLmgokCAwEA
+AQKCAgACn+UiPEeH8GUp8EsVG65R1AGndwarQnM+IBgJZ4cdDVhCmxYYL+jyqs+v
+pcfhLaBpqUQ8b1Q1cKTXxyzUr6RIsSSuZot07VKVH8RJWy8QByrrBmpGbhPFTI7r
+4KKiCMilC7oVRYHNTy3xh3d8jL5Uh/C4oklvmIx2WO6CYrIYUip88NJrXVALc9kD
+wSklvkpcU93INYwnzuJ6TJJ+9+s+wT8PKi9pHs4bS2kVCiqYUqmSmhyEw6p2ZK5a
+HOg8JsEIePVTlxeXdlvJg1nzJ2ZE3X8Ve/iTbJy66pG5RRsRI8hQp43VQ4VV+7oE
+F5tmRuN6qzyfii4dVUpaSx9nW3P+L3hES6B7y4cE6hmQH09/qkpXoyb3JkXyRGZv
+QLkm/7ywSwmxjsS2a3YmpLffEZZHgccunWZAvCHHb2L4Cf17VY3hB1q7Bc+ANKIr
+KT32fUqH5MAt3usY0J3eaJ8RheKh9irJWxju/fxKFFQA9vesBxl1hZ3GYC7NQQky
+RAffn1FPW5e3OOrBfp374q1TxLVqmMtY+djh1OqlLaoXoAU+3+qrkM+Kt4mZVThz
+0G2MSujgLnDf00nddLMyNApnbcYoa1/mTYKaSpkTkHZpXjvBWn/GapK5NGfdDFiM
+JzW5J/Y8aXhBujdmW7/QxDW6aLcRKTtyML2wiYDDA/SyVfTukQKCAQEAzpUXU9W8
+dZ7Na1LXuNxAJWx4peRCV9UUE+6XQlcchPb6slLfTGrjSp1Bkgz8YR4yuypq8KG3
+DTEw5BPjkamgsYE8yzu1Etv6j2uGo/F+C7RdsRIu2jjGXrGFH+DNc+hEvuMEMk4x
+c9S1Xf4Gazllcmy01qJ+OZR2vvCdysU46IR+OnlZ9hH0b3DSOmhDMEJhArvjalVu
+4GPHGlC+0+y/wF5kXiQoPp4CzSNQ8Gho/KuhcCH+lqwkA2qqht/TzXvzeHvyiOVY
++cWCwdkYdxEt932OEfGw3hlfiyvsHIrq8svG7c+6VwKc0R9hHSuUQCDhbtnb4nT2
+lqwyBG3D6xKxWQKCAQEAww4bxRLpgwnCgqgtpK8buTj/DdesiQIDA5fAU4eYKKnL
+RHFfTiBU8pg55UONiQP0AciGefKWkfRaoX34/Jk27oJiA4MoWOOd71W1d7tjeq6C
+oVC0Jw7zxrq/LlrmbIz2jzOPP5upw9KyCApzAfv6Q3eOHuYpTCV7FTpeQQ2FObVu
+W4CsjiAudMt5OZDOEsv2Uxd44NUxHhQd3+uz+G/2VhF711LkNLrf7X5toDGTn3nJ
+R8QptNYMojf+p3rud24pFdGGuZ0hyqTrK94TBheMOM93LENNcevnBjPwQ7PO6tDE
+Y2SrigE1CSGFYOR/HvqrpXOKvM1xAPByx/6JesiEsQKCAQBprc9vLanpKcHAI3MD
+uHiALItTof9mWzSYNbffUhze0FHTI53jw9Jeey/t/QKm1AHzyXFHhBLWhtGR+7Kw
+82unIovtE7A/45S8Ba+s8n8ekbhUOw8Ix36DNqD5e9DeeHWiiRO+gE3ACZJ2cNrr
+w0LoVD/2hM25uv88Em9GKbpBCHZih23D+c9nqvmAs5GbgHmMIn3mCapc0+4owiG8
+3CID0MXbevezgLXCJ0zijycWCt7dNCa/AXSy4sA1mw8I0V3txsp9yYXI0IdhjyN6
+1akEMJCbEV7/X0+HLILu3wnuBtzPDzMuC8IZIMpXV9HRNIDeakiYAmmbDp/PsC9H
+dBqRAoIBAQCbQa6e9gfCktEteLoj/HG/w/tYRFSEFYLaqD6g/iwCKeyuxOMMZ7XW
+B48FyvhsmCXwCXHovUxWTr6ZDpFSVo4f2M41Z3+FCWBb8cfoztJHA4Lc7kUHVeJ6
+S4kDV71Tp/xVTb/27Gt7gEjPF6olaTDx5MbOF3vFrYvEANqnQyDJJ334/XncAweX
+VaJfTMCKu6iMyQEhTPC0tWR2KMHuvQfByFbftI4K3riA7IJL4UpUxPaO1jgwRbR2
+psVe//2yOJAhWs63Dbio+Q5rs29HCRVG3vRH2iZZyGDyUgMrkILh61x2lNnplj5l
+zzXAQwBgYzyfDFHhKFGLYtiqEhPSFKtxAoIBAEbqAGd7MrKmBNiXKo22EFeaZg3N
+w7yr9EVkuGKBWSy5pwQJ2o7lGUjEXsrUzNnWiRTuLkaGnngfgqY9L1PMQJQ65fas
+OGY34pguI8OiLPtveUuKL3dAMN5eeKV7kdpDZgtVKvWAANNWW7oaZV7OQT3wpqF1
+G19ObcrCVKfws3CDFO2KcUFbNesNubTuACCzB/j/jfx/X4UI5kkdZJxrcK6sK88G
+nqTFe0KNm9ud6HQe0eqmusk/jmf8ifOSHAONT8I9JBmpo4vpxSqoADnEajWc6MJt
+UTCk/WTnd6hG6ER6VuhhqzWjJ/dSrEpIR0LxGkHp9hO68c/BeVAbOmS6Q5Y=
+-----END RSA PRIVATE KEY-----
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf
new file mode 100644
index 0000000..c91421d
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon-logging.conf
@@ -0,0 +1,62 @@
+charon {
+
+ # Section to define file loggers, see LOGGER CONFIGURATION in
+ # strongswan.conf(5).
+ filelog {
+
+ # <filename> is the full path to the log file.
+ # <filename> {
+
+ # Loglevel for a specific subsystem.
+ # <subsystem> = <default>
+
+ # If this option is enabled log entries are appended to the existing
+ # file.
+ # append = yes
+
+ # Default loglevel.
+ # default = 1
+
+ # Enabling this option disables block buffering and enables line
+ # buffering.
+ # flush_line = no
+
+ # Prefix each log entry with the connection name and a unique
+ # numerical identifier for each IKE_SA.
+ # ike_name = no
+
+ # Prefix each log entry with a timestamp. The option accepts a
+ # format string as passed to strftime(3).
+ # time_format =
+
+ # }
+
+ }
+
+ # Section to define syslog loggers, see LOGGER CONFIGURATION in
+ # strongswan.conf(5).
+ syslog {
+
+ # Identifier for use with openlog(3).
+ # identifier =
+
+ # <facility> is one of the supported syslog facilities, see LOGGER
+ # CONFIGURATION in strongswan.conf(5).
+ # <facility> {
+
+ # Loglevel for a specific subsystem.
+ # <subsystem> = <default>
+
+ # Default loglevel.
+ # default = 1
+
+ # Prefix each log entry with the connection name and a unique
+ # numerical identifier for each IKE_SA.
+ # ike_name = no
+
+ # }
+
+ }
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf
new file mode 100644
index 0000000..5cab2b1
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/charon.conf
@@ -0,0 +1,281 @@
+# Options for the charon IKE daemon.
+charon {
+
+ # Maximum number of half-open IKE_SAs for a single peer IP.
+ # block_threshold = 5
+
+ # Whether relations in validated certificate chains should be cached in
+ # memory.
+ # cert_cache = yes
+
+ # Send Cisco Unity vendor ID payload (IKEv1 only).
+ # cisco_unity = no
+
+ # Close the IKE_SA if setup of the CHILD_SA along with IKE_AUTH failed.
+ # close_ike_on_child_failure = no
+
+ # Number of half-open IKE_SAs that activate the cookie mechanism.
+ # cookie_threshold = 10
+
+ # Use ANSI X9.42 DH exponent size or optimum size matched to cryptographic
+ # strength.
+ # dh_exponent_ansi_x9_42 = yes
+
+ # DNS server assigned to peer via configuration payload (CP).
+ # dns1 =
+
+ # DNS server assigned to peer via configuration payload (CP).
+ # dns2 =
+
+ # Enable Denial of Service protection using cookies and aggressiveness
+ # checks.
+ # dos_protection = yes
+
+ # Compliance with the errata for RFC 4753.
+ # ecp_x_coordinate_only = yes
+
+ # Free objects during authentication (might conflict with plugins).
+ # flush_auth_cfg = no
+
+ # Maximum size (in bytes) of a sent fragment when using the proprietary
+ # IKEv1 fragmentation extension.
+ # fragment_size = 512
+
+ # Name of the group the daemon changes to after startup.
+ # group =
+
+ # Timeout in seconds for connecting IKE_SAs (also see IKE_SA_INIT DROPPING).
+ # half_open_timeout = 30
+
+ # Enable hash and URL support.
+ # hash_and_url = no
+
+ # Allow IKEv1 Aggressive Mode with pre-shared keys as responder.
+ # i_dont_care_about_security_and_use_aggressive_mode_psk = no
+
+ # A space-separated list of routing tables to be excluded from route
+ # lookups.
+ # ignore_routing_tables =
+
+ # Maximum number of IKE_SAs that can be established at the same time before
+ # new connection attempts are blocked.
+ # ikesa_limit = 0
+
+ # Number of exclusively locked segments in the hash table.
+ # ikesa_table_segments = 1
+
+ # Size of the IKE_SA hash table.
+ # ikesa_table_size = 1
+
+ # Whether to close IKE_SA if the only CHILD_SA closed due to inactivity.
+ # inactivity_close_ike = no
+
+ # Limit new connections based on the current number of half open IKE_SAs,
+ # see IKE_SA_INIT DROPPING in strongswan.conf(5).
+ # init_limit_half_open = 0
+
+ # Limit new connections based on the number of queued jobs.
+ # init_limit_job_load = 0
+
+ # Causes charon daemon to ignore IKE initiation requests.
+ # initiator_only = no
+
+ # Install routes into a separate routing table for established IPsec
+ # tunnels.
+ # install_routes = yes
+
+ # Install virtual IP addresses.
+ # install_virtual_ip = yes
+
+ # The name of the interface on which virtual IP addresses should be
+ # installed.
+ # install_virtual_ip_on =
+
+ # Check daemon, libstrongswan and plugin integrity at startup.
+ # integrity_test = no
+
+ # A comma-separated list of network interfaces that should be ignored, if
+ # interfaces_use is specified this option has no effect.
+ # interfaces_ignore =
+
+ # A comma-separated list of network interfaces that should be used by
+ # charon. All other interfaces are ignored.
+ # interfaces_use =
+
+ # NAT keep alive interval.
+ # keep_alive = 20s
+
+ # Plugins to load in the IKE daemon charon.
+ # load =
+
+ # Determine plugins to load via each plugin's load option.
+ # load_modular = no
+
+ # Maximum packet size accepted by charon.
+ # max_packet = 10000
+
+ # Enable multiple authentication exchanges (RFC 4739).
+ # multiple_authentication = yes
+
+ # WINS servers assigned to peer via configuration payload (CP).
+ # nbns1 =
+
+ # WINS servers assigned to peer via configuration payload (CP).
+ # nbns2 =
+
+ # UDP port used locally. If set to 0 a random port will be allocated.
+ # port = 500
+
+ # UDP port used locally in case of NAT-T. If set to 0 a random port will be
+ # allocated. Has to be different from charon.port, otherwise a random port
+ # will be allocated.
+ # port_nat_t = 4500
+
+ # Process RTM_NEWROUTE and RTM_DELROUTE events.
+ # process_route = yes
+
+ # Delay in ms for receiving packets, to simulate larger RTT.
+ # receive_delay = 0
+
+ # Delay request messages.
+ # receive_delay_request = yes
+
+ # Delay response messages.
+ # receive_delay_response = yes
+
+ # Specific IKEv2 message type to delay, 0 for any.
+ # receive_delay_type = 0
+
+ # Size of the AH/ESP replay window, in packets.
+ # replay_window = 32
+
+ # Base to use for calculating exponential back off, see IKEv2 RETRANSMISSION
+ # in strongswan.conf(5).
+ # retransmit_base = 1.8
+
+ # Timeout in seconds before sending first retransmit.
+ # retransmit_timeout = 4.0
+
+ # Number of times to retransmit a packet before giving up.
+ # retransmit_tries = 5
+
+ # Interval to use when retrying to initiate an IKE_SA (e.g. if DNS
+ # resolution failed), 0 to disable retries.
+ # retry_initiate_interval = 0
+
+ # Initiate CHILD_SA within existing IKE_SAs.
+ # reuse_ikesa = yes
+
+ # Numerical routing table to install routes to.
+ # routing_table =
+
+ # Priority of the routing table.
+ # routing_table_prio =
+
+ # Delay in ms for sending packets, to simulate larger RTT.
+ # send_delay = 0
+
+ # Delay request messages.
+ # send_delay_request = yes
+
+ # Delay response messages.
+ # send_delay_response = yes
+
+ # Specific IKEv2 message type to delay, 0 for any.
+ # send_delay_type = 0
+
+ # Send strongSwan vendor ID payload
+ # send_vendor_id = no
+
+ # Number of worker threads in charon.
+ # threads = 16
+
+ # Name of the user the daemon changes to after startup.
+ # user =
+
+ crypto_test {
+
+ # Benchmark crypto algorithms and order them by efficiency.
+ # bench = no
+
+ # Buffer size used for crypto benchmark.
+ # bench_size = 1024
+
+ # Number of iterations to test each algorithm.
+ # bench_time = 50
+
+ # Test crypto algorithms during registration (requires test vectors
+ # provided by the test-vectors plugin).
+ # on_add = no
+
+ # Test crypto algorithms on each crypto primitive instantiation.
+ # on_create = no
+
+ # Strictly require at least one test vector to enable an algorithm.
+ # required = no
+
+ # Whether to test RNG with TRUE quality; requires a lot of entropy.
+ # rng_true = no
+
+ }
+
+ host_resolver {
+
+ # Maximum number of concurrent resolver threads (they are terminated if
+ # unused).
+ # max_threads = 3
+
+ # Minimum number of resolver threads to keep around.
+ # min_threads = 0
+
+ }
+
+ leak_detective {
+
+ # Includes source file names and line numbers in leak detective output.
+ # detailed = yes
+
+ # Threshold in bytes for leaks to be reported (0 to report all).
+ # usage_threshold = 10240
+
+ # Threshold in number of allocations for leaks to be reported (0 to
+ # report all).
+ # usage_threshold_count = 0
+
+ }
+
+ processor {
+
+ # Section to configure the number of reserved threads per priority class
+ # see JOB PRIORITY MANAGEMENT in strongswan.conf(5).
+ priority_threads {
+
+ }
+
+ }
+
+ tls {
+
+ # List of TLS encryption ciphers.
+ # cipher =
+
+ # List of TLS key exchange methods.
+ # key_exchange =
+
+ # List of TLS MAC algorithms.
+ # mac =
+
+ # List of TLS cipher suites.
+ # suites =
+
+ }
+
+ x509 {
+
+ # Discard certificates with unsupported or unknown critical extensions.
+ # enforce_critical = yes
+
+ }
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf
new file mode 100644
index 0000000..297c0f8
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/pool.conf
@@ -0,0 +1,12 @@
+pool {
+
+ # Database URI for the database that stores IP pools and configuration
+ # attributes. If it contains a password, make sure to adjust the
+ # permissions of the config file accordingly.
+ # database =
+
+ # Plugins to load in ipsec pool tool.
+ # load =
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf
new file mode 100644
index 0000000..8465f7e
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/starter.conf
@@ -0,0 +1,10 @@
+starter {
+
+ # Plugins to load in starter.
+ # load =
+
+ # Disable charon plugin load option warning.
+ # load_warning = yes
+
+}
+
diff --git a/src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf
new file mode 100644
index 0000000..a3ab099
--- /dev/null
+++ b/src/arm/openwrt_demo/1_buildimage/resources/strongswan/tools.conf
@@ -0,0 +1,21 @@
+openac {
+
+ # Plugins to load in ipsec openac tool.
+ # load =
+
+}
+
+pki {
+
+ # Plugins to load in ipsec pki tool.
+ # load =
+
+}
+
+scepclient {
+
+ # Plugins to load in ipsec scepclient tool.
+ # load =
+
+}
+
diff --git a/src/cni/multus/install_cni.sh b/src/cni/multus/install_cni.sh
deleted file mode 100644
index 0d1ee6b..0000000
--- a/src/cni/multus/install_cni.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-export PATH=/usr/local/go/bin:$PATH
-apt-get update && apt-get install -y wget
-rm -rf multus-cni
-wget -qO- https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
-git clone https://github.com/Intel-Corp/multus-cni
-cd multus-cni; bash ./build
-cp bin/multus /opt/cni/bin
-cp /etc/kube-cnimultus/cni-conf.json /etc/cni/net.d/05-multus.conf
diff --git a/src/vagrant/kubeadm/Vagrantfile b/src/vagrant/kubeadm/Vagrantfile
new file mode 100644
index 0000000..dc5efb1
--- /dev/null
+++ b/src/vagrant/kubeadm/Vagrantfile
@@ -0,0 +1,34 @@
+$num_workers=2
+
+Vagrant.require_version ">= 1.8.6"
+Vagrant.configure("2") do |config|
+
+ config.vm.box = "ceph/ubuntu-xenial"
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.memory = 4096
+ libvirt.cpus = 4
+ end
+
+ config.vm.define "registry" do |config|
+ config.vm.hostname = "registry"
+ #config.vm.provision "shell", path: "registry_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.5"
+ end
+
+ config.vm.define "master" do |config|
+ config.vm.hostname = "master"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.provision "shell", path: "master_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.10"
+ end
+
+ (1 .. $num_workers).each do |i|
+ config.vm.define vm_name = "worker%d" % [i] do |config|
+ config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.provision "shell", path: "worker_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.#{i+20}"
+ end
+ end
+
+end
diff --git a/src/vagrant/kubeadm/deploy.sh b/src/vagrant/kubeadm/deploy.sh
new file mode 100755
index 0000000..eb61ad8
--- /dev/null
+++ b/src/vagrant/kubeadm/deploy.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+DIR="$(dirname `readlink -f $0`)"
+
+cd $DIR
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/kata/nginx-app.sh"
+vagrant ssh master -c "/vagrant/virtlet/virtlet.sh"
diff --git a/src/vagrant/kubeadm/host_setup.sh b/src/vagrant/kubeadm/host_setup.sh
new file mode 100644
index 0000000..1cb46f6
--- /dev/null
+++ b/src/vagrant/kubeadm/host_setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.5 registry
+192.168.1.10 master
+192.168.1.21 worker1
+192.168.1.22 worker2
+192.168.1.23 worker3
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y --allow-unauthenticated kubelet=1.12.2-00 kubeadm=1.12.2-00 kubectl=1.12.2-00 kubernetes-cni=0.6.0-00
+echo 'Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"' | sudo tee /etc/default/kubelet
+echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
+sudo modprobe br_netfilter
+sudo modprobe nf_conntrack_ipv4
+
+sudo swapoff -a
+sudo systemctl daemon-reload
+sudo systemctl stop kubelet
+sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_istio/istio/bookinfo.sh b/src/vagrant/kubeadm/istio/bookinfo.sh
index cc09167..c4eef11 100755
--- a/src/vagrant/kubeadm_istio/istio/bookinfo.sh
+++ b/src/vagrant/kubeadm/istio/bookinfo.sh
@@ -21,7 +21,10 @@ cd /vagrant/istio-source/
export PATH=$PWD/bin:$PATH
# Run the test application: bookinfo
-kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/platform/kube/bookinfo.yaml)
+
+# Define the ingress gateway for the application
+kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
# Wait for bookinfo deployed
kubectl get services
@@ -36,6 +39,6 @@ do
done
# Validate the bookinfo app
-export GATEWAY_URL=$(kubectl get po -l istio=ingress -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingress -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
+export GATEWAY_URL=$(kubectl get po -l istio=ingressgateway -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage
diff --git a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh b/src/vagrant/kubeadm/istio/clean_bookinfo.sh
index ede825f..7c539c0 100755
--- a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh
+++ b/src/vagrant/kubeadm/istio/clean_bookinfo.sh
@@ -21,7 +21,9 @@ cd /vagrant/istio-source/
export PATH=$PWD/bin:$PATH
# Clean up bookinfo
-echo "" | samples/bookinfo/kube/cleanup.sh
+echo "" | samples/bookinfo/platform/kube/cleanup.sh
-istioctl get routerules
+kubectl get virtualservices
+kubectl get destinationrules
+kubectl get gateway
kubectl get pods
diff --git a/src/vagrant/kubeadm_istio/istio/deploy.sh b/src/vagrant/kubeadm/istio/deploy.sh
index 4abc856..e896580 100755
--- a/src/vagrant/kubeadm_istio/istio/deploy.sh
+++ b/src/vagrant/kubeadm/istio/deploy.sh
@@ -35,21 +35,13 @@ echo 'export PATH="$PATH:/vagrant/istio-source/bin"' >> ~/.bashrc
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
-kubectl apply -f install/kubernetes/istio.yaml
+# Install Istio’s Custom Resource Definitions first
+kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml
-# Install the sidecar injection configmap
-./install/kubernetes/webhook-create-signed-cert.sh \
- --service istio-sidecar-injector \
- --namespace istio-system \
- --secret sidecar-injector-certs
-kubectl apply -f install/kubernetes/istio-sidecar-injector-configmap-release.yaml
+# Wait 30s for Kubernetes to register the Istio CRDs
+sleep 30
-# Install the sidecar injector webhook
-cat install/kubernetes/istio-sidecar-injector.yaml | \
- ./install/kubernetes/webhook-patch-ca-bundle.sh > \
- install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-kubectl apply -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-kubectl -n istio-system get deployment -listio=sidecar-injector
+kubectl apply -f install/kubernetes/istio-demo.yaml
# Validate the installation
kubectl get svc -n istio-system
@@ -61,6 +53,6 @@ while [ $r -ne "0" ]
do
sleep 30
kubectl get pods -n istio-system
- r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running' | wc -l)
+ r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running|Completed' | wc -l)
done
diff --git a/src/vagrant/kubeadm/istio/istio.sh b/src/vagrant/kubeadm/istio/istio.sh
new file mode 100755
index 0000000..9c2caf6
--- /dev/null
+++ b/src/vagrant/kubeadm/istio/istio.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+/vagrant/istio/deploy.sh
+/vagrant/istio/bookinfo.sh
+/vagrant/istio/clean_bookinfo.sh
+
diff --git a/src/vagrant/kubeadm/kata/containerd.service b/src/vagrant/kubeadm/kata/containerd.service
new file mode 100644
index 0000000..1ae7fe8
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/containerd.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+
+Delegate=yes
+KillMode=process
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+LimitNOFILE=infinity
+# Comment TasksMax if your systemd version does not supports it.
+# Only systemd 226 and above support this version.
+TasksMax=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/src/vagrant/kubeadm/kata/kata_setup.sh b/src/vagrant/kubeadm/kata/kata_setup.sh
new file mode 100644
index 0000000..1fd77b5
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/kata_setup.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+ARCH=$(arch)
+BRANCH="${BRANCH:-stable-1.7}"
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
+curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
+sudo -E apt-get update
+sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
+
+wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc6/runc.amd64
+sudo cp runc.amd64 /usr/sbin/runc
+sudo chmod 755 /usr/sbin/runc
+wget http://github.com/containerd/containerd/releases/download/v1.2.2/containerd-1.2.2.linux-amd64.tar.gz >& /dev/null
+sudo tar -C /usr/local -xzf containerd-1.2.2.linux-amd64.tar.gz
+wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz >& /dev/null
+sudo tar -C /usr/local/bin -xzf crictl-v1.13.0-linux-amd64.tar.gz
+echo "runtime-endpoint: unix:///run/containerd/containerd.sock" | sudo tee /etc/crictl.yaml
+wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/critest-v1.13.0-linux-amd64.tar.gz >& /dev/null
+sudo tar C /usr/local/bin -xzf critest-v1.13.0-linux-amd64.tar.gz
+sudo cp /vagrant/kata/containerd.service /etc/systemd/system/
+sudo systemctl start containerd
+sudo mkdir -p /opt/cni/bin
+sudo mkdir -p /etc/cni/net.d
+sudo mkdir -p /etc/containerd
+containerd config default | sudo tee /etc/containerd/config.toml
+sudo sed -i "s,\[plugins.cri.registry.mirrors\],\[plugins.cri.registry.mirrors\]\n \[plugins.cri.registry.mirrors.\"registry:5000\"\]\n endpoint = \[\"http://registry:5000\"\]," /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_type.*/runtime_type=\"io.containerd.runtime.v1.linux\"/" /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_engine.*/runtime_engine=\"kata-runtime\"/" /etc/containerd/config.toml
+sudo systemctl restart containerd
+
+cat << EOF | sudo tee /etc/systemd/system/kubelet.service.d/0-containerd.conf
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+EOF
+
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm/kata/nginx-app.sh b/src/vagrant/kubeadm/kata/nginx-app.sh
new file mode 100755
index 0000000..fb9540e
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/nginx-app.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+kubectl delete services --all
+kubectl delete rc --all
+kubectl delete pods --all
+kubectl create -f /vagrant/kata/nginx-app.yaml
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+r=0
+while [ "$r" -eq "0" ]
+do
+ sleep 30
+ r=$(kubectl get pods | grep Running | wc -l)
+done
+svcip=$(kubectl get services nginx -o json | grep clusterIP | cut -f4 -d'"')
+wget http://$svcip
diff --git a/src/vagrant/kubeadm/kata/nginx-app.yaml b/src/vagrant/kubeadm/kata/nginx-app.yaml
new file mode 100644
index 0000000..9de4ef4
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/nginx-app.yaml
@@ -0,0 +1,33 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: nginx
+ labels:
+ app: nginx
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ protocol: TCP
+ name: http
+ selector:
+ app: nginx
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: nginx
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: nginx
+ annotations:
+ io.kubernetes.cri.untrusted-workload: "true"
+ spec:
+ containers:
+ - name: nginx
+ image: nginx
+ ports:
+ - containerPort: 80
diff --git a/src/vagrant/kubeadm/master_setup.sh b/src/vagrant/kubeadm/master_setup.sh
new file mode 100644
index 0000000..cec8877
--- /dev/null
+++ b/src/vagrant/kubeadm/master_setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -ex
+
+sudo apt-get update
+sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ software-properties-common
+
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce=18.03.1~ce-0~ubuntu
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries": ["registry:5000"]
+}
+EOF
+sudo service docker restart
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f http://git.io/weave-kube-1.6
+kubectl apply -f /vagrant/multus/cni_multus.yml
diff --git a/src/vagrant/kubeadm/multus/Dockerfile b/src/vagrant/kubeadm/multus/Dockerfile
new file mode 100644
index 0000000..7923d0d
--- /dev/null
+++ b/src/vagrant/kubeadm/multus/Dockerfile
@@ -0,0 +1,10 @@
+FROM ubuntu:16.04
+ENV PATH="/usr/local/go/bin:$PATH"
+WORKDIR /go/src/
+RUN apt-get update && apt-get install -y wget git gcc
+RUN wget -qO- https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
+RUN git clone https://github.com/Intel-Corp/multus-cni
+RUN cd multus-cni; bash ./build
+
+FROM busybox
+COPY --from=0 /go/src/multus-cni/bin/multus /root
diff --git a/src/vagrant/kubeadm_multus/examples/busybox.yaml b/src/vagrant/kubeadm/multus/busybox.yaml
index 7fd1b8d..7fd1b8d 100644
--- a/src/vagrant/kubeadm_multus/examples/busybox.yaml
+++ b/src/vagrant/kubeadm/multus/busybox.yaml
diff --git a/src/cni/multus/kube_cni_multus.yml b/src/vagrant/kubeadm/multus/cni_multus.yml
index cd91737..123392b 100644
--- a/src/cni/multus/kube_cni_multus.yml
+++ b/src/vagrant/kubeadm/multus/cni_multus.yml
@@ -8,7 +8,7 @@ metadata:
kind: ConfigMap
apiVersion: v1
metadata:
- name: kube-cnimultus-cfg
+ name: cnimultus-cfg
namespace: kube-system
labels:
tier: node
@@ -45,7 +45,7 @@ data:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
- name: kube-cnimultus-ds
+ name: cnimultus-ds
namespace: kube-system
labels:
tier: node
@@ -65,31 +65,24 @@ spec:
operator: Exists
effect: NoSchedule
serviceAccountName: cnimultus
- initContainers:
- - name: install-cni
- image: ubuntu:16.04
- command:
- - bash
- - "-c"
- - "apt-get update && apt-get install -y git && git clone http://github.com/opnfv/container4nfv && cd container4nfv && git fetch https://gerrit.opnfv.org/gerrit/container4nfv refs/changes/81/47681/5 && git checkout FETCH_HEAD && bash ./src/cni/multus/install_cni.sh"
+ containers:
+ - name: run-cni
+ image: registry:5000/multus-cni:latest
+ command: ['sh', '-c', 'cp /multus/cni-conf.json /etc/cni/net.d/05-multus.conf; cp /root/multus /opt/cni/bin; while true; do sleep 10000; done' ]
volumeMounts:
- name: cni-bin
mountPath: /opt/cni/bin
- - name: cni-cfg
+ - name: etc-cni
mountPath: /etc/cni/net.d
- name: cnimultus-cfg
- mountPath: /etc/kube-cnimultus
- containers:
- - name: run-cni
- image: busybox:1.27.2
- command: ['sh', '-c', 'while true; do sleep 10000; done' ]
+ mountPath: /multus/
volumes:
- name: cni-bin
hostPath:
path: /opt/cni/bin
- - name: cni-cfg
+ - name: etc-cni
hostPath:
path: /etc/cni/net.d
- name: cnimultus-cfg
configMap:
- name: kube-cnimultus-cfg
+ name: cnimultus-cfg
diff --git a/src/vagrant/kubeadm_multus/examples/multus.sh b/src/vagrant/kubeadm/multus/multus.sh
index d7b39a0..9461a6f 100755
--- a/src/vagrant/kubeadm_multus/examples/multus.sh
+++ b/src/vagrant/kubeadm/multus/multus.sh
@@ -24,7 +24,7 @@ do
done
kubectl delete rc --all
-kubectl apply -f /vagrant/examples/busybox.yaml
+kubectl apply -f /vagrant/multus/busybox.yaml
r="0"
while [ $r -ne "2" ]
do
diff --git a/src/vagrant/kubeadm/registry_setup.sh b/src/vagrant/kubeadm/registry_setup.sh
new file mode 100644
index 0000000..5466f1c
--- /dev/null
+++ b/src/vagrant/kubeadm/registry_setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.5 registry
+EOF
+
+sudo apt-get update
+sudo apt-get install -y docker.io
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries": ["registry:5000"]
+}
+EOF
+sudo service docker restart
+
+sudo docker pull registry:2
+sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+sudo docker build . -f /vagrant/multus/Dockerfile -t multus-cni
+sudo docker tag multus-cni localhost:5000/multus-cni
+sudo docker push localhost:5000/multus-cni
diff --git a/src/vagrant/kubeadm/virtlet/cirros-vm.yaml b/src/vagrant/kubeadm/virtlet/cirros-vm.yaml
new file mode 100644
index 0000000..334142b
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/cirros-vm.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: cirros-vm
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ # CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
+ VirtletDiskDriver: virtio
+ # inject ssh keys via cloud-init
+ VirtletSSHKeys: |
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
+ # set root volume size
+ VirtletRootVolumeSize: 1Gi
+spec:
+ # This nodeAffinity specification tells Kubernetes to run this
+ # pod only on the nodes that have extraRuntime=virtlet label.
+ # This label is used by Virtlet DaemonSet to select nodes
+ # that must have Virtlet runtime
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: cirros-vm
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: virtlet.cloud/cirros
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for `kubectl attach -t` to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
diff --git a/src/vagrant/kubeadm/virtlet/images.yaml b/src/vagrant/kubeadm/virtlet/images.yaml
new file mode 100644
index 0000000..1541ca7
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/images.yaml
@@ -0,0 +1,3 @@
+translations:
+ - name: cirros
+ url: https://github.com/mirantis/virtlet/releases/download/v0.9.3/cirros.img
diff --git a/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml b/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml
new file mode 100644
index 0000000..1bb4882
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml
@@ -0,0 +1,521 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ runtime: virtlet
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ runtime: virtlet
+ name: virtlet
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - command:
+ - /libvirt.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: libvirt
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /sys/fs/cgroup
+ name: cgroup
+ - mountPath: /lib/modules
+ name: modules
+ readOnly: true
+ - mountPath: /boot
+ name: boot
+ readOnly: true
+ - mountPath: /run
+ name: run
+ - mountPath: /var/lib/virtlet
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/run/libvirt
+ name: libvirt-sockets
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /dev
+ name: dev
+ - image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: virtlet
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/run/virtlet.sock </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /run
+ name: run
+ - mountPath: /lib/modules
+ name: modules
+ readOnly: true
+ - mountPath: /boot
+ name: boot
+ readOnly: true
+ - mountPath: /dev
+ name: dev
+ - mountPath: /var/lib/virtlet
+ mountPropagation: Bidirectional
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/run/libvirt
+ name: libvirt-sockets
+ - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: Bidirectional
+ name: k8s-pods-dir
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /etc/virtlet/images
+ name: image-name-translations
+ - mountPath: /var/log/pods
+ name: pods-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /var/run/netns
+ mountPropagation: Bidirectional
+ name: netns-dir
+ - command:
+ - /vms.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: vms
+ resources: {}
+ volumeMounts:
+ - mountPath: /var/lib/virtlet
+ mountPropagation: HostToContainer
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: HostToContainer
+ name: k8s-pods-dir
+ - mountPath: /dev
+ name: dev
+ - mountPath: /lib/modules
+ name: modules
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ initContainers:
+ - command:
+ - /prepare-node.sh
+ env:
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ - name: VIRTLET_DISABLE_KVM
+ valueFrom:
+ configMapKeyRef:
+ key: disable_kvm
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_SRIOV_SUPPORT
+ valueFrom:
+ configMapKeyRef:
+ key: sriov_support
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DOWNLOAD_PROTOCOL
+ valueFrom:
+ configMapKeyRef:
+ key: download_protocol
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_LOGLEVEL
+ valueFrom:
+ configMapKeyRef:
+ key: loglevel
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CALICO_SUBNET
+ valueFrom:
+ configMapKeyRef:
+ key: calico-subnet
+ name: virtlet-config
+ optional: true
+ - name: IMAGE_REGEXP_TRANSLATION
+ valueFrom:
+ configMapKeyRef:
+ key: image_regexp_translation
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_RAW_DEVICES
+ valueFrom:
+ configMapKeyRef:
+ key: raw_devices
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DISABLE_LOGGING
+ valueFrom:
+ configMapKeyRef:
+ key: disable_logging
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CPU_MODEL
+ valueFrom:
+ configMapKeyRef:
+ key: cpu-model
+ name: virtlet-config
+ optional: true
+ - name: KUBELET_ROOT_DIR
+ valueFrom:
+ configMapKeyRef:
+ key: kubelet_root_dir
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_IMAGE_TRANSLATIONS_DIR
+ value: /etc/virtlet/images
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: prepare-node
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /kubelet-volume-plugins
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /run
+ name: run
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ - mountPath: /hostlog
+ name: log
+ - mountPath: /host-var-lib
+ name: var-lib
+ - mountPath: /dev
+ name: dev
+ - mountPath: /var/lib/virtlet
+ name: virtlet
+ serviceAccountName: virtlet
+ volumes:
+ - hostPath:
+ path: /dev
+ name: dev
+ - hostPath:
+ path: /sys/fs/cgroup
+ name: cgroup
+ - hostPath:
+ path: /lib/modules
+ name: modules
+ - hostPath:
+ path: /boot
+ name: boot
+ - hostPath:
+ path: /run
+ name: run
+ - hostPath:
+ path: /var/run/docker.sock
+ name: dockersock
+ - hostPath:
+ path: /var/lib/virtlet
+ name: virtlet
+ - hostPath:
+ path: /var/lib/libvirt
+ name: libvirt
+ - hostPath:
+ path: /var/log
+ name: log
+ - hostPath:
+ path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+ name: k8s-flexvolume-plugins-dir
+ - hostPath:
+ path: /var/lib/kubelet/pods
+ name: k8s-pods-dir
+ - hostPath:
+ path: /var/lib
+ name: var-lib
+ - hostPath:
+ path: /var/log/virtlet/vms
+ name: vms-log
+ - hostPath:
+ path: /var/log/libvirt
+ name: libvirt-log
+ - hostPath:
+ path: /var/run/libvirt
+ name: libvirt-sockets
+ - hostPath:
+ path: /var/log/pods
+ name: pods-log
+ - hostPath:
+ path: /var/run/netns
+ name: netns-dir
+ - configMap:
+ name: virtlet-image-translations
+ name: image-name-translations
+ updateStrategy: {}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ verbs:
+ - create
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: configmap-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-userdata-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: kubelet-node-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: configmap-reader
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:nodes
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: vm-userdata-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-userdata-reader
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+- apiGroups:
+ - virtlet.k8s
+ resources:
+ - virtletimagemappings
+ - virtletconfigmappings
+ verbs:
+ - list
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-crd
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletimagemappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletImageMapping
+ plural: virtletimagemappings
+ shortNames:
+ - vim
+ singular: virtletimagemapping
+ scope: Namespaced
+ version: v1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletconfigmappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletConfigMapping
+ plural: virtletconfigmappings
+ shortNames:
+ - vcm
+ singular: virtletconfigmapping
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ config:
+ properties:
+ calicoSubnetSize:
+ maximum: 32
+ minimum: 0
+ type: integer
+ cniConfigDir:
+ type: string
+ cniPluginDir:
+ type: string
+ cpuModel:
+ type: string
+ criSocketPath:
+ type: string
+ databasePath:
+ type: string
+ disableKVM:
+ type: boolean
+ disableLogging:
+ type: boolean
+ downloadProtocol:
+ pattern: ^https?$
+ type: string
+ enableRegexpImageTranslation:
+ type: boolean
+ enableSriov:
+ type: boolean
+ fdServerSocketPath:
+ type: string
+ imageDir:
+ type: string
+ imageTranslationConfigsDir:
+ type: string
+ kubeletRootDir:
+ type: string
+ libvirtURI:
+ type: string
+ logLevel:
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ rawDevices:
+ type: string
+ skipImageTranslation:
+ type: boolean
+ streamPort:
+ maximum: 65535
+ minimum: 1
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ type: object
+ priority:
+ type: integer
+ version: v1
+
diff --git a/src/vagrant/kubeadm/virtlet/virtlet.sh b/src/vagrant/kubeadm/virtlet/virtlet.sh
new file mode 100755
index 0000000..4ed527e
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -ex
+
+kubectl label node worker1 extraRuntime=virtlet
+kubectl label node worker2 extraRuntime=virtlet
+kubectl create configmap -n kube-system virtlet-config --from-literal=download_protocol=http --from-literal=image_regexp_translation=1 --from-literal=disable_kvm=y
+kubectl create configmap -n kube-system virtlet-image-translations --from-file /vagrant/virtlet/images.yaml
+kubectl create -f /vagrant/virtlet/virtlet-ds.yaml
+
+kubectl delete pod --all
+kubectl create -f /vagrant/virtlet/cirros-vm.yaml
+r="0"
+while [ $r -ne "1" ]
+do
+ r=$(kubectl get pods cirros-vm | grep Running | wc -l)
+ sleep 60
+done
+sleep 360
+kubectl get pods cirros-vm -o custom-columns=:.status.podIP | xargs ping -c 4
+echo 'login by user:cirros & password:gocubsgo'
diff --git a/src/vagrant/kubeadm/virtlet/virtlet_setup.sh b/src/vagrant/kubeadm/virtlet/virtlet_setup.sh
new file mode 100644
index 0000000..b2dfaa0
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+wget https://github.com/Mirantis/criproxy/releases/download/v0.14.0/criproxy_0.14.0_amd64.deb
+echo "criproxy criproxy/primary_cri select containerd" | sudo debconf-set-selections
+sudo dpkg -i criproxy_0.14.0_amd64.deb
+sudo sed -i "s/EnvironmentFile/#EnvironmentFile/" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm/worker_setup.sh b/src/vagrant/kubeadm/worker_setup.sh
new file mode 100644
index 0000000..6b08712
--- /dev/null
+++ b/src/vagrant/kubeadm/worker_setup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -ex
+
+bash /vagrant/kata/kata_setup.sh
+bash /vagrant/virtlet/virtlet_setup.sh
+sleep 120
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443
diff --git a/src/vagrant/kubeadm_multus/Vagrantfile b/src/vagrant/kubeadm_app/Vagrantfile
index 9320074..3ed02d5 100644
--- a/src/vagrant/kubeadm_multus/Vagrantfile
+++ b/src/vagrant/kubeadm_app/Vagrantfile
@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
config.vm.box = "ceph/ubuntu-xenial"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
diff --git a/src/vagrant/kubeadm_app/app_setup.sh b/src/vagrant/kubeadm_app/app_setup.sh
new file mode 100755
index 0000000..a67a54f
--- /dev/null
+++ b/src/vagrant/kubeadm_app/app_setup.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+static_ip=$(ifconfig eth0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
+echo "STATIC_IP is $static_ip."
+
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+
+# Set the configmaps
+kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local
+
+# Generate the yamls
+cd clearwater-docker/kubernetes/
+./k8s-gencfg --image_path=enriquetaso --image_tag=latest
+
+# Expose Ellis
+# The Ellis provisioning interface can then be accessed on static_ip:30080
+cat ellis-svc.yaml | sed "s/clusterIP: None/type: NodePort/" > ellis-svc.yaml.new
+cat ellis-svc.yaml.new | sed "s/port: 80/port: 80\n nodePort: 30080/" > ellis-svc.yaml
+rm ellis-svc.yaml.new
+
+# Bono configuration
+# Have a static external IP address available that the load balancer can use
+cp /vagrant/custom-bono-svc/bono-svc.yaml .
+sed -ie "6s/$/\n - $static_ip/" bono-svc.yaml
+sed -ie "7s/$/\n loadBalancerIP: $static_ip/" bono-svc.yaml
+
+cd
+kubectl apply -f clearwater-docker/kubernetes
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+sleep 60
+
+r="1"
+while [ $r != "0" ]
+do
+ kubectl get pods
+ r=$( kubectl get pods | grep Pending | wc -l)
+ sleep 60
+done
+
+q="1"
+while [ $q != "0" ]
+do
+ kubectl get pods
+ q=$( kubectl get pods | grep ContainerCreating | wc -l)
+ sleep 60
+done
diff --git a/src/vagrant/kubeadm_app/create_images.sh b/src/vagrant/kubeadm_app/create_images.sh
new file mode 100755
index 0000000..12b28a3
--- /dev/null
+++ b/src/vagrant/kubeadm_app/create_images.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Build images
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+cd clearwater-docker
+for i in base astaire cassandra chronos bono ellis homer homestead homestead-prov ralf sprout
+do
+ docker build -t clearwater/$i $i
+done
+
diff --git a/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml b/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml
new file mode 100644
index 0000000..9280b0f
--- /dev/null
+++ b/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ externalIPs:
+ ports:
+ - name: "3478"
+ port: 3478
+ protocol: TCP
+ targetPort: 3478
+ - name: "5060"
+ port: 5060
+ protocol: TCP
+ targetPort: 5060
+ - name: "5062"
+ port: 5062
+ protocol: TCP
+ targetPort: 5062
+ selector:
+ service: bono
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}
diff --git a/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml b/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml
new file mode 100644
index 0000000..cde909b
--- /dev/null
+++ b/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml
@@ -0,0 +1,82 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: busybox
+spec:
+ strategy:
+ rollingUpdate:
+ maxSurge: 10%
+ maxUnavailable: 0
+ selector:
+ matchLabels:
+ app: busybox
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: busybox
+ annotations:
+ networks: '[
+ { "name": "calico"},
+ { "name": "weave"}
+ ]'
+ spec:
+ containers:
+ - name: busybox
+ image: bcmt-registry:5000/busybox:latest
+ command: ["top"]
+ stdin: true
+ tty: true
+ dnsPolicy: Default
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+
+---
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: nginx
+ template:
+ metadata:
+ labels:
+ k8s-app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:2
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ args:
+---
+# ------------------- Dashboard Service ------------------- #
+
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ nodePort: 31001
+ selector:
+ k8s-app: nginx
diff --git a/src/vagrant/kubeadm_app/deploy.sh b/src/vagrant/kubeadm_app/deploy.sh
new file mode 100755
index 0000000..54644a3
--- /dev/null
+++ b/src/vagrant/kubeadm_app/deploy.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -ex
+DIR="$(dirname `readlink -f $0`)"
+
+cd $DIR
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/clearwater_setup.sh"
+
+# Run tests
+vagrant ssh master -c "/vagrant/tests/clearwater-live-test.sh"
diff --git a/src/vagrant/kubeadm_multus/host_setup.sh b/src/vagrant/kubeadm_app/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_multus/host_setup.sh
+++ b/src/vagrant/kubeadm_app/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_app/master_setup.sh b/src/vagrant/kubeadm_app/master_setup.sh
new file mode 100644
index 0000000..b181582
--- /dev/null
+++ b/src/vagrant/kubeadm_app/master_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f http://git.io/weave-kube-1.6
diff --git a/src/vagrant/kubeadm_app/setup_vagrant.sh b/src/vagrant/kubeadm_app/setup_vagrant.sh
new file mode 100755
index 0000000..23fdcd2
--- /dev/null
+++ b/src/vagrant/kubeadm_app/setup_vagrant.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+DIR="$(dirname `readlink -f $0`)"
+
+usage() {
+ echo "Usage: $0 -b virtualbox|libvirt"
+}
+
+install_packages()
+{
+ cat << EOF | sudo tee /etc/sudoers.d/${USER}
+${USER} ALL = (root) NOPASSWD:ALL
+EOF
+ sudo apt-get update -y
+ sudo apt-get install -y git unzip
+ wget https://releases.hashicorp.com/vagrant/2.0.2/vagrant_2.0.2_x86_64.deb
+ sudo dpkg -i vagrant_2.0.2_x86_64.deb
+ rm -rf vagrant_2.0.2_x86_64.deb
+
+ sudo apt-get install -y virtualbox
+
+ #refer to https://github.com/vagrant-libvirt/vagrant-libvirt
+ sudo sed -i 's/^# deb-src/deb-src/g' /etc/apt/sources.list
+ sudo apt-get update
+ sudo apt-get build-dep vagrant ruby-libvirt -y
+ sudo apt-get install -y bridge-utils qemu libvirt-bin ebtables dnsmasq
+ sudo apt-get install -y libffi-dev libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
+ vagrant plugin install vagrant-libvirt
+ sudo adduser ${USER} libvirtd
+ sudo service libvirtd restart
+}
+
+install_box_builder()
+{
+ # Thanks to the Bento project for its great work
+ # Bento project(https://github.com/chef/bento) is released by Apache 2.0 License
+ cd $DIR
+ rm -rf bento
+ git clone https://github.com/chef/bento
+ cd bento; git checkout 05d98910d835b503e7be3d2e4071956f66fbbbc4
+ cp ../update.sh ubuntu/scripts/
+ wget https://releases.hashicorp.com/packer/1.1.2/packer_1.1.2_linux_amd64.zip
+ unzip packer_1.1.2_linux_amd64.zip
+ cd ubuntu
+ sed -i 's/"disk_size": "40960"/"disk_size": "409600"/' ubuntu-16.04-amd64.json
+}
+
+build_virtualbox() {
+ cd $DIR/bento/ubuntu
+ rm -rf ~/'VirtualBox VMs'/ubuntu-16.04-amd64
+ ../packer build -var 'headless=true' -only=virtualbox-iso ubuntu-16.04-amd64.json
+ vagrant box remove -f opnfv/container4nfv --all || true
+ vagrant box add opnfv/container4nfv ../builds/ubuntu-16.04.virtualbox.box
+}
+
+build_libvirtbox() {
+ cd $DIR/bento/ubuntu
+ ../packer build -var 'headless=true' -only=qemu ubuntu-16.04-amd64.json
+ vagrant box remove -f opnfv/container4nfv.kvm --all || true
+ vagrant box add opnfv/container4nfv.kvm ../builds/ubuntu-16.04.libvirt.box
+}
+
+install_packages
+
+set +x
+while getopts "b:h" OPTION; do
+ case $OPTION in
+ b)
+ if [ ${OPTARG} == "virtualbox" ]; then
+ install_box_builder
+ build_virtualbox
+ elif [ ${OPTARG} == "libvirt" ]; then
+ install_box_builder
+ build_libvirtbox
+ fi
+ ;;
+ h)
+ usage;
+ ;;
+ esac
+done
diff --git a/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh b/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh
new file mode 100755
index 0000000..6e5238e
--- /dev/null
+++ b/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+# http://clearwater.readthedocs.io/en/latest/Running_the_live_tests.html
+sudo apt-get install build-essential bundler git --yes
+sudo apt install gnupg2 --yes
+gpg2 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+curl -L https://get.rvm.io | bash -s stable
+
+source ~/.rvm/scripts/rvm
+rvm autolibs enable
+rvm install 1.9.3
+rvm use 1.9.3
+
+
+# Setup ruby and gems
+git clone https://github.com/Metaswitch/clearwater-live-test.git
+cd clearwater-live-test/
+cd quaff/ && git clone https://github.com/Metaswitch/quaff.git
+cd ..
+bundle install
+
+# Get Ellis ip
+ellisip=$(kubectl get services ellis -o json | grep clusterIP | cut -f4 -d'"')
+
+# Get Bono ip
+bonoip=$(kubectl get services bono -o json | grep clusterIP | cut -f4 -d'"')
+
+# Run the tests
+rake test[default.svc.cluster.local] SIGNUP_CODE=secret PROXY=$bonoip ELLIS=$ellisip
diff --git a/src/vagrant/kubeadm_istio/worker_setup.sh b/src/vagrant/kubeadm_app/worker_setup.sh
index 74e4178..74e4178 100644
--- a/src/vagrant/kubeadm_istio/worker_setup.sh
+++ b/src/vagrant/kubeadm_app/worker_setup.sh
diff --git a/src/vagrant/kubeadm_basic/Vagrantfile b/src/vagrant/kubeadm_basic/Vagrantfile
index 9320074..54b6b59 100644
--- a/src/vagrant/kubeadm_basic/Vagrantfile
+++ b/src/vagrant/kubeadm_basic/Vagrantfile
@@ -3,13 +3,13 @@ $num_workers=2
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
- config.vm.box = "ceph/ubuntu-xenial"
+ config.vm.box = "generic/ubuntu1804"
config.vm.provider :libvirt do |libvirt|
libvirt.memory = 4096
libvirt.cpus = 4
end
- config.vm.synced_folder "../..", "/src"
+ config.vm.synced_folder ".", "/vagrant"
config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.define "master" do |config|
diff --git a/src/vagrant/kubeadm_basic/host_setup.sh b/src/vagrant/kubeadm_basic/host_setup.sh
index c1a23eb..2094628 100644
--- a/src/vagrant/kubeadm_basic/host_setup.sh
+++ b/src/vagrant/kubeadm_basic/host_setup.sh
@@ -2,6 +2,11 @@
set -ex
+sudo systemctl stop systemd-resolved
+cat << EOF | sudo tee /etc/resolv.conf
+nameserver 8.8.8.8
+EOF
+
cat << EOF | sudo tee /etc/hosts
127.0.0.1 localhost
192.168.1.10 master
@@ -10,19 +15,21 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
+sudo apt-get update
+sudo apt-get install -y docker.io
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated kubelet=1.15.2-00 kubeadm=1.15.2-00 kubectl=1.15.2-00 kubernetes-cni=0.7.5-00
+sudo sed -i '9i\Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
sudo swapoff -a
sudo systemctl daemon-reload
sudo systemctl stop kubelet
diff --git a/src/vagrant/kubeadm_basic/worker_setup.sh b/src/vagrant/kubeadm_basic/worker_setup.sh
index 74e4178..42477e6 100644
--- a/src/vagrant/kubeadm_basic/worker_setup.sh
+++ b/src/vagrant/kubeadm_basic/worker_setup.sh
@@ -1,4 +1,5 @@
#!/bin/bash
set -ex
+sleep 120
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
diff --git a/src/vagrant/kubeadm_clearwater/host_setup.sh b/src/vagrant/kubeadm_clearwater/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_clearwater/host_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_istio/deploy.sh b/src/vagrant/kubeadm_istio/deploy.sh
deleted file mode 100755
index d947645..0000000
--- a/src/vagrant/kubeadm_istio/deploy.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-DIR="$(dirname `readlink -f $0`)"
-
-cd $DIR
-../cleanup.sh
-vagrant up
-vagrant ssh master -c "/vagrant/istio/deploy.sh"
-vagrant ssh master -c "/vagrant/istio/bookinfo.sh"
-vagrant ssh master -c "/vagrant/istio/clean_bookinfo.sh"
-
diff --git a/src/vagrant/kubeadm_istio/master_setup.sh b/src/vagrant/kubeadm_istio/master_setup.sh
deleted file mode 100644
index f308244..0000000
--- a/src/vagrant/kubeadm_istio/master_setup.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NodeRestriction,ResourceQuota"
-KUBE_APISERVER_CONF="/etc/kubernetes/manifests/kube-apiserver.yaml"
-
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-
-kubectl apply -f http://git.io/weave-kube-1.6
-
-# Enable mutating webhook admission controller
-# kube-apiserver will be automatically restarted by kubelet when its manifest file update.
-# https://istio.io/docs/setup/kubernetes/sidecar-injection.html
-sudo sed -i "s/admission-control=.*/admission-control=$ADMISSION_CONTROL/g" $KUBE_APISERVER_CONF
-
-set +e
-# wait for kube-apiserver restart
-r="1"
-while [ $r -ne "0" ]
-do
- sleep 2
- kubectl version > /dev/null
- r=$?
-done
-set -e
-
-# check if admissionregistration.k8s.io/v1beta1 API is enabled
-kubectl api-versions | grep admissionregistration
-
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.sh b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
index 96d776c..a66b7ca 100755
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.sh
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
@@ -20,6 +20,11 @@ kubectl get nodes
kubectl get services
kubectl get pods
kubectl get rc
-sleep 180
+r=0
+while [ "$r" -eq "0" ]
+do
+ sleep 30
+ r=$(kubectl get pods | grep Running | wc -l)
+done
svcip=$(kubectl get services nginx -o json | grep clusterIP | cut -f4 -d'"')
wget http://$svcip
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
index f80881a..9de4ef4 100644
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
@@ -23,6 +23,8 @@ spec:
metadata:
labels:
app: nginx
+ annotations:
+ io.kubernetes.cri.untrusted-workload: "true"
spec:
containers:
- name: nginx
diff --git a/src/vagrant/kubeadm_kata/host_setup.sh b/src/vagrant/kubeadm_kata/host_setup.sh
index d2af951..02bb296 100644
--- a/src/vagrant/kubeadm_kata/host_setup.sh
+++ b/src/vagrant/kubeadm_kata/host_setup.sh
@@ -30,10 +30,37 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
+sudo apt-get install -y --allow-unauthenticated kubelet=1.10.5-00 kubeadm=1.10.5-00 kubectl=1.10.5-00 kubernetes-cni=0.6.0-00
+
sudo swapoff -a
sudo systemctl stop kubelet
sudo rm -rf /var/lib/kubelet
sudo systemctl daemon-reload
sudo systemctl start kubelet
+
+
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 5EDB1B62EC4926EA
+sudo apt-get update -y
+sudo apt-get install software-properties-common -y
+sudo apt-add-repository cloud-archive:queens -y
+sudo apt-get update -y
+
+#sudo apt-get build-dep dkms -y
+sudo apt-get install python-six openssl python-pip -y
+sudo -H pip install --upgrade pip
+sudo -H pip install ovs
+#sudo apt-get install openvswitch-datapath-dkms -y
+sudo apt-get install openvswitch-switch openvswitch-common -y
+sudo apt-get install ovn-central ovn-common ovn-host -y
+sudo modprobe vport-geneve
+
+wget https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz
+sudo tar -xvf go1.8.3.linux-amd64.tar.gz -C /usr/local/
+mkdir -p $HOME/go/src
+export GOPATH=$HOME/go
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+git clone https://github.com/openvswitch/ovn-kubernetes -b v0.3.0
+cd ovn-kubernetes/go-controller
+make
+sudo make install
diff --git a/src/vagrant/kubeadm_kata/kata_setup.sh b/src/vagrant/kubeadm_kata/kata_setup.sh
index c14d844..18c4cd1 100644
--- a/src/vagrant/kubeadm_kata/kata_setup.sh
+++ b/src/vagrant/kubeadm_kata/kata_setup.sh
@@ -17,27 +17,27 @@
set -ex
-cat << EOF | sudo tee /etc/apt/sources.list.d/cc-oci-runtime.list
-deb http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/ /
-EOF
-curl -fsSL http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get install -y cc-oci-runtime
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/x86_64:/master/xUbuntu_16.04/ /' > /etc/apt/sources.list.d/kata-containers.list"
+curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
+sudo -E apt-get update
+sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
+sudo -E apt-get -y install libseccomp2
-echo | sudo add-apt-repository ppa:projectatomic/ppa
-sudo apt-get update
-sudo apt-get install -y cri-o
-sudo sed -i 's,runtime_untrusted_workload.*,runtime_untrusted_workload = "/usr/bin/cc-runtime",' /etc/crio/crio.conf
-sudo sed -i 's,cgroup_manager.*,cgroup_manager = "cgroupfs",' /etc/crio/crio.conf
-sudo sed -i 's,default_workload_trust.*,default_workload_trust = "untrusted",' /etc/crio/crio.conf
-sudo sed -i 's,^registries.*,registries = [ "docker.io",' /etc/crio/crio.conf
-sudo systemctl enable crio
-sudo systemctl daemon-reload
-sudo systemctl restart crio
+wget http://storage.googleapis.com/cri-containerd-release/cri-containerd-1.1.0.linux-amd64.tar.gz >& /dev/null
+sudo tar -C / -xzf cri-containerd-1.1.0.linux-amd64.tar.gz
+sudo systemctl start containerd
+sudo mkdir -p /opt/cni/bin
+sudo mkdir -p /etc/cni/net.d
+sudo mkdir -p /etc/containerd
+containerd config default | sudo tee /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_type.*/runtime_type=\"io.containerd.runtime.v1.linux\"/" /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_engine.*/runtime_engine=\"kata-runtime\"/" /etc/containerd/config.toml
+sudo systemctl restart containerd
+
+cat << EOF | sudo tee /etc/systemd/system/kubelet.service.d/0-containerd.conf
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+EOF
-sudo systemctl stop kubelet
-echo "Modify kubelet systemd configuration to use CRI-O"
-k8s_systemd_file="/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
-sudo sed -i '/KUBELET_AUTHZ_ARGS/a Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --runtime-request-timeout=30m"' "$k8s_systemd_file"
sudo systemctl daemon-reload
-sudo systemctl start kubelet
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm_kata/master_setup.sh b/src/vagrant/kubeadm_kata/master_setup.sh
index 41dadf0..42b3aee 100644
--- a/src/vagrant/kubeadm_kata/master_setup.sh
+++ b/src/vagrant/kubeadm_kata/master_setup.sh
@@ -22,13 +22,6 @@ mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf .kube/config
sudo chown $(id -u):$(id -g) ~/.kube/config
-kubectl apply -f http://git.io/weave-kube-1.6
+nohup /usr/bin/kubectl proxy --address=0.0.0.0 --accept-hosts=.* --port=8080 & sleep 1
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
-
-sudo systemctl restart crio
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -net-controller -loglevel=4 -k8s-apiserver=http://192.168.1.10:8080 -logfile=/var/log/openvswitch/ovnkube.log -init-master=master -cluster-subnet=10.32.0.0/12 -service-cluster-ip-range=10.96.0.0/16 -nodeport -nb-address=tcp://192.168.1.10:6631 -sb-address=tcp://192.168.1.10:6632 &
diff --git a/src/vagrant/kubeadm_kata/worker_setup.sh b/src/vagrant/kubeadm_kata/worker_setup.sh
index 6145793..63d42a5 100644
--- a/src/vagrant/kubeadm_kata/worker_setup.sh
+++ b/src/vagrant/kubeadm_kata/worker_setup.sh
@@ -18,16 +18,23 @@
set -ex
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification \
--token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 \
- --ignore-preflight-errors=SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
+ --ignore-preflight-errors=SystemVerification,CRI,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
sudo apt-get install -y putty-tools
mkdir ~/.kube
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
+echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
-sudo systemctl restart crio
+CENTRAL_IP=192.168.1.10
+NODE_NAME=$(hostname)
+TOKEN="8c5adc.1cec8dbf339093f0"
+
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -loglevel=4 \
+ -logfile="/var/log/openvswitch/ovnkube.log" \
+ -k8s-apiserver="http://$CENTRAL_IP:8080" \
+ -init-node="$NODE_NAME" \
+ -nodeport \
+ -nb-address="tcp://$CENTRAL_IP:6631" \
+ -sb-address="tcp://$CENTRAL_IP:6632" -k8s-token="$TOKEN" \
+ -init-gateways \
+ -service-cluster-ip-range=10.96.0.0/16 \
+ -cluster-subnet=10.32.0.0/12 &
diff --git a/src/vagrant/kubeadm_multus/master_setup.sh b/src/vagrant/kubeadm_multus/master_setup.sh
deleted file mode 100644
index dfc3d05..0000000
--- a/src/vagrant/kubeadm_multus/master_setup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-
-mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-
-kubectl apply -f http://git.io/weave-kube-1.6
-kubectl apply -f /src/cni/multus/kube_cni_multus.yml
diff --git a/src/vagrant/kubeadm_onap/Vagrantfile b/src/vagrant/kubeadm_onap/Vagrantfile
index fe24252..699f607 100644
--- a/src/vagrant/kubeadm_onap/Vagrantfile
+++ b/src/vagrant/kubeadm_onap/Vagrantfile
@@ -1,17 +1,17 @@
-$num_workers=1
+$num_workers=4
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
- config.vm.box = "yk0/ubuntu-xenial"
- config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.box = "ceph/ubuntu-xenial"
config.vm.define "master" do |config|
config.vm.hostname = "master"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "master_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.10"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
end
@@ -19,23 +19,14 @@ Vagrant.configure("2") do |config|
(1 .. $num_workers).each do |i|
config.vm.define vm_name = "worker%d" % [i] do |config|
config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "worker_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.#{i+20}"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 81920
- libvirt.cpus = 32
+ libvirt.memory = 40960
+ libvirt.cpus = 16
end
end
end
- config.vm.define "onap" do |config|
- config.vm.hostname = "onap"
- config.vm.provision "shell", path: "onap_setup.sh", privileged: false
- config.vm.network :private_network, ip: "192.168.0.5"
- config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 2048
- libvirt.cpus = 1
- end
- end
-
end
diff --git a/src/vagrant/kubeadm_onap/host_setup.sh b/src/vagrant/kubeadm_onap/host_setup.sh
index 87b0062..9cfd266 100755
--- a/src/vagrant/kubeadm_onap/host_setup.sh
+++ b/src/vagrant/kubeadm_onap/host_setup.sh
@@ -4,13 +4,15 @@ set -ex
cat << EOF | sudo tee /etc/hosts
127.0.0.1 localhost
-192.168.0.5 onap
192.168.0.10 master
192.168.0.21 worker1
192.168.0.22 worker2
192.168.0.23 worker3
+192.168.0.24 worker4
EOF
+sudo ifconfig eth1 mtu 1400
+
sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
@@ -22,18 +24,17 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
-sudo systemctl stop docker
cat << EOF | sudo tee /etc/docker/daemon.json
{
- "storage-driver": "overlay"
+ "insecure-registries" : [ "nexus3.onap.org:10001" ]
}
EOF
sudo systemctl daemon-reload
-sudo systemctl start docker
+sudo systemctl restart docker
-sudo systemctl stop kubelet
-sudo rm -rf /var/lib/kubelet
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_onap/master_setup.sh b/src/vagrant/kubeadm_onap/master_setup.sh
index fa451a2..8840541 100755
--- a/src/vagrant/kubeadm_onap/master_setup.sh
+++ b/src/vagrant/kubeadm_onap/master_setup.sh
@@ -1,13 +1,28 @@
#!/bin/bash
-
set -ex
-sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/24 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+server 127.127.1.0
+fudge 127.127.1.0 stratum 10
+EOF
+sudo service ntp restart
+
+sudo apt install nfs-kernel-server -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+cat << EOF | sudo tee /etc/exports
+/dockerdata-nfs *(rw,sync,no_subtree_check,no_root_squash)
+EOF
+sudo systemctl restart nfs-kernel-server.service
+
+sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf ~/.kube/config
-sudo chown $(id -u):$(id -g) ~/.kube/config
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+sed -i "s/kube-subnet-mgr/kube-subnet-mgr\n - --iface=eth1/" kube-flannel.yml
+kubectl apply -f kube-flannel.yml
-kubectl apply -f http://git.io/weave-kube-1.6
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-helm init
-kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+/vagrant/onap_setup.sh
diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh
index 4dfe1e1..e4edd8f 100755
--- a/src/vagrant/kubeadm_onap/onap_setup.sh
+++ b/src/vagrant/kubeadm_onap/onap_setup.sh
@@ -2,42 +2,19 @@
set -ex
-sudo apt-get install -y putty-tools python-openstackclient
-mkdir ~/.kube
-r=0
-while [ "$r" == "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep "tiller-deploy.*Run" | wc -l)
-done
+kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+wget https://storage.googleapis.com/kubernetes-helm/helm-v2.8.2-linux-amd64.tar.gz
+tar xzvf helm-v2.8.2-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/
+helm init
+helm serve &
+helm repo remove stable
+helm repo add local http://127.0.0.1:8879
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-git clone http://gerrit.onap.org/r/oom
-cd oom; git checkout amsterdam
-source /vagrant/openstack/openrc
-cat <<EOF | tee ~/oom/kubernetes/config/onap-parameters.yaml
-OPENSTACK_UBUNTU_14_IMAGE: "ubuntu1404"
-OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
-OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
-OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
-OPENSTACK_OAM_NETWORK_CIDR: "10.0.0.0/16"
-OPENSTACK_USERNAME: "admin"
-OPENSTACK_API_KEY: "adim"
-OPENSTACK_TENANT_NAME: "admin"
-OPENSTACK_TENANT_ID: "47899782ed714295b1151681fdfd51f5"
-OPENSTACK_REGION: "RegionOne"
-OPENSTACK_KEYSTONE_URL: "http://192.168.0.30:5000/v2.0"
-OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
-OPENSTACK_SERVICE_TENANT_NAME: "service"
-DMAAP_TOPIC: "AUTO"
-DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
-EOF
-cd ~/oom/kubernetes/oneclick && ./deleteAll.bash -n onap || true
-(kubectl delete ns onap; helm del --purge onap-config) || true
-echo "y\n" | plink -ssh -pw vagrant vagrant@worker1 "sudo rm -rf /dockerdata-nfs/onap"
-cd ~/oom/kubernetes/config && ./createConfig.sh -n onap
-while true; do sleep 30; kubectl get pods --all-namespaces | grep onap | wc -l | grep "^0$" && break; done
-source ~/oom/kubernetes/oneclick/setenv.bash
-sed -i "s/aaiServiceClusterIp:.*/aaiServiceClusterIp: 10.96.0.254/" ~/oom/kubernetes/aai/values.yaml
-cd ~/oom/kubernetes/oneclick && ./createAll.bash -n onap
+git clone -b beijing http://gerrit.onap.org/r/oom
+cd oom/kubernetes
+
+sudo apt-get install make -y
+make all
+sleep 300
+helm install local/onap -n dev --namespace onap
diff --git a/src/vagrant/kubeadm_onap/registry_setup.sh b/src/vagrant/kubeadm_onap/registry_setup.sh
new file mode 100644
index 0000000..669268b
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/registry_setup.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -ex
+
+sudo apt-get update -y
+sudo apt install -y jq docker.io
+
+NEXUS_REPO=nexus3.onap.org:10001
+LOCAL_REPO=192.168.0.2:5000
+
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries" : [ "$LOCAL_REPO" ]
+}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+
+sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+
+dockers=$(curl -X GET https://$NEXUS_REPO/v2/_catalog | jq -r ".repositories[]")
+for d in $dockers
+do
+ tags=$(curl -X GET https://$NEXUS_REPO/v2/$d/tags/list | jq -r ".tags[]")
+ for t in $tags
+ do
+ sudo docker pull $NEXUS_REPO/$d:$t
+ sudo docker tag $NEXUS_REPO/$d:$t $LOCAL_REPO/$d:$t
+ sudo docker push $LOCAL_REPO/$d:$t
+ done
+done
diff --git a/src/vagrant/kubeadm_onap/setup_swap.sh b/src/vagrant/kubeadm_onap/setup_swap.sh
new file mode 100644
index 0000000..c2432b7
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_swap.sh
@@ -0,0 +1,5 @@
+sudo swapoff -a
+sudo fallocate -l 50G /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+sudo swapon --show
diff --git a/src/vagrant/kubeadm_onap/setup_tunnel.sh b/src/vagrant/kubeadm_onap/setup_tunnel.sh
new file mode 100644
index 0000000..3a6ef75
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_tunnel.sh
@@ -0,0 +1,3 @@
+sudo ip link add tunnel0 type gretap local <local> remote <remote>
+sudo ifconfig tunnel0 up
+sudo brctl addif <br> tunnel0
diff --git a/src/vagrant/kubeadm_onap/worker_setup.sh b/src/vagrant/kubeadm_onap/worker_setup.sh
index aa60df3..e65a65c 100755
--- a/src/vagrant/kubeadm_onap/worker_setup.sh
+++ b/src/vagrant/kubeadm_onap/worker_setup.sh
@@ -1,11 +1,15 @@
#!/bin/bash
-
set -ex
-sudo mkdir /dockerdata-nfs
-sudo chmod 755 /dockerdata-nfs
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+pool master
+EOF
+sudo service ntp restart
-sudo apt-get install -y putty-tools
-mkdir ~/.kube
-echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+
+sudo apt-get install nfs-common -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+sudo mount master:/dockerdata-nfs /dockerdata-nfs
diff --git a/src/vagrant/kubeadm_ovsdpdk/host_setup.sh b/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
index b86a618..b2ee85c 100644
--- a/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
+++ b/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
sudo rm -rf /var/lib/kubelet
sudo systemctl stop kubelet
diff --git a/src/vagrant/kubeadm_istio/Vagrantfile b/src/vagrant/kubeadm_snort/Vagrantfile
index 9320074..9320074 100644
--- a/src/vagrant/kubeadm_istio/Vagrantfile
+++ b/src/vagrant/kubeadm_snort/Vagrantfile
diff --git a/src/vagrant/kubeadm_multus/deploy.sh b/src/vagrant/kubeadm_snort/deploy.sh
index 9c9e51e..e1e16d6 100755
--- a/src/vagrant/kubeadm_multus/deploy.sh
+++ b/src/vagrant/kubeadm_snort/deploy.sh
@@ -6,4 +6,4 @@ DIR="$(dirname `readlink -f $0`)"
cd $DIR
../cleanup.sh
vagrant up
-vagrant ssh master -c "/vagrant/examples/multus.sh"
+vagrant ssh master -c "/vagrant/snort/snort-setup.sh"
diff --git a/src/vagrant/kubeadm_istio/host_setup.sh b/src/vagrant/kubeadm_snort/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_istio/host_setup.sh
+++ b/src/vagrant/kubeadm_snort/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_snort/master_setup.sh b/src/vagrant/kubeadm_snort/master_setup.sh
new file mode 100644
index 0000000..972768f
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/master_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f https://raw.githubusercontent.com/weaveworks/weave/master/prog/weave-kube/weave-daemonset-k8s-1.6.yaml
diff --git a/src/vagrant/kubeadm_snort/snort/snort-setup.sh b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
new file mode 100755
index 0000000..08ae663
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+kubectl create -f /vagrant/snort/snort.yaml
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+
+r="0"
+while [ $r -ne "2" ]
+do
+ r=$(kubectl get pods | grep Running | wc -l)
+ sleep 60
+done
diff --git a/src/vagrant/kubeadm_snort/snort/snort.yaml b/src/vagrant/kubeadm_snort/snort/snort.yaml
new file mode 100644
index 0000000..60dede2
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/snort/snort.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: snort-service
+ labels:
+ app: snort
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ protocol: TCP
+ name: http
+ selector:
+ app: snort
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: snort-pod
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: snort
+ spec:
+ containers:
+ - name: snort
+ image: frapsoft/snort
+ args: ["-v"]
+ ports:
+ - containerPort: 80
diff --git a/src/vagrant/kubeadm_multus/worker_setup.sh b/src/vagrant/kubeadm_snort/worker_setup.sh
index 74e4178..74e4178 100644
--- a/src/vagrant/kubeadm_multus/worker_setup.sh
+++ b/src/vagrant/kubeadm_snort/worker_setup.sh
diff --git a/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml b/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
index 8beb03f..334142b 100644
--- a/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
@@ -4,21 +4,14 @@ metadata:
name: cirros-vm
annotations:
# This tells CRI Proxy that this pod belongs to Virtlet runtime
- kubernetes.io/target-runtime: virtlet
- # An optional annotation specifying the count of virtual CPUs.
- # Note that annotation values must always be strings,
- # thus numeric values need to be quoted.
- # Defaults to "1".
- VirtletVCPUCount: "1"
+ kubernetes.io/target-runtime: virtlet.cloud
# CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
VirtletDiskDriver: virtio
# inject ssh keys via cloud-init
VirtletSSHKeys: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
- # cloud-init user data
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo "Hi there"
+ # set root volume size
+ VirtletRootVolumeSize: 1Gi
spec:
# This nodeAffinity specification tells Kubernetes to run this
# pod only on the nodes that have extraRuntime=virtlet label.
@@ -36,17 +29,9 @@ spec:
containers:
- name: cirros-vm
# This specifies the image to use.
- # virtlet/ prefix is used by CRI proxy, the remaining part
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
# of the image name is prepended with https:// and used to download the image
- image: virtlet/cirros
- # Virtlet currently ignores image tags, but their meaning may change
- # in future, so it’s better not to set them for VM pods. If there’s no tag
- # provided in the image specification kubelet defaults to
- # imagePullPolicy: Always, which means that the image is always
- # redownloaded when the pod is created. In order to make pod creation
- # faster and more reliable, we set imagePullPolicy to IfNotPresent here
- # so a previously downloaded image is reused if there is one
- # in Virtlet’s image store
+ image: virtlet.cloud/cirros
imagePullPolicy: IfNotPresent
# tty and stdin required for `kubectl attach -t` to work
tty: true
diff --git a/src/vagrant/kubeadm_virtlet/examples/images.yaml b/src/vagrant/kubeadm_virtlet/examples/images.yaml
index 3a84585..1541ca7 100644
--- a/src/vagrant/kubeadm_virtlet/examples/images.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/images.yaml
@@ -1,3 +1,3 @@
translations:
- name: cirros
- url: http://github.com/mirantis/virtlet/releases/download/v0.8.2/cirros.img
+ url: https://github.com/mirantis/virtlet/releases/download/v0.9.3/cirros.img
diff --git a/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml b/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
index ed037d9..1bb4882 100644
--- a/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
@@ -1,25 +1,21 @@
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: DaemonSet
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
spec:
+ selector:
+ matchLabels:
+ runtime: virtlet
template:
metadata:
- name: virtlet
+ creationTimestamp: null
labels:
runtime: virtlet
+ name: virtlet
spec:
- hostNetwork: true
- dnsPolicy: ClusterFirstWithHostNet
- # hostPID is true to (1) enable VMs to survive virtlet container restart
- # (to be checked) and (2) to enable the use of nsenter in init container
- hostPID: true
- # bootstrap procedure needs to create a configmap in kube-system namespace
- serviceAccountName: virtlet
-
- # only run Virtlet pods on the nodes with extraRuntime=virtlet label
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -29,50 +25,21 @@ spec:
operator: In
values:
- virtlet
-
- initContainers:
- # The init container first copies virtlet's flexvolume driver
- # to the default kubelet plugin dir to have it in the proper place by the
- # time kubelet is restarted by CRI proxy bootstrap procedure.
- # After that it checks if there's already saved kubelet config
- # and considers that CRI proxy bootstrap is already done if it exists.
- # If it doesn't, it drops criproxy binary into /opt/criproxy/bin
- # if it's not already there and then starts criproxy installation.
- # The possibility to put criproxy binary in advance into
- # /opt/criproxy/bin may be helpful for the purpose of
- # debugging criproxy
- # At the end it ensures that /var/lib/libvirt/images exists on node.
- - name: prepare-node
- image: openretriever/virtlet
+ containers:
+ - command:
+ - /libvirt.sh
+ image: mirantis/virtlet:v1.4.1
imagePullPolicy: IfNotPresent
- command:
- - /prepare-node.sh
- volumeMounts:
- - name: k8s-flexvolume-plugins-dir
- mountPath: /kubelet-volume-plugins
- - name: criproxybin
- mountPath: /opt/criproxy/bin
- - name: run
- mountPath: /run
- - name: dockersock
- mountPath: /var/run/docker.sock
- - name: criproxyconf
- mountPath: /etc/criproxy
- - name: log
- mountPath: /hostlog
- # for ensuring that /var/lib/libvirt/images exists on node
- - name: var-lib
- mountPath: /host-var-lib
+ name: libvirt
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+ resources: {}
securityContext:
privileged: true
-
- containers:
- - name: libvirt
- image: openretriever/virtlet
- # In case we inject local virtlet image we want to use it not officially available one
- imagePullPolicy: IfNotPresent
- command:
- - /libvirt.sh
volumeMounts:
- mountPath: /sys/fs/cgroup
name: cgroup
@@ -90,117 +57,176 @@ spec:
name: libvirt
- mountPath: /var/run/libvirt
name: libvirt-sockets
- # the log dir is needed here because otherwise libvirt will produce errors
- # like this:
- # Unable to pre-create chardev file '/var/log/vms/afd75bbb-8e97-11e7-9561-02420ac00002/cirros-vm_0.log': No such file or directory
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /dev
+ name: dev
+ - image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: virtlet
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/run/virtlet.sock </dev/null
+ resources: {}
securityContext:
privileged: true
- env:
- - name: VIRTLET_DISABLE_KVM
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: disable_kvm
- optional: true
- - name: virtlet
- image: openretriever/virtlet
- # In case we inject local virtlet image we want to use it not officially available one
- imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /run
name: run
- # /boot and /lib/modules are required by supermin
- mountPath: /lib/modules
name: modules
readOnly: true
- mountPath: /boot
name: boot
readOnly: true
+ - mountPath: /dev
+ name: dev
- mountPath: /var/lib/virtlet
+ mountPropagation: Bidirectional
name: virtlet
- mountPath: /var/lib/libvirt
name: libvirt
- - mountPath: /etc/cni
- name: cniconf
- - mountPath: /opt/cni/bin
- name: cnibin
- mountPath: /var/run/libvirt
name: libvirt-sockets
- - mountPath: /var/lib/cni
- name: cnidata
- mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
name: k8s-flexvolume-plugins-dir
- # below `:shared` is unofficial way to pass this option docker
- # which then will allow virtlet to see what kubelet mounts in
- # underlaying directories, after virtlet container is created
- - mountPath: /var/lib/kubelet/pods:shared
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: Bidirectional
name: k8s-pods-dir
- - name: vms-log
- mountPath: /var/log/vms
+ - mountPath: /var/log/vms
+ name: vms-log
- mountPath: /etc/virtlet/images
name: image-name-translations
- - name: pods-log
- mountPath: /kubernetes-log
- securityContext:
- privileged: true
+ - mountPath: /var/log/pods
+ name: pods-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /var/run/netns
+ mountPropagation: Bidirectional
+ name: netns-dir
+ - command:
+ - /vms.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: vms
+ resources: {}
+ volumeMounts:
+ - mountPath: /var/lib/virtlet
+ mountPropagation: HostToContainer
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: HostToContainer
+ name: k8s-pods-dir
+ - mountPath: /dev
+ name: dev
+ - mountPath: /lib/modules
+ name: modules
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ initContainers:
+ - command:
+ - /prepare-node.sh
env:
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
- name: VIRTLET_DISABLE_KVM
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: disable_kvm
+ name: virtlet-config
optional: true
- - name: VIRTLET_DOWNLOAD_PROTOCOL
+ - name: VIRTLET_SRIOV_SUPPORT
valueFrom:
configMapKeyRef:
+ key: sriov_support
name: virtlet-config
+ optional: true
+ - name: VIRTLET_DOWNLOAD_PROTOCOL
+ valueFrom:
+ configMapKeyRef:
key: download_protocol
+ name: virtlet-config
optional: true
- name: VIRTLET_LOGLEVEL
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: loglevel
+ name: virtlet-config
optional: true
- name: VIRTLET_CALICO_SUBNET
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: calico-subnet
+ name: virtlet-config
optional: true
- name: IMAGE_REGEXP_TRANSLATION
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: image_regexp_translation
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_RAW_DEVICES
+ valueFrom:
+ configMapKeyRef:
+ key: raw_devices
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DISABLE_LOGGING
+ valueFrom:
+ configMapKeyRef:
+ key: disable_logging
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CPU_MODEL
+ valueFrom:
+ configMapKeyRef:
+ key: cpu-model
+ name: virtlet-config
+ optional: true
+ - name: KUBELET_ROOT_DIR
+ valueFrom:
+ configMapKeyRef:
+ key: kubelet_root_dir
+ name: virtlet-config
optional: true
- - name: IMAGE_TRANSLATIONS_DIR
+ - name: VIRTLET_IMAGE_TRANSLATIONS_DIR
value: /etc/virtlet/images
- - name: KUBERNETES_POD_LOGS
- value: "/kubernetes-log"
- # TODO: should we rename it?
- - name: VIRTLET_VM_LOG_LOCATION
- value: "1"
- - name: vms
- image: openretriever/virtlet
+ image: mirantis/virtlet:v1.4.1
imagePullPolicy: IfNotPresent
- command:
- - /vms.sh
+ name: prepare-node
+ resources: {}
+ securityContext:
+ privileged: true
volumeMounts:
+ - mountPath: /kubelet-volume-plugins
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /run
+ name: run
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ - mountPath: /hostlog
+ name: log
+ - mountPath: /host-var-lib
+ name: var-lib
+ - mountPath: /dev
+ name: dev
- mountPath: /var/lib/virtlet
name: virtlet
- - mountPath: /var/lib/libvirt
- name: libvirt
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
+ serviceAccountName: virtlet
volumes:
- # /dev is needed for host raw device access
- hostPath:
path: /dev
name: dev
@@ -216,9 +242,6 @@ spec:
- hostPath:
path: /run
name: run
- # TODO: don't hardcode docker socket location here
- # This will require CRI proxy installation to run
- # in host mount namespace.
- hostPath:
path: /var/run/docker.sock
name: dockersock
@@ -229,21 +252,6 @@ spec:
path: /var/lib/libvirt
name: libvirt
- hostPath:
- path: /etc/cni
- name: cniconf
- - hostPath:
- path: /opt/cni/bin
- name: cnibin
- - hostPath:
- path: /var/lib/cni
- name: cnidata
- - hostPath:
- path: /opt/criproxy/bin
- name: criproxybin
- - hostPath:
- path: /etc/criproxy
- name: criproxyconf
- - hostPath:
path: /var/log
name: log
- hostPath:
@@ -259,18 +267,27 @@ spec:
path: /var/log/virtlet/vms
name: vms-log
- hostPath:
+ path: /var/log/libvirt
+ name: libvirt-log
+ - hostPath:
path: /var/run/libvirt
name: libvirt-sockets
- hostPath:
path: /var/log/pods
name: pods-log
+ - hostPath:
+ path: /var/run/netns
+ name: netns-dir
- configMap:
name: virtlet-image-translations
name: image-name-translations
+ updateStrategy: {}
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: virtlet
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -280,23 +297,29 @@ subjects:
- kind: ServiceAccount
name: virtlet
namespace: kube-system
+
---
-kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
rules:
- - apiGroups:
- - ""
- resources:
- - configmaps
- verbs:
- - create
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ verbs:
+ - create
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
+ creationTimestamp: null
name: configmap-reader
rules:
- apiGroups:
@@ -307,10 +330,27 @@ rules:
- get
- list
- watch
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-userdata-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: kubelet-node-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -320,29 +360,49 @@ subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
+
---
-kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
+ name: vm-userdata-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-userdata-reader
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
name: virtlet-crd
rules:
- - apiGroups:
- - "apiextensions.k8s.io"
- resources:
- - customresourcedefinitions
- verbs:
- - create
- - apiGroups:
- - "virtlet.k8s"
- resources:
- - virtletimagemappings
- verbs:
- - list
- - get
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+- apiGroups:
+ - virtlet.k8s
+ resources:
+ - virtletimagemappings
+ - virtletconfigmappings
+ verbs:
+ - list
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: virtlet-crd
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -352,9 +412,110 @@ subjects:
- kind: ServiceAccount
name: virtlet
namespace: kube-system
+
---
apiVersion: v1
kind: ServiceAccount
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletimagemappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletImageMapping
+ plural: virtletimagemappings
+ shortNames:
+ - vim
+ singular: virtletimagemapping
+ scope: Namespaced
+ version: v1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletconfigmappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletConfigMapping
+ plural: virtletconfigmappings
+ shortNames:
+ - vcm
+ singular: virtletconfigmapping
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ config:
+ properties:
+ calicoSubnetSize:
+ maximum: 32
+ minimum: 0
+ type: integer
+ cniConfigDir:
+ type: string
+ cniPluginDir:
+ type: string
+ cpuModel:
+ type: string
+ criSocketPath:
+ type: string
+ databasePath:
+ type: string
+ disableKVM:
+ type: boolean
+ disableLogging:
+ type: boolean
+ downloadProtocol:
+ pattern: ^https?$
+ type: string
+ enableRegexpImageTranslation:
+ type: boolean
+ enableSriov:
+ type: boolean
+ fdServerSocketPath:
+ type: string
+ imageDir:
+ type: string
+ imageTranslationConfigsDir:
+ type: string
+ kubeletRootDir:
+ type: string
+ libvirtURI:
+ type: string
+ logLevel:
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ rawDevices:
+ type: string
+ skipImageTranslation:
+ type: boolean
+ streamPort:
+ maximum: 65535
+ minimum: 1
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ type: object
+ priority:
+ type: integer
+ version: v1
+
diff --git a/src/vagrant/kubeadm_virtlet/host_setup.sh b/src/vagrant/kubeadm_virtlet/host_setup.sh
index b86a618..f211f19 100644
--- a/src/vagrant/kubeadm_virtlet/host_setup.sh
+++ b/src/vagrant/kubeadm_virtlet/host_setup.sh
@@ -10,20 +10,33 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
+sudo apt-get update
+sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ software-properties-common
+
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce=18.03.1~ce-0~ubuntu
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated kubelet=1.12.2-00 kubeadm=1.12.2-00 kubectl=1.12.2-00 kubernetes-cni=0.6.0-00
-sudo rm -rf /var/lib/kubelet
-sudo systemctl stop kubelet
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service
deleted file mode 100644
index bb2f1de..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=CRI Proxy
-
-[Service]
-ExecStart=/usr/local/bin/criproxy -v 3 -alsologtostderr -connect /var/run/dockershim.sock,virtlet:/run/virtlet.sock -listen /run/criproxy.sock
-Restart=always
-StartLimitInterval=0
-RestartSec=10
-
-[Install]
-WantedBy=kubelet.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service
deleted file mode 100644
index c629a4b..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=dockershim for criproxy
-
-[Service]
-ExecStart=/usr/local/bin/dockershim ......
-Restart=always
-StartLimitInterval=0
-RestartSec=10
-
-[Install]
-RequiredBy=criproxy.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf
deleted file mode 100644
index 412a48d..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[Service]
-Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/run/criproxy.sock --image-service-endpoint=/run/criproxy.sock --enable-controller-attach-detach=false"
diff --git a/src/vagrant/kubeadm_virtlet/worker_setup.sh b/src/vagrant/kubeadm_virtlet/worker_setup.sh
index 4472874..bc37fb3 100644
--- a/src/vagrant/kubeadm_virtlet/worker_setup.sh
+++ b/src/vagrant/kubeadm_virtlet/worker_setup.sh
@@ -1,18 +1,12 @@
#!/bin/bash
set -ex
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443
-sudo docker pull openretriever/virtlet
-sudo docker run --rm openretriever/virtlet tar -c /criproxy | sudo tar -C /usr/local/bin -xv
-sudo ln -s /usr/local/bin/criproxy /usr/local/bin/dockershim
-
-sudo mkdir /etc/criproxy
-sudo touch /etc/criproxy/node.conf
-sudo cp -r /vagrant/virtlet/etc/systemd/system/* /etc/systemd/system/
-sudo systemctl stop kubelet
-sudo systemctl daemon-reload
-sudo systemctl enable criproxy dockershim
-sudo systemctl start criproxy dockershim
+wget https://github.com/Mirantis/criproxy/releases/download/v0.12.0/criproxy_0.12.0_amd64.deb
+sudo dpkg -i criproxy_0.12.0_amd64.deb
+sudo sed -i "s/EnvironmentFile/#EnvironmentFile/" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
sudo systemctl daemon-reload
-sudo systemctl start kubelet
+sudo systemctl restart dockershim
+sudo systemctl restart criproxy
+sudo systemctl restart kubelet
diff --git a/src/vagrant/setup_vagrant.sh b/src/vagrant/setup_vagrant.sh
index fcde052..23fdcd2 100755
--- a/src/vagrant/setup_vagrant.sh
+++ b/src/vagrant/setup_vagrant.sh
@@ -1,6 +1,5 @@
#!/bin/bash
#
-# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..69aa189
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck