summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/arm/cni-deploy/.gitignore1
-rw-r--r--src/arm/cni-deploy/deploy.yml32
-rw-r--r--src/arm/cni-deploy/inventory/inventory.cfg18
-rw-r--r--src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml86
-rw-r--r--src/arm/cni-deploy/roles/flannel/tasks/main.yml12
-rw-r--r--src/arm/cni-deploy/roles/multus/files/10-multus.conf13
-rw-r--r--src/arm/cni-deploy/roles/multus/files/clusterrole.yml16
-rw-r--r--src/arm/cni-deploy/roles/multus/files/crdnetwork.yml15
-rw-r--r--src/arm/cni-deploy/roles/multus/files/flannel-obj.yml13
-rw-r--r--src/arm/cni-deploy/roles/multus/handlers/main.yml4
-rw-r--r--src/arm/cni-deploy/roles/multus/tasks/crd.yml44
-rw-r--r--src/arm/cni-deploy/roles/multus/tasks/main.yml24
-rw-r--r--src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j222
-rw-r--r--src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j219
-rw-r--r--src/arm/cni-deploy/roles/sriov/tasks/crd.yml13
-rw-r--r--src/arm/cni-deploy/roles/sriov/tasks/main.yml12
-rw-r--r--src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j225
-rw-r--r--src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j219
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch16
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk170824
-rwxr-xr-xsrc/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh30
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf21
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml28
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml13
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml18
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml47
-rw-r--r--src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j268
-rw-r--r--src/arm/cni-deploy/vars/global20
-rw-r--r--src/arm/edge/gateway/MACCHIATObin/README.rst70
-rw-r--r--src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge590
-rw-r--r--src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh74
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/deploy-cni.sh16
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/k8s-build.sh25
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh17
-rwxr-xr-xsrc/arm/kubernetes_vpp_vhostuser/setup.sh11
-rw-r--r--src/cni/multus/install_cni.sh12
-rw-r--r--src/vagrant/kubeadm/Vagrantfile (renamed from src/vagrant/kubeadm_multus/Vagrantfile)9
-rwxr-xr-xsrc/vagrant/kubeadm/deploy.sh10
-rw-r--r--src/vagrant/kubeadm/host_setup.sh32
-rwxr-xr-xsrc/vagrant/kubeadm/istio/bookinfo.sh (renamed from src/vagrant/kubeadm_istio/istio/bookinfo.sh)4
-rwxr-xr-xsrc/vagrant/kubeadm/istio/clean_bookinfo.sh (renamed from src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh)6
-rwxr-xr-xsrc/vagrant/kubeadm/istio/deploy.sh (renamed from src/vagrant/kubeadm_istio/istio/deploy.sh)8
-rwxr-xr-xsrc/vagrant/kubeadm/istio/istio.sh6
-rw-r--r--src/vagrant/kubeadm/kata/containerd.service22
-rw-r--r--src/vagrant/kubeadm/kata/kata_setup.sh54
-rwxr-xr-xsrc/vagrant/kubeadm/kata/nginx-app.sh33
-rw-r--r--src/vagrant/kubeadm/kata/nginx-app.yaml33
-rw-r--r--src/vagrant/kubeadm/master_setup.sh32
-rw-r--r--src/vagrant/kubeadm/multus/Dockerfile10
-rw-r--r--src/vagrant/kubeadm/multus/busybox.yaml (renamed from src/vagrant/kubeadm_multus/examples/busybox.yaml)0
-rw-r--r--src/vagrant/kubeadm/multus/cni_multus.yml (renamed from src/cni/multus/kube_cni_multus.yml)27
-rwxr-xr-xsrc/vagrant/kubeadm/multus/multus.sh (renamed from src/vagrant/kubeadm_multus/examples/multus.sh)2
-rw-r--r--src/vagrant/kubeadm/registry_setup.sh23
-rw-r--r--src/vagrant/kubeadm/virtlet/cirros-vm.yaml42
-rw-r--r--src/vagrant/kubeadm/virtlet/images.yaml3
-rw-r--r--src/vagrant/kubeadm/virtlet/virtlet-ds.yaml521
-rwxr-xr-xsrc/vagrant/kubeadm/virtlet/virtlet.sh21
-rw-r--r--src/vagrant/kubeadm/virtlet/virtlet_setup.sh10
-rw-r--r--src/vagrant/kubeadm/worker_setup.sh8
-rw-r--r--src/vagrant/kubeadm_app/Vagrantfile (renamed from src/vagrant/kubeadm_istio/Vagrantfile)2
-rwxr-xr-xsrc/vagrant/kubeadm_app/app_setup.sh65
-rwxr-xr-xsrc/vagrant/kubeadm_app/create_images.sh10
-rw-r--r--src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml25
-rw-r--r--src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml82
-rwxr-xr-xsrc/vagrant/kubeadm_app/deploy.sh12
-rw-r--r--src/vagrant/kubeadm_app/host_setup.sh (renamed from src/vagrant/kubeadm_istio/host_setup.sh)0
-rw-r--r--src/vagrant/kubeadm_app/master_setup.sh10
-rwxr-xr-xsrc/vagrant/kubeadm_app/setup_vagrant.sh97
-rwxr-xr-xsrc/vagrant/kubeadm_app/tests/clearwater-live-test.sh46
-rw-r--r--src/vagrant/kubeadm_app/worker_setup.sh (renamed from src/vagrant/kubeadm_istio/worker_setup.sh)0
-rw-r--r--src/vagrant/kubeadm_basic/Vagrantfile4
-rw-r--r--src/vagrant/kubeadm_basic/host_setup.sh19
-rw-r--r--src/vagrant/kubeadm_basic/worker_setup.sh1
-rwxr-xr-xsrc/vagrant/kubeadm_istio/deploy.sh12
-rw-r--r--src/vagrant/kubeadm_istio/master_setup.sh33
-rw-r--r--src/vagrant/kubeadm_kata/host_setup.sh29
-rw-r--r--src/vagrant/kubeadm_kata/kata_setup.sh2
-rw-r--r--src/vagrant/kubeadm_kata/master_setup.sh11
-rw-r--r--src/vagrant/kubeadm_kata/worker_setup.sh19
-rwxr-xr-xsrc/vagrant/kubeadm_multus/deploy.sh9
-rw-r--r--src/vagrant/kubeadm_multus/host_setup.sh29
-rw-r--r--src/vagrant/kubeadm_multus/master_setup.sh12
-rw-r--r--src/vagrant/kubeadm_multus/worker_setup.sh4
-rw-r--r--src/vagrant/kubeadm_onap/Vagrantfile6
-rwxr-xr-xsrc/vagrant/kubeadm_onap/host_setup.sh9
-rwxr-xr-xsrc/vagrant/kubeadm_onap/master_setup.sh12
-rwxr-xr-xsrc/vagrant/kubeadm_onap/onap_setup.sh3
-rw-r--r--src/vagrant/kubeadm_onap/setup_tunnel.sh3
-rw-r--r--src/vagrant/kubeadm_onap/values.yaml149
-rwxr-xr-xsrc/vagrant/kubeadm_onap/worker_setup.sh6
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml25
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/images.yaml2
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml457
-rw-r--r--src/vagrant/kubeadm_virtlet/host_setup.sh29
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service11
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service11
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf2
-rw-r--r--src/vagrant/kubeadm_virtlet/worker_setup.sh20
-rwxr-xr-xsrc/vagrant/setup_vagrant.sh1
99 files changed, 3150 insertions, 533 deletions
diff --git a/src/arm/cni-deploy/.gitignore b/src/arm/cni-deploy/.gitignore
new file mode 100644
index 0000000..a8b42eb
--- /dev/null
+++ b/src/arm/cni-deploy/.gitignore
@@ -0,0 +1 @@
+*.retry
diff --git a/src/arm/cni-deploy/deploy.yml b/src/arm/cni-deploy/deploy.yml
new file mode 100644
index 0000000..c54353a
--- /dev/null
+++ b/src/arm/cni-deploy/deploy.yml
@@ -0,0 +1,32 @@
+---
+- name: Fixup default flannel
+ hosts: kube-master
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: flannel, tags: [flannel]}
+
+- name: Deploy Multus CNI
+ hosts: all
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: multus, tags: [multus]}
+
+- name: Deploy SRIOV CNI
+ hosts: all
+ gather_facts: "no"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: sriov, tags: [sriov]}
+
+- name: Deploy Vhostuser CNI and VPP
+ hosts: all
+ gather_facts: "yes"
+ vars_files:
+ - "vars/global"
+ roles:
+ - {role: vhost-vpp, tags: [vhost-vpp]}
diff --git a/src/arm/cni-deploy/inventory/inventory.cfg b/src/arm/cni-deploy/inventory/inventory.cfg
new file mode 100644
index 0000000..cd8bb25
--- /dev/null
+++ b/src/arm/cni-deploy/inventory/inventory.cfg
@@ -0,0 +1,18 @@
+# compass-tasks: /opt/kargo_k8s/inventory/inventory.cfg
+
+[all]
+host2 ansible_ssh_host=10.1.0.51 ansible_ssh_pass=root ansible_user=root
+host1 ansible_ssh_host=10.1.0.50 ansible_ssh_pass=root ansible_user=root
+
+[kube-master]
+host1
+
+[etcd]
+host1
+
+[kube-node]
+host2
+
+[k8s-cluster:children]
+kube-node
+kube-master
diff --git a/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml
new file mode 100644
index 0000000..a99983b
--- /dev/null
+++ b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml
@@ -0,0 +1,86 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kube-flannel
+ namespace: "kube-system"
+ labels:
+ tier: node
+ k8s-app: flannel
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ k8s-app: flannel
+ spec:
+ serviceAccountName: flannel
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.9.1-arm64
+ imagePullPolicy: IfNotPresent
+ resources:
+ limits:
+ cpu: 300m
+ memory: 500M
+ requests:
+ cpu: 150m
+ memory: 64M
+ command: ["/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"]
+ securityContext:
+ privileged: true
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ # - name: install-cni
+ # image: linaro/flannel-cni-arm64:v0.3.0
+ # command: ["/install-cni.sh"]
+ # env:
+ # # The CNI network config to install on each node.
+ # - name: CNI_NETWORK_CONFIG
+ # valueFrom:
+ # configMapKeyRef:
+ # name: kube-flannel-cfg
+ # key: cni-conf.json
+ # - name: CNI_CONF_NAME
+ # value: "10-flannel.conflist"
+ # volumeMounts:
+ # - name: cni
+ # mountPath: /host/etc/cni/net.d
+ # - name: host-cni-bin
+ # mountPath: /host/opt/cni/bin/
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ volumes:
+ - name: run
+ hostPath:
+ path: /run
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+ # - name: host-cni-bin
+ # hostPath:
+ # path: /opt/cni/bin
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 20%
+ type: RollingUpdate
diff --git a/src/arm/cni-deploy/roles/flannel/tasks/main.yml b/src/arm/cni-deploy/roles/flannel/tasks/main.yml
new file mode 100644
index 0000000..4f1a910
--- /dev/null
+++ b/src/arm/cni-deploy/roles/flannel/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Copy flannel daemonset file
+ copy:
+ src: cni-flannel-ds.yml
+ dest: /tmp/cni-flannel-ds.yml
+
+- name: Apply flannel daemonset
+ shell: kubectl apply -f /tmp/cni-flannel-ds.yml
+ ignore_errors: "yes"
+
+- name: Sleep 10 seconds
+ wait_for: timeout=10
diff --git a/src/arm/cni-deploy/roles/multus/files/10-multus.conf b/src/arm/cni-deploy/roles/multus/files/10-multus.conf
new file mode 100644
index 0000000..3726413
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/10-multus.conf
@@ -0,0 +1,13 @@
+{
+ "name": "multus-cni-network",
+ "type": "multus",
+ "kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml",
+ "delegates": [{
+ "type": "flannel",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }]
+}
+
diff --git a/src/arm/cni-deploy/roles/multus/files/clusterrole.yml b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml
new file mode 100644
index 0000000..fb056d4
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml
@@ -0,0 +1,16 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: multus-crd-overpowered
+rules:
+ - apiGroups:
+ - '*'
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - nonResourceURLs:
+ - '*'
+ verbs:
+ - '*'
diff --git a/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml
new file mode 100644
index 0000000..9aefdb8
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml
@@ -0,0 +1,15 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networks.kubernetes.com
+spec:
+ group: kubernetes.com
+ version: v1
+ scope: Namespaced
+ names:
+ plural: networks
+ singular: network
+ kind: Network
+ shortNames:
+ - net
diff --git a/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml
new file mode 100644
index 0000000..bd7891d
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml
@@ -0,0 +1,13 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: flannel-networkobj
+plugin: flannel
+args: '[
+ {
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/multus/handlers/main.yml b/src/arm/cni-deploy/roles/multus/handlers/main.yml
new file mode 100644
index 0000000..8474d34
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: Restart kubelet
+ service:
+ name: kubelet
+ state: restarted
diff --git a/src/arm/cni-deploy/roles/multus/tasks/crd.yml b/src/arm/cni-deploy/roles/multus/tasks/crd.yml
new file mode 100644
index 0000000..cacf98a
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/tasks/crd.yml
@@ -0,0 +1,44 @@
+---
+- name: Copy yaml files
+ copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - clusterrole.yml
+ - crdnetwork.yml
+ - flannel-obj.yml
+
+- name: Copy macvlan template
+ template:
+ src: macvlan-obj.yml.j2
+ dest: /tmp/macvlan-obj.yml
+
+- name: Copy Multus testpod template
+ template:
+ src: multus-testpod.yml.j2
+ dest: /root/multus-testpod.yml
+
+- name: Create cluster role
+ shell: kubectl apply -f /tmp/clusterrole.yml
+
+- name: Check if role binding is created
+ shell: kubectl get clusterrolebinding multus-node-{{ item }}
+ register: check_rb
+ ignore_errors: "yes"
+ with_items: "{{ groups['all'] }}"
+
+- name: Create role binding
+ shell: >
+ kubectl create clusterrolebinding multus-node-{{ item }}
+ --clusterrole=multus-crd-overpowered
+ --user=system:node:{{ item }}
+ when: check_rb is failed
+ with_items: "{{ groups['all'] }}"
+
+- name: Create network CRD
+ shell: kubectl apply -f /tmp/crdnetwork.yml
+
+- name: Create flannel and macvlan network objects
+ shell: >
+ kubectl apply -f /tmp/flannel-obj.yml &&
+ kubectl apply -f /tmp/macvlan-obj.yml
diff --git a/src/arm/cni-deploy/roles/multus/tasks/main.yml b/src/arm/cni-deploy/roles/multus/tasks/main.yml
new file mode 100644
index 0000000..a200215
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Build Multus CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ multus_repo }} multus_cni && cd multus_cni &&
+ git checkout {{ multus_commit }} && ./build && cp bin/multus /opt/cni/bin/"
+ args:
+ creates: /opt/cni/bin/multus
+
+- name: Remove default CNI configuration
+ shell: rm -f /etc/cni/net.d/*
+ args:
+ warn: "no"
+
+- name: Set Multus as default CNI
+ copy:
+ src: 10-multus.conf
+ dest: /etc/cni/net.d/
+ notify:
+ - Restart kubelet
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
diff --git a/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2
new file mode 100644
index 0000000..b5a549f
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2
@@ -0,0 +1,22 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: macvlan-networkobj
+plugin: macvlan
+args: '[
+ {
+ "master": "{{ macvlan_master }}",
+ "mode": "vepa",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ macvlan_subnet }}",
+ "rangeStart": "{{ macvlan_range_start }}",
+ "rangeEnd": "{{ macvlan_range_end }}",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ],
+ "gateway": "{{ macvlan_gateway }}"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2
new file mode 100644
index 0000000..4884846
--- /dev/null
+++ b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: multus-test
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "macvlan-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: multus-test
+ image: "busybox"
+ command: ["sleep", "100d"]
+ stdin: true
+ tty: true
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
diff --git a/src/arm/cni-deploy/roles/sriov/tasks/crd.yml b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml
new file mode 100644
index 0000000..5cc7892
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml
@@ -0,0 +1,13 @@
+---
+- name: Copy SRIOV template
+ template:
+ src: sriov-obj.yml.j2
+ dest: /tmp/sriov-obj.yml
+
+- name: Copy SRIOV testpod template
+ template:
+ src: sriov-testpod.yml.j2
+ dest: /root/sriov-testpod.yml
+
+- name: Create SRIOV network object
+ shell: kubectl apply -f /tmp/sriov-obj.yml
diff --git a/src/arm/cni-deploy/roles/sriov/tasks/main.yml b/src/arm/cni-deploy/roles/sriov/tasks/main.yml
new file mode 100644
index 0000000..9c190ad
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Build SRIOV CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ sriov_repo }} sriov_cni && cd sriov_cni &&
+ git checkout {{ sriov_commit }} && ./build && cp bin/sriov /opt/cni/bin/"
+ args:
+ creates: /opt/cni/bin/sriov
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2
new file mode 100644
index 0000000..6c67968
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2
@@ -0,0 +1,25 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: sriov-networkobj
+plugin: sriov
+args: '[
+ {
+ "master": "{{ sriov_master }}",
+ "pfOnly": true,
+ "if0name": "net0",
+ "ipam": {
+ "type": "host-local",
+ "subnet": "{{ sriov_subnet }}",
+ "rangeStart": "{{ sriov_range_start }}",
+ "rangeEnd": "{{ sriov_range_end }}",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ],
+ "gateway": "{{ sriov_gateway }}"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2
new file mode 100644
index 0000000..c1d01bc
--- /dev/null
+++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sriov-test
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "sriov-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: sriov-test
+ image: "busybox"
+ command: ["sleep", "100d"]
+ stdin: true
+ tty: true
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch
new file mode 100644
index 0000000..171ff4d
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
+index e320811..c1b1640 100644
+--- a/drivers/net/virtio/virtio_ethdev.c
++++ b/drivers/net/virtio/virtio_ethdev.c
+@@ -1754,6 +1754,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
+ virtqueue_notify(rxvq->vq);
+ }
+
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txvq = dev->data->tx_queues[i];
++ virtqueue_notify(txvq->vq);
++ }
++
+ PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708 b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708
new file mode 100644
index 0000000..2f83534
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708
@@ -0,0 +1,24 @@
+FROM ubuntu:xenial
+
+RUN apt-get update && \
+ apt-get install -y git make openssl libcrypto++-dev libnuma-dev && \
+ apt-get autoclean
+
+RUN git clone https://gerrit.fd.io/r/vpp -b stable/1710 /root/vpp-1710
+
+WORKDIR /root/vpp-1710
+COPY ./0001-net-virtio-ethdev.patch dpdk/dpdk-17.08_patches/0001-net-virtio-ethdev.patch
+RUN sed -i "s/sudo -E //g" Makefile
+RUN make UNATTENDED=yes install-dep
+
+WORKDIR /root/vpp-1710/build-root
+RUN ./bootstrap.sh
+RUN make PLATFORM=vpp TAG=vpp_debug vpp-install
+RUN mkdir -p /etc/vpp && \
+ cp /root/vpp-1710/src/vpp/conf/startup.conf /etc/vpp/startup.conf && \
+ cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin && \
+ ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins
+RUN groupadd vpp
+
+ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/dpdk/bin"
+ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh
new file mode 100755
index 0000000..15b0d27
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -x
+
+cid=`sed -ne '/hostname/p' /proc/1/task/1/mountinfo | awk -F '/' '{print $6}'`
+cid_s=${cid:0:12}
+filename=${cid_s}-net1.json
+ifstring=`cat /vhost-user-net-plugin/${cid}/${cid_s}-net1.json | awk -F ',' '{print $4}'`
+ifmac=`echo ${ifstring} | awk -F '\"' '{print $4}'`
+
+ipstr=$(cat /vhost-user-net-plugin/${cid}/${cid_s}-net1-ip4.conf |grep "ipAddr")
+ipaddr=$(echo $ipstr | awk -F '\"' '{print $4}')
+ipaddr1=$(echo $ipaddr | cut -d / -f 1)
+
+vdev_str="vdev virtio_user0,path=/vhost-user-net-plugin/$cid/$cid_s-net1,mac=$ifmac"
+
+sed -i.bak '/# dpdk/a\dpdk \{' /etc/vpp/startup.conf
+sed -i.bak "/# vdev eth_bond1,mode=1/a\\$vdev_str" /etc/vpp/startup.conf
+sed -i.bak '/# socket-mem/a\\}' /etc/vpp/startup.conf
+
+vpp -c /etc/vpp/startup.conf &
+
+sleep 40
+
+vppctl set int state VirtioUser0/0/0 up
+vppctl set int ip address VirtioUser0/0/0 ${ipaddr1}/24
+vppctl show int
+vppctl show int address
+
+echo ${ipaddr1} > /vhost-user-net-plugin/$(hostname)
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf
new file mode 100644
index 0000000..ae86e38
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf
@@ -0,0 +1,21 @@
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+ cli-listen /run/vpp/cli.sock
+ gid vpp
+}
+api-trace {
+ on
+}
+api-segment {
+ gid vpp
+}
+cpu {
+ main-core 1
+ corelist-workers 2-3
+ workers 2
+}
+dpdk {
+ uio-driver vfio-pci
+}
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml
new file mode 100644
index 0000000..1e9bc66
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml
@@ -0,0 +1,28 @@
+---
+apiVersion: "kubernetes.com/v1"
+kind: Network
+metadata:
+ name: vhostuser-networkobj
+plugin: vhostuser
+args: '[
+ {
+ "type": "vhostuser",
+ "name": "vhostuser-network",
+ "if0name": "net1",
+ "vhost": {
+ "vhost_tool": "/opt/cni/bin/vpp-config.py"
+ },
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.56.217.0/24",
+ "rangeStart": "10.56.217.131",
+ "rangeEnd": "10.56.217.190",
+ "routes": [
+ {
+ "dst": "0.0.0.0/0"
+ }
+ ],
+ "gateway": "10.56.217.1"
+ }
+ }
+]'
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml
new file mode 100644
index 0000000..ad36c90
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml
@@ -0,0 +1,13 @@
+---
+- name: Copy Vhostuser yaml
+ copy:
+ src: vhostuser-obj.yml
+ dest: /tmp/vhostuser-obj.yml
+
+- name: Copy VPP testpod template
+ template:
+ src: vpp-testpod.yml.j2
+ dest: /root/vpp-testpod.yml
+
+- name: Create Vhostuser network object
+ shell: kubectl apply -f /tmp/vhostuser-obj.yml
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml
new file mode 100644
index 0000000..df890ea
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Build Vhostuser CNI
+ shell: >
+ docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9
+ bash -c "git clone {{ vhostuser_repo }} vhostuser_cni && cd vhostuser_cni
+ && git checkout {{ vhostuser_commit }} && ./build
+ && cp bin/vhostuser /opt/cni/bin/
+ && cp tests/vpp-config-debug.py /opt/cni/bin/vpp-config.py"
+ args:
+ creates: /opt/cni/bin/vhostuser
+
+- name: Import CRD task
+ import_tasks: crd.yml
+ when: inventory_hostname == groups["kube-master"][0]
+
+- name: Import VPP task
+ import_tasks: vpp.yml
+ when: inventory_hostname in groups["kube-node"]
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml
new file mode 100644
index 0000000..7f5be05
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml
@@ -0,0 +1,47 @@
+---
+- name: Create dest directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /tmp/vpp1710/
+ - /var/lib/cni/vhostuser/
+ - /etc/vpp/
+
+- name: Copy VPP files
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - {src: "Dockerfile.vpp1710-dpdk1708", dest: "/tmp/vpp1710/Dockerfile"}
+ - {src: "0001-net-virtio-ethdev.patch", dest: "/tmp/vpp1710/0001-net-virtio-ethdev.patch"}
+ - {src: "setvpp.sh", dest: "/var/lib/cni/vhostuser/setvpp.sh"}
+ - {src: "startup.conf", dest: "/etc/vpp/startup.conf"}
+
+- name: Check if VPP image exists
+ shell: docker inspect --type=image vpp-1710:virtio-patched > /dev/null 2>&1
+ ignore_errors: "yes"
+ register: check_vpp
+
+- name: Building VPP container. Be patient...
+ shell: docker build -t vpp-1710:virtio-patched --network host .
+ args:
+ chdir: /tmp/vpp1710/
+ when: check_vpp is failed
+
+- name: Copy VPP binaries to host
+ shell: >
+ docker run --rm -v /root/vpp-1710/build-root:/root/vpp-host vpp-1710:virtio-patched
+ /bin/cp -a /root/vpp-1710/build-root/install-vpp_debug-native /root/vpp-host
+ && /bin/cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin
+ && /bin/rm -rf /usr/lib/vpp_plugins
+ && ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins
+ && (groupadd vpp || true)
+
+- name: Copy libcrypto.so.1.0.0 for CentOS
+ shell: >
+ docker run --rm -v /usr/lib64:/root/lib64-centos vpp-1710:virtio-patched
+ /bin/cp /lib/aarch64-linux-gnu/libcrypto.so.1.0.0 /root/lib64-centos/
+ args:
+ creates: /usr/lib64/libcrypto.so.1.0.0
+ when: ansible_os_family == "RedHat"
diff --git a/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2 b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2
new file mode 100644
index 0000000..2efd4e0
--- /dev/null
+++ b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2
@@ -0,0 +1,68 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vpp-test1
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "vhostuser-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: vpp-test1
+ image: vpp-1710:virtio-patched
+ imagePullPolicy: "Never"
+ stdin: true
+ terminationMessagePath: /dev/termination-log
+ tty: true
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /vhost-user-net-plugin
+ name: vhost-user-net-plugin
+ - mountPath: /mnt/huge
+ name: huge
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
+ volumes:
+ - name: vhost-user-net-plugin
+ hostPath:
+ path: /var/lib/cni/vhostuser
+ - name: huge
+ hostPath:
+ path: /mnt/huge
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vpp-test2
+ annotations:
+ networks: '[
+ { "name": "flannel-networkobj" },
+ { "name": "vhostuser-networkobj" }
+ ]'
+spec:
+ containers:
+ - name: vpp-test2
+ image: vpp-1710:virtio-patched
+ imagePullPolicy: "Never"
+ stdin: true
+ terminationMessagePath: /dev/termination-log
+ tty: true
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /vhost-user-net-plugin
+ name: vhost-user-net-plugin
+ - mountPath: /mnt/huge
+ name: huge
+ nodeSelector:
+ kubernetes.io/hostname: "{{ groups['kube-node'][0] }}"
+ volumes:
+ - name: vhost-user-net-plugin
+ hostPath:
+ path: /var/lib/cni/vhostuser
+ - name: huge
+ hostPath:
+ path: /mnt/huge
diff --git a/src/arm/cni-deploy/vars/global b/src/arm/cni-deploy/vars/global
new file mode 100644
index 0000000..35d76b4
--- /dev/null
+++ b/src/arm/cni-deploy/vars/global
@@ -0,0 +1,20 @@
+multus_repo: https://github.com/Intel-Corp/multus-cni
+multus_commit: 61959e04
+
+sriov_repo: https://github.com/hustcat/sriov-cni
+sriov_commit: 8b7ed984
+
+vhostuser_repo: https://github.com/yibo-cai/vhost-user-net-plugin
+vhostuser_commit: e8dc9d8e
+
+macvlan_master: eth2
+macvlan_subnet: 192.168.166.0/24
+macvlan_range_start: 192.168.166.11
+macvlan_range_end: 192.168.166.30
+macvlan_gateway: 192.168.166.1
+
+sriov_master: eth2
+sriov_subnet: 192.168.166.0/24
+sriov_range_start: 192.168.166.31
+sriov_range_end: 192.168.166.50
+sriov_gateway: 192.168.166.1
diff --git a/src/arm/edge/gateway/MACCHIATObin/README.rst b/src/arm/edge/gateway/MACCHIATObin/README.rst
new file mode 100644
index 0000000..2082e5a
--- /dev/null
+++ b/src/arm/edge/gateway/MACCHIATObin/README.rst
@@ -0,0 +1,70 @@
+=================================================================
+Linux Kernel Build Guide on MACCHIATObin for Edge Infrastructure
+=================================================================
+
+The Marvell MACCHIATObin is a family of cost-effective and high-performance networking community boards targeting ARM 64-bit high-end networking and storage applications.
+With an offering that includes fully open source software such as U-Boot, Linux, ODP and DPDK, the Marvell MACCHIATObin boards are optimal platforms for community developers and Independent Software Vendors (ISVs) to develop networking and storage applications.
+The default kernel configuration provided by Marvell does not meet the system requirements for running containers.
+We provide a kernel configuration file that has been verified on the MACCHIATObin board for developers to use, as well as a verified kernel image for the edge infrastructure deployment.
+
+
+Build From Source
+=================
+
+The procedure to build the kernel from source is largely standard, but there are still some points you need to pay attention to on the MACCHIATObin board.
+
+Download Kernel Source::
+
+ mkdir -p ~/kernel/4.14.22
+ cd ~/kernel/4.14.22
+ git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell .
+ git checkout linux-4.14.22-armada-18.09
+
+Download MUSDK Package
+Marvell User-Space SDK (MUSDK) is a light-weight user-space I/O driver for Marvell's Embedded Networking SoCs. The MUSDK library provides simple and direct access to Marvell's SoC blocks for networking applications and networking infrastructure::
+
+ mkdir -p ~/musdk
+ git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell .
+ git checkout musdk-armada-18.09
+
+Patch Kernel
+Linux Kernel needs to be patched and built in order to run MUSDK on the MACCHIATObin board::
+
+ cd ~/kernel/4.14.22/
+ git am ~/musdk/patches/linux-4.14/*.patch
+
+Build & Install
+First, replace the default kernel configuration file with defconfig-mcbin-edge::
+
+ cp defconfig-mcbin-edge ~/kernel/4.14.22/arch/arm64/configs/mvebu_v8_lsp_defconfig
+
+and then compile the kernel::
+
+ export ARCH=arm64
+ make mvebu_v8_lsp_defconfig
+ make -j$(($(nproc)+1))
+
+ make modules_install
+ cp ./arch/arm64/boot/Image /boot/
+ cp ./arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtb /boot/
+
+A script is provided to facilitate the build of the kernel image; the developer needs to run it with root privileges::
+
+ ./setup-macbin-kernel.sh
+
+Quick Deployment
+================
+
+The image file in the compressed package can also be used to quickly set up the edge system; to do so, execute the following instructions::
+ git clone https://github.com/Jianlin-lv/Kernel-for-Edge-System.git
+ tar zxvf mcbin-double-shot-linux-4.14.22.tar.gz
+ cd mcbin-double-shot-linux-4.14.22
+ cp Image /boot/Image
+ cp armada-8040-mcbin.dtb /boot/armada-8040-mcbin.dtb
+ cp -rf ./lib/modules/4.14.22-armada-18.09.3-ge9aff6a-dirty/ /lib/modules/
+
+Other
+=====
+Marvell provides guidance on the build toolchain, file system and bootloader, which can be found at the link below:
+http://wiki.macchiatobin.net/tiki-index.php?page=Wiki+Home
+
diff --git a/src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge b/src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge
new file mode 100644
index 0000000..f1a26d6
--- /dev/null
+++ b/src/arm/edge/gateway/MACCHIATObin/defconfig-mcbin-edge
@@ -0,0 +1,590 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_VMAP_STACK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_BLK_DEV_THROTTLING=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_NUMA=y
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_IPV6_SIT is not set
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_TABLES=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_RT=y
+CONFIG_NFT_NUMGEN=y
+CONFIG_NFT_CT=y
+CONFIG_NFT_SET_HASH=y
+CONFIG_NFT_COUNTER=y
+CONFIG_NFT_LOG=y
+CONFIG_NFT_LIMIT=y
+CONFIG_NFT_MASQ=y
+CONFIG_NFT_REDIR=y
+CONFIG_NFT_NAT=y
+CONFIG_NFT_OBJREF=y
+CONFIG_NFT_QUEUE=y
+CONFIG_NFT_QUOTA=y
+CONFIG_NFT_REJECT=y
+CONFIG_NFT_HASH=y
+CONFIG_NETFILTER_XT_SET=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DCCP=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_IPVS=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_NFACCT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_REALM=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SCTP=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_IP_SET=y
+CONFIG_IP_SET_BITMAP_IP=y
+CONFIG_IP_SET_BITMAP_IPMAC=y
+CONFIG_IP_SET_BITMAP_PORT=y
+CONFIG_IP_SET_HASH_IP=y
+CONFIG_IP_SET_HASH_IPMARK=y
+CONFIG_IP_SET_HASH_IPPORT=y
+CONFIG_IP_SET_HASH_IPPORTIP=y
+CONFIG_IP_SET_HASH_IPPORTNET=y
+CONFIG_IP_SET_HASH_IPMAC=y
+CONFIG_IP_SET_HASH_MAC=y
+CONFIG_IP_SET_HASH_NETPORTNET=y
+CONFIG_IP_SET_HASH_NET=y
+CONFIG_IP_SET_HASH_NETNET=y
+CONFIG_IP_SET_HASH_NETPORT=y
+CONFIG_IP_SET_HASH_NETIFACE=y
+CONFIG_IP_SET_LIST_SET=y
+CONFIG_IP_VS=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=y
+CONFIG_IP_VS_WRR=y
+CONFIG_IP_VS_LC=y
+CONFIG_IP_VS_WLC=y
+CONFIG_IP_VS_FO=y
+CONFIG_IP_VS_OVF=y
+CONFIG_IP_VS_LBLC=y
+CONFIG_IP_VS_LBLCR=y
+CONFIG_IP_VS_DH=y
+CONFIG_IP_VS_SH=y
+CONFIG_IP_VS_SED=y
+CONFIG_IP_VS_NQ=y
+CONFIG_IP_VS_FTP=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_SOCKET_IPV4=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_CHAIN_ROUTE_IPV4=y
+CONFIG_NFT_DUP_IPV4=y
+CONFIG_NFT_FIB_IPV4=y
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NFT_CHAIN_NAT_IPV4=y
+CONFIG_NFT_MASQ_IPV4=y
+CONFIG_NFT_REDIR_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_NET_DSA=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_BPF_JIT=y
+CONFIG_RFKILL=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_BRCMSTB_GISB_ARB=y
+CONFIG_VEXPRESS_CONFIG=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_SRAM=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID456=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_THIN_PROVISIONING=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_IPVLAN=y
+CONFIG_VXLAN=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+CONFIG_NET_DSA_MV88E6XXX=y
+CONFIG_AMD_XGBE=y
+CONFIG_MACB=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+CONFIG_IXGBEVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MARVELL_10G_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_SR9800=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_USB_NET_MCS7830=y
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_CAVIUM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_I2C_SLAVE=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
+CONFIG_POWER_RESET_BRCMSTB=y
+CONFIG_POWER_RESET_VEXPRESS=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+# CONFIG_RC_CORE is not set
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SIMPLE_CARD=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_MV_UDC=y
+CONFIG_USB_MV_U3D=y
+CONFIG_USB_SNP_UDC_PLAT=y
+CONFIG_USB_BDC_UDC=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_DMADEVICES=y
+CONFIG_MV_XOR=y
+CONFIG_MV_XOR_V2=y
+CONFIG_PL330_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VFIO_PLATFORM=y
+CONFIG_VFIO_PLATFORM_XHCI_RESET=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_STAGING=y
+CONFIG_COMMON_CLK_VERSATILE=y
+CONFIG_CLK_SP810=y
+CONFIG_CLK_VEXPRESS_OSC=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_HWSPINLOCK=y
+CONFIG_ARM_TIMER_SP804=y
+CONFIG_MAILBOX=y
+CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_PCC=y
+CONFIG_BCM_FLEXRM_MBOX=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_SAMSUNG_USB2=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_SOURCE_AXIM=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_SAFEXCEL=m
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA512_ARM64=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=y
+CONFIG_CRYPTO_CRC32_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_CHACHA20_NEON=y
+CONFIG_CRYPTO_AES_ARM64_BS=y
diff --git a/src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh b/src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh
new file mode 100644
index 0000000..c38ca9b
--- /dev/null
+++ b/src/arm/edge/gateway/MACCHIATObin/setup-macbin-kernel.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+##################################################################
+#Set up linux kernel on MACCHIATObin for Edge Infrastructure #
+#This script not support cross-compilation #
+##################################################################
+
+# Hardcoded Paths
+export ROOTDIR=${PWD}
+
+# Hardcoded Build_param
+export ARCH=arm64
+
+# Parameter Overridable Paths
+export KDIR=${ROOTDIR}/kernel/4.14.22
+export MUSDK_PATH=${ROOTDIR}/musdk
+export DECONFIG_MCBIN=${ROOTDIR}/defconfig-mcbin-edge
+
+echo -e "Please run shell script as root!"
+
+# Check file defconfig-mcbin-edge
+if [ ! -f "$DECONFIG_MCBIN" ]; then
+ echo -e "\tPlease copy defconfig-mcbin-edge to currently directory!"
+ exit 1
+fi
+
+
+# Download Kernel Source
+echo -e "Download marvell linux 18.09..."
+mkdir -p $KDIR
+cd $KDIR
+#touch kernle-test
+git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell .
+git checkout linux-4.14.22-armada-18.09
+cd $ROOTDIR
+
+# Download MUSDK Package
+echo -e "Download MUSDK package 18.09..."
+mkdir -p $MUSDK_PATH
+cd $MUSDK_PATH
+#touch musdk-test
+git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell .
+git checkout musdk-armada-18.09
+cd $ROOTDIR
+
+#Patch kernel
+cd $KDIR
+echo -e "Patch kernel..."
+#touch patch_kernel
+git am $MUSDK_PATH/patches/linux-4.14/*.patch
+
+# Check file defconfig-mcbin-edge
+if [ ! -f "$DECONFIG_MCBIN" ]; then
+ echo -e "\tPlease copy defconfig-mcbin-edge to $ROOTDIR!"
+ exit 1
+fi
+
+
+# Build Kernel
+echo -e "Backup mvebu_v8_lsp_defconfig"
+mv $KDIR/arch/arm64/configs/mvebu_v8_lsp_defconfig $KDIR/arch/arm64/configs/mvebu_v8_lsp_defconfig.bac
+echo -e "Replease kernel config by defconfig-mcbin-edge"
+cp $DECONFIG_MCBIN $KDIR/arch/arm64/configs/mvebu_v8_lsp_defconfig
+echo -e "Build Kernel..."
+make mvebu_v8_lsp_defconfig
+make -j$(($(nproc)+1))
+
+#Install Kernel
+echo -e "Install Kernel..."
+make modules_install
+cp ./arch/arm64/boot/Image /boot/
+cp ./arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtb /boot/
+
+echo -e "Success! Please reboot!"
+
diff --git a/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh b/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh
new file mode 100755
index 0000000..941b917
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/deploy-cni.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -e
+
+cd ../cni-deploy
+
+DEPLOY_SCENARIO="k8-vpp-nofeature-noha"
+
+export ANSIBLE_HOST_KEY_CHECKING=False
+
+virtualenv .venv
+source .venv/bin/activate
+pip install ansible==2.6.1
+
+#deploy flannel, multus
+ansible-playbook -i inventory/inventory.cfg deploy.yml --tags flannel,multus
+#deploy vhost-vpp
+ansible-playbook -i inventory/inventory.cfg deploy.yml --tags vhost-vpp
diff --git a/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh b/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh
new file mode 100755
index 0000000..fa7aa53
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/k8s-build.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -e
+
+
+sudo apt-get install -y docker.io libvirt-bin virt-manager qemu qemu-efi
+
+WORKSPACE=`pwd`
+if [ ! -d "$WORKSPACE/compass4nfv" ]; then
+ git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+fi
+
+#rm -rf compass4nfv
+#git clone https://gerrit.opnfv.org/gerrit/compass4nfv
+
+cd compass4nfv
+
+COMPASS_WORK_DIR=$WORKSPACE/../compass-work
+mkdir -p $COMPASS_WORK_DIR
+ln -s $COMPASS_WORK_DIR work
+
+sudo docker rm -f `docker ps | grep compass | cut -f1 -d' '` || true
+
+curl -s http://people.linaro.org/~yibo.cai/compass/compass4nfv-arm64-fixup.sh | bash || true
+
+./build.sh
diff --git a/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh b/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh
new file mode 100755
index 0000000..21082b3
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/k8s-deploy.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Deploy a virtual Kubernetes cluster through compass4nfv.
+set -e
+
+# Abort if compass4nfv is missing — otherwise ./deploy.sh below would be
+# executed from the wrong directory.
+cd compass4nfv
+
+# Target OS and Kubernetes release for the deployment.
+export ADAPTER_OS_PATTERN='(?i)CentOS-7.*arm.*'
+export OS_VERSION="centos7"
+export KUBERNETES_VERSION="v1.9.1"
+
+# Virtual environment: scenario, network layout and VM sizing.
+export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
+export NETWORK="deploy/conf/vm_environment/network.yml"
+export VIRT_NUMBER=2 VIRT_CPUS=8 VIRT_MEM=8192 VIRT_DISK=50G
+
+./deploy.sh
diff --git a/src/arm/kubernetes_vpp_vhostuser/setup.sh b/src/arm/kubernetes_vpp_vhostuser/setup.sh
new file mode 100755
index 0000000..ae30803
--- /dev/null
+++ b/src/arm/kubernetes_vpp_vhostuser/setup.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# One-shot entry point: build compass, deploy the cluster, then the CNI.
+# Stop at the first failed stage instead of deploying on top of a
+# failed build.
+set -e
+
+echo "Now build:"
+./k8s-build.sh
+
+sleep 2
+echo "Now deploy VMs:"
+./k8s-deploy.sh
+
+sleep 2
+echo "Now deploy vpp_vhostuser:"
+./deploy-cni.sh
diff --git a/src/cni/multus/install_cni.sh b/src/cni/multus/install_cni.sh
deleted file mode 100644
index 0d1ee6b..0000000
--- a/src/cni/multus/install_cni.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-export PATH=/usr/local/go/bin:$PATH
-apt-get update && apt-get install -y wget
-rm -rf multus-cni
-wget -qO- https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
-git clone https://github.com/Intel-Corp/multus-cni
-cd multus-cni; bash ./build
-cp bin/multus /opt/cni/bin
-cp /etc/kube-cnimultus/cni-conf.json /etc/cni/net.d/05-multus.conf
diff --git a/src/vagrant/kubeadm_multus/Vagrantfile b/src/vagrant/kubeadm/Vagrantfile
index 9320074..dc5efb1 100644
--- a/src/vagrant/kubeadm_multus/Vagrantfile
+++ b/src/vagrant/kubeadm/Vagrantfile
@@ -9,11 +9,15 @@ Vagrant.configure("2") do |config|
libvirt.cpus = 4
end
- config.vm.synced_folder "../..", "/src"
- config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.define "registry" do |config|
+ config.vm.hostname = "registry"
+ #config.vm.provision "shell", path: "registry_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.5"
+ end
config.vm.define "master" do |config|
config.vm.hostname = "master"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "master_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.1.10"
end
@@ -21,6 +25,7 @@ Vagrant.configure("2") do |config|
(1 .. $num_workers).each do |i|
config.vm.define vm_name = "worker%d" % [i] do |config|
config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "worker_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.1.#{i+20}"
end
diff --git a/src/vagrant/kubeadm/deploy.sh b/src/vagrant/kubeadm/deploy.sh
new file mode 100755
index 0000000..eb61ad8
--- /dev/null
+++ b/src/vagrant/kubeadm/deploy.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Bring up the kubeadm vagrant cluster and run the kata and virtlet demos.
+
+set -ex
+# Absolute directory of this script, so it works from any invocation CWD.
+# $(...) and quoting replace the original backticks/unquoted expansion,
+# which broke on paths containing spaces.
+DIR="$(dirname "$(readlink -f "$0")")"
+
+cd "$DIR"
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/kata/nginx-app.sh"
+vagrant ssh master -c "/vagrant/virtlet/virtlet.sh"
diff --git a/src/vagrant/kubeadm/host_setup.sh b/src/vagrant/kubeadm/host_setup.sh
new file mode 100644
index 0000000..1cb46f6
--- /dev/null
+++ b/src/vagrant/kubeadm/host_setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.5 registry
+192.168.1.10 master
+192.168.1.21 worker1
+192.168.1.22 worker2
+192.168.1.23 worker3
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y --allow-unauthenticated kubelet=1.12.2-00 kubeadm=1.12.2-00 kubectl=1.12.2-00 kubernetes-cni=0.6.0-00
+echo 'Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"' | sudo tee /etc/default/kubelet
+echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
+sudo modprobe br_netfilter
+sudo modprobe nf_conntrack_ipv4
+
+sudo swapoff -a
+sudo systemctl daemon-reload
+sudo systemctl stop kubelet
+sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_istio/istio/bookinfo.sh b/src/vagrant/kubeadm/istio/bookinfo.sh
index ad8c120..c4eef11 100755
--- a/src/vagrant/kubeadm_istio/istio/bookinfo.sh
+++ b/src/vagrant/kubeadm/istio/bookinfo.sh
@@ -21,10 +21,10 @@ cd /vagrant/istio-source/
export PATH=$PWD/bin:$PATH
# Run the test application: bookinfo
-kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/platform/kube/bookinfo.yaml)
# Define the ingress gateway for the application
-istioctl create -f samples/bookinfo/routing/bookinfo-gateway.yaml
+kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
# Wait for bookinfo deployed
kubectl get services
diff --git a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh b/src/vagrant/kubeadm/istio/clean_bookinfo.sh
index ede825f..7c539c0 100755
--- a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh
+++ b/src/vagrant/kubeadm/istio/clean_bookinfo.sh
@@ -21,7 +21,9 @@ cd /vagrant/istio-source/
export PATH=$PWD/bin:$PATH
# Clean up bookinfo
-echo "" | samples/bookinfo/kube/cleanup.sh
+echo "" | samples/bookinfo/platform/kube/cleanup.sh
-istioctl get routerules
+kubectl get virtualservices
+kubectl get destinationrules
+kubectl get gateway
kubectl get pods
diff --git a/src/vagrant/kubeadm_istio/istio/deploy.sh b/src/vagrant/kubeadm/istio/deploy.sh
index 84af41b..e896580 100755
--- a/src/vagrant/kubeadm_istio/istio/deploy.sh
+++ b/src/vagrant/kubeadm/istio/deploy.sh
@@ -35,6 +35,12 @@ echo 'export PATH="$PATH:/vagrant/istio-source/bin"' >> ~/.bashrc
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
+# Install Istio’s Custom Resource Definitions first
+kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml
+
+# Wait 30s for Kubernetes to register the Istio CRDs
+sleep 30
+
kubectl apply -f install/kubernetes/istio-demo.yaml
# Validate the installation
@@ -47,6 +53,6 @@ while [ $r -ne "0" ]
do
sleep 30
kubectl get pods -n istio-system
- r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running' | wc -l)
+ r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running|Completed' | wc -l)
done
diff --git a/src/vagrant/kubeadm/istio/istio.sh b/src/vagrant/kubeadm/istio/istio.sh
new file mode 100755
index 0000000..9c2caf6
--- /dev/null
+++ b/src/vagrant/kubeadm/istio/istio.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Run the full istio flow: install, exercise bookinfo, then clean up.
+# Abort the sequence as soon as any stage fails, rather than running
+# the bookinfo test against a broken installation.
+set -e
+
+/vagrant/istio/deploy.sh
+/vagrant/istio/bookinfo.sh
+/vagrant/istio/clean_bookinfo.sh
+
diff --git a/src/vagrant/kubeadm/kata/containerd.service b/src/vagrant/kubeadm/kata/containerd.service
new file mode 100644
index 0000000..1ae7fe8
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/containerd.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+
+Delegate=yes
+KillMode=process
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+LimitNOFILE=infinity
+# Comment TasksMax if your systemd version does not supports it.
+# Only systemd 226 and above support this version.
+TasksMax=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/src/vagrant/kubeadm/kata/kata_setup.sh b/src/vagrant/kubeadm/kata/kata_setup.sh
new file mode 100644
index 0000000..1fd77b5
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/kata_setup.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Install kata-containers plus containerd/runc/cri-tools and point the
+# kubelet at the containerd CRI socket.
+
+set -ex
+
+ARCH=$(arch)
+BRANCH="${BRANCH:-stable-1.7}"
+# Kata packages come from the openSUSE OBS repository for this arch/branch.
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
+curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
+sudo -E apt-get update
+sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
+
+# NOTE(review): the binaries below are hard-coded amd64 builds even though
+# ARCH is detected above — confirm before using on other architectures.
+wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc6/runc.amd64
+sudo cp runc.amd64 /usr/sbin/runc
+sudo chmod 755 /usr/sbin/runc
+wget http://github.com/containerd/containerd/releases/download/v1.2.2/containerd-1.2.2.linux-amd64.tar.gz >& /dev/null
+sudo tar -C /usr/local -xzf containerd-1.2.2.linux-amd64.tar.gz
+wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz >& /dev/null
+sudo tar -C /usr/local/bin -xzf crictl-v1.13.0-linux-amd64.tar.gz
+echo "runtime-endpoint: unix:///run/containerd/containerd.sock" | sudo tee /etc/crictl.yaml
+wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/critest-v1.13.0-linux-amd64.tar.gz >& /dev/null
+# Bug fix: "tar C /usr/local/bin" (missing dash) is invalid option syntax
+# in this position; use -C, matching the extractions above.
+sudo tar -C /usr/local/bin -xzf critest-v1.13.0-linux-amd64.tar.gz
+sudo cp /vagrant/kata/containerd.service /etc/systemd/system/
+sudo systemctl start containerd
+sudo mkdir -p /opt/cni/bin
+sudo mkdir -p /etc/cni/net.d
+sudo mkdir -p /etc/containerd
+# Generate the default containerd config, then point it at the local
+# insecure registry and run untrusted workloads under kata-runtime.
+containerd config default | sudo tee /etc/containerd/config.toml
+sudo sed -i "s,\[plugins.cri.registry.mirrors\],\[plugins.cri.registry.mirrors\]\n \[plugins.cri.registry.mirrors.\"registry:5000\"\]\n endpoint = \[\"http://registry:5000\"\]," /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_type.*/runtime_type=\"io.containerd.runtime.v1.linux\"/" /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_engine.*/runtime_engine=\"kata-runtime\"/" /etc/containerd/config.toml
+sudo systemctl restart containerd
+
+# Switch the kubelet from dockershim to the containerd CRI endpoint.
+cat << EOF | sudo tee /etc/systemd/system/kubelet.service.d/0-containerd.conf
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+EOF
+
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm/kata/nginx-app.sh b/src/vagrant/kubeadm/kata/nginx-app.sh
new file mode 100755
index 0000000..fb9540e
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/nginx-app.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Smoke test: deploy the nginx RC/service and fetch the index page.
+
+# Start from a clean slate in the default namespace.
+kubectl delete services --all
+kubectl delete rc --all
+kubectl delete pods --all
+kubectl create -f /vagrant/kata/nginx-app.yaml
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+# Wait until at least one nginx pod reports Running.
+r=0
+while [ "$r" -eq "0" ]
+do
+  sleep 30
+  r=$(kubectl get pods | grep -c Running)
+done
+# Resolve the service cluster IP with jsonpath instead of the fragile
+# "grep clusterIP | cut" scrape of raw JSON output.
+svcip=$(kubectl get services nginx -o jsonpath='{.spec.clusterIP}')
+wget "http://$svcip"
diff --git a/src/vagrant/kubeadm/kata/nginx-app.yaml b/src/vagrant/kubeadm/kata/nginx-app.yaml
new file mode 100644
index 0000000..9de4ef4
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/nginx-app.yaml
@@ -0,0 +1,33 @@
+# Service "nginx": NodePort service selecting pods labelled app=nginx
+# and exposing TCP port 80.
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    protocol: TCP
+    name: http
+  selector:
+    app: nginx
+---
+# ReplicationController "nginx": two replicas. The untrusted-workload
+# annotation marks the pods so the container runtime handles them as
+# untrusted (configured to kata-runtime in kata_setup.sh).
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nginx
+spec:
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: nginx
+      annotations:
+        io.kubernetes.cri.untrusted-workload: "true"
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+        ports:
+        - containerPort: 80
diff --git a/src/vagrant/kubeadm/master_setup.sh b/src/vagrant/kubeadm/master_setup.sh
new file mode 100644
index 0000000..cec8877
--- /dev/null
+++ b/src/vagrant/kubeadm/master_setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Master-node provisioning: docker CE, kubeadm init, weave CNI + multus.
+
+set -ex
+
+sudo apt-get update
+sudo apt-get install -y \
+    apt-transport-https \
+    ca-certificates \
+    curl \
+    software-properties-common
+
+# Docker CE from the upstream repository, pinned to 18.03.1.
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+   $(lsb_release -cs) \
+   stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce=18.03.1~ce-0~ubuntu
+# Allow pulls from the plain-HTTP cluster registry at registry:5000.
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+    "insecure-registries": ["registry:5000"]
+}
+EOF
+sudo service docker restart
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+# Bug fix: plain "mkdir" aborts under set -e when ~/.kube already exists
+# (e.g. on re-provision); -p makes this idempotent.
+mkdir -p "$HOME/.kube"
+sudo cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
+sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
+
+# Install the weave pod network and the multus daemonset.
+kubectl apply -f http://git.io/weave-kube-1.6
+kubectl apply -f /vagrant/multus/cni_multus.yml
diff --git a/src/vagrant/kubeadm/multus/Dockerfile b/src/vagrant/kubeadm/multus/Dockerfile
new file mode 100644
index 0000000..7923d0d
--- /dev/null
+++ b/src/vagrant/kubeadm/multus/Dockerfile
@@ -0,0 +1,10 @@
+FROM ubuntu:16.04
+ENV PATH="/usr/local/go/bin:$PATH"
+WORKDIR /go/src/
+RUN apt-get update && apt-get install -y wget git gcc
+RUN wget -qO- https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
+RUN git clone https://github.com/Intel-Corp/multus-cni
+RUN cd multus-cni; bash ./build
+
+FROM busybox
+COPY --from=0 /go/src/multus-cni/bin/multus /root
diff --git a/src/vagrant/kubeadm_multus/examples/busybox.yaml b/src/vagrant/kubeadm/multus/busybox.yaml
index 7fd1b8d..7fd1b8d 100644
--- a/src/vagrant/kubeadm_multus/examples/busybox.yaml
+++ b/src/vagrant/kubeadm/multus/busybox.yaml
diff --git a/src/cni/multus/kube_cni_multus.yml b/src/vagrant/kubeadm/multus/cni_multus.yml
index cd91737..123392b 100644
--- a/src/cni/multus/kube_cni_multus.yml
+++ b/src/vagrant/kubeadm/multus/cni_multus.yml
@@ -8,7 +8,7 @@ metadata:
kind: ConfigMap
apiVersion: v1
metadata:
- name: kube-cnimultus-cfg
+ name: cnimultus-cfg
namespace: kube-system
labels:
tier: node
@@ -45,7 +45,7 @@ data:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
- name: kube-cnimultus-ds
+ name: cnimultus-ds
namespace: kube-system
labels:
tier: node
@@ -65,31 +65,24 @@ spec:
operator: Exists
effect: NoSchedule
serviceAccountName: cnimultus
- initContainers:
- - name: install-cni
- image: ubuntu:16.04
- command:
- - bash
- - "-c"
- - "apt-get update && apt-get install -y git && git clone http://github.com/opnfv/container4nfv && cd container4nfv && git fetch https://gerrit.opnfv.org/gerrit/container4nfv refs/changes/81/47681/5 && git checkout FETCH_HEAD && bash ./src/cni/multus/install_cni.sh"
+ containers:
+ - name: run-cni
+ image: registry:5000/multus-cni:latest
+ command: ['sh', '-c', 'cp /multus/cni-conf.json /etc/cni/net.d/05-multus.conf; cp /root/multus /opt/cni/bin; while true; do sleep 10000; done' ]
volumeMounts:
- name: cni-bin
mountPath: /opt/cni/bin
- - name: cni-cfg
+ - name: etc-cni
mountPath: /etc/cni/net.d
- name: cnimultus-cfg
- mountPath: /etc/kube-cnimultus
- containers:
- - name: run-cni
- image: busybox:1.27.2
- command: ['sh', '-c', 'while true; do sleep 10000; done' ]
+ mountPath: /multus/
volumes:
- name: cni-bin
hostPath:
path: /opt/cni/bin
- - name: cni-cfg
+ - name: etc-cni
hostPath:
path: /etc/cni/net.d
- name: cnimultus-cfg
configMap:
- name: kube-cnimultus-cfg
+ name: cnimultus-cfg
diff --git a/src/vagrant/kubeadm_multus/examples/multus.sh b/src/vagrant/kubeadm/multus/multus.sh
index d7b39a0..9461a6f 100755
--- a/src/vagrant/kubeadm_multus/examples/multus.sh
+++ b/src/vagrant/kubeadm/multus/multus.sh
@@ -24,7 +24,7 @@ do
done
kubectl delete rc --all
-kubectl apply -f /vagrant/examples/busybox.yaml
+kubectl apply -f /vagrant/multus/busybox.yaml
r="0"
while [ $r -ne "2" ]
do
diff --git a/src/vagrant/kubeadm/registry_setup.sh b/src/vagrant/kubeadm/registry_setup.sh
new file mode 100644
index 0000000..5466f1c
--- /dev/null
+++ b/src/vagrant/kubeadm/registry_setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.5 registry
+EOF
+
+sudo apt-get update
+sudo apt-get install -y docker.io
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries": ["registry:5000"]
+}
+EOF
+sudo service docker restart
+
+sudo docker pull registry:2
+sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+sudo docker build /vagrant/multus -f /vagrant/multus/Dockerfile -t multus-cni
+sudo docker tag multus-cni localhost:5000/multus-cni
+sudo docker push localhost:5000/multus-cni
diff --git a/src/vagrant/kubeadm/virtlet/cirros-vm.yaml b/src/vagrant/kubeadm/virtlet/cirros-vm.yaml
new file mode 100644
index 0000000..334142b
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/cirros-vm.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: cirros-vm
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ # CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
+ VirtletDiskDriver: virtio
+ # inject ssh keys via cloud-init
+ VirtletSSHKeys: |
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
+ # set root volume size
+ VirtletRootVolumeSize: 1Gi
+spec:
+ # This nodeAffinity specification tells Kubernetes to run this
+ # pod only on the nodes that have extraRuntime=virtlet label.
+ # This label is used by Virtlet DaemonSet to select nodes
+ # that must have Virtlet runtime
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: cirros-vm
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: virtlet.cloud/cirros
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for `kubectl attach -t` to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
diff --git a/src/vagrant/kubeadm/virtlet/images.yaml b/src/vagrant/kubeadm/virtlet/images.yaml
new file mode 100644
index 0000000..1541ca7
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/images.yaml
@@ -0,0 +1,3 @@
+translations:
+ - name: cirros
+ url: https://github.com/mirantis/virtlet/releases/download/v0.9.3/cirros.img
diff --git a/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml b/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml
new file mode 100644
index 0000000..1bb4882
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml
@@ -0,0 +1,521 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ runtime: virtlet
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ runtime: virtlet
+ name: virtlet
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - command:
+ - /libvirt.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: libvirt
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /sys/fs/cgroup
+ name: cgroup
+ - mountPath: /lib/modules
+ name: modules
+ readOnly: true
+ - mountPath: /boot
+ name: boot
+ readOnly: true
+ - mountPath: /run
+ name: run
+ - mountPath: /var/lib/virtlet
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/run/libvirt
+ name: libvirt-sockets
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /dev
+ name: dev
+ - image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: virtlet
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/run/virtlet.sock </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /run
+ name: run
+ - mountPath: /lib/modules
+ name: modules
+ readOnly: true
+ - mountPath: /boot
+ name: boot
+ readOnly: true
+ - mountPath: /dev
+ name: dev
+ - mountPath: /var/lib/virtlet
+ mountPropagation: Bidirectional
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/run/libvirt
+ name: libvirt-sockets
+ - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: Bidirectional
+ name: k8s-pods-dir
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /etc/virtlet/images
+ name: image-name-translations
+ - mountPath: /var/log/pods
+ name: pods-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /var/run/netns
+ mountPropagation: Bidirectional
+ name: netns-dir
+ - command:
+ - /vms.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: vms
+ resources: {}
+ volumeMounts:
+ - mountPath: /var/lib/virtlet
+ mountPropagation: HostToContainer
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: HostToContainer
+ name: k8s-pods-dir
+ - mountPath: /dev
+ name: dev
+ - mountPath: /lib/modules
+ name: modules
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ initContainers:
+ - command:
+ - /prepare-node.sh
+ env:
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ - name: VIRTLET_DISABLE_KVM
+ valueFrom:
+ configMapKeyRef:
+ key: disable_kvm
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_SRIOV_SUPPORT
+ valueFrom:
+ configMapKeyRef:
+ key: sriov_support
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DOWNLOAD_PROTOCOL
+ valueFrom:
+ configMapKeyRef:
+ key: download_protocol
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_LOGLEVEL
+ valueFrom:
+ configMapKeyRef:
+ key: loglevel
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CALICO_SUBNET
+ valueFrom:
+ configMapKeyRef:
+ key: calico-subnet
+ name: virtlet-config
+ optional: true
+ - name: IMAGE_REGEXP_TRANSLATION
+ valueFrom:
+ configMapKeyRef:
+ key: image_regexp_translation
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_RAW_DEVICES
+ valueFrom:
+ configMapKeyRef:
+ key: raw_devices
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DISABLE_LOGGING
+ valueFrom:
+ configMapKeyRef:
+ key: disable_logging
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CPU_MODEL
+ valueFrom:
+ configMapKeyRef:
+ key: cpu-model
+ name: virtlet-config
+ optional: true
+ - name: KUBELET_ROOT_DIR
+ valueFrom:
+ configMapKeyRef:
+ key: kubelet_root_dir
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_IMAGE_TRANSLATIONS_DIR
+ value: /etc/virtlet/images
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: prepare-node
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /kubelet-volume-plugins
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /run
+ name: run
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ - mountPath: /hostlog
+ name: log
+ - mountPath: /host-var-lib
+ name: var-lib
+ - mountPath: /dev
+ name: dev
+ - mountPath: /var/lib/virtlet
+ name: virtlet
+ serviceAccountName: virtlet
+ volumes:
+ - hostPath:
+ path: /dev
+ name: dev
+ - hostPath:
+ path: /sys/fs/cgroup
+ name: cgroup
+ - hostPath:
+ path: /lib/modules
+ name: modules
+ - hostPath:
+ path: /boot
+ name: boot
+ - hostPath:
+ path: /run
+ name: run
+ - hostPath:
+ path: /var/run/docker.sock
+ name: dockersock
+ - hostPath:
+ path: /var/lib/virtlet
+ name: virtlet
+ - hostPath:
+ path: /var/lib/libvirt
+ name: libvirt
+ - hostPath:
+ path: /var/log
+ name: log
+ - hostPath:
+ path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+ name: k8s-flexvolume-plugins-dir
+ - hostPath:
+ path: /var/lib/kubelet/pods
+ name: k8s-pods-dir
+ - hostPath:
+ path: /var/lib
+ name: var-lib
+ - hostPath:
+ path: /var/log/virtlet/vms
+ name: vms-log
+ - hostPath:
+ path: /var/log/libvirt
+ name: libvirt-log
+ - hostPath:
+ path: /var/run/libvirt
+ name: libvirt-sockets
+ - hostPath:
+ path: /var/log/pods
+ name: pods-log
+ - hostPath:
+ path: /var/run/netns
+ name: netns-dir
+ - configMap:
+ name: virtlet-image-translations
+ name: image-name-translations
+ updateStrategy: {}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ verbs:
+ - create
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: configmap-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-userdata-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: kubelet-node-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: configmap-reader
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:nodes
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: vm-userdata-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-userdata-reader
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+- apiGroups:
+ - virtlet.k8s
+ resources:
+ - virtletimagemappings
+ - virtletconfigmappings
+ verbs:
+ - list
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-crd
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletimagemappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletImageMapping
+ plural: virtletimagemappings
+ shortNames:
+ - vim
+ singular: virtletimagemapping
+ scope: Namespaced
+ version: v1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletconfigmappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletConfigMapping
+ plural: virtletconfigmappings
+ shortNames:
+ - vcm
+ singular: virtletconfigmapping
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ config:
+ properties:
+ calicoSubnetSize:
+ maximum: 32
+ minimum: 0
+ type: integer
+ cniConfigDir:
+ type: string
+ cniPluginDir:
+ type: string
+ cpuModel:
+ type: string
+ criSocketPath:
+ type: string
+ databasePath:
+ type: string
+ disableKVM:
+ type: boolean
+ disableLogging:
+ type: boolean
+ downloadProtocol:
+ pattern: ^https?$
+ type: string
+ enableRegexpImageTranslation:
+ type: boolean
+ enableSriov:
+ type: boolean
+ fdServerSocketPath:
+ type: string
+ imageDir:
+ type: string
+ imageTranslationConfigsDir:
+ type: string
+ kubeletRootDir:
+ type: string
+ libvirtURI:
+ type: string
+ logLevel:
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ rawDevices:
+ type: string
+ skipImageTranslation:
+ type: boolean
+ streamPort:
+ maximum: 65535
+ minimum: 1
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ type: object
+ priority:
+ type: integer
+ version: v1
+
diff --git a/src/vagrant/kubeadm/virtlet/virtlet.sh b/src/vagrant/kubeadm/virtlet/virtlet.sh
new file mode 100755
index 0000000..4ed527e
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -ex
+
+kubectl label node worker1 extraRuntime=virtlet
+kubectl label node worker2 extraRuntime=virtlet
+kubectl create configmap -n kube-system virtlet-config --from-literal=download_protocol=http --from-literal=image_regexp_translation=1 --from-literal=disable_kvm=y
+kubectl create configmap -n kube-system virtlet-image-translations --from-file /vagrant/virtlet/images.yaml
+kubectl create -f /vagrant/virtlet/virtlet-ds.yaml
+
+kubectl delete pod --all
+kubectl create -f /vagrant/virtlet/cirros-vm.yaml
+r="0"
+while [ $r -ne "1" ]
+do
+ r=$(kubectl get pods cirros-vm | grep Running | wc -l)
+ sleep 60
+done
+sleep 360
+kubectl get pods cirros-vm -o custom-columns=:.status.podIP | xargs ping -c 4
+echo 'login by user:cirros & password:gocubsgo'
diff --git a/src/vagrant/kubeadm/virtlet/virtlet_setup.sh b/src/vagrant/kubeadm/virtlet/virtlet_setup.sh
new file mode 100644
index 0000000..b2dfaa0
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+wget https://github.com/Mirantis/criproxy/releases/download/v0.14.0/criproxy_0.14.0_amd64.deb
+echo "criproxy criproxy/primary_cri select containerd" | sudo debconf-set-selections
+sudo dpkg -i criproxy_0.14.0_amd64.deb
+sudo sed -i "s/EnvironmentFile/#EnvironmentFile/" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm/worker_setup.sh b/src/vagrant/kubeadm/worker_setup.sh
new file mode 100644
index 0000000..6b08712
--- /dev/null
+++ b/src/vagrant/kubeadm/worker_setup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -ex
+
+bash /vagrant/kata/kata_setup.sh
+bash /vagrant/virtlet/virtlet_setup.sh
+sleep 120
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443
diff --git a/src/vagrant/kubeadm_istio/Vagrantfile b/src/vagrant/kubeadm_app/Vagrantfile
index 9320074..3ed02d5 100644
--- a/src/vagrant/kubeadm_istio/Vagrantfile
+++ b/src/vagrant/kubeadm_app/Vagrantfile
@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
config.vm.box = "ceph/ubuntu-xenial"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
diff --git a/src/vagrant/kubeadm_app/app_setup.sh b/src/vagrant/kubeadm_app/app_setup.sh
new file mode 100755
index 0000000..a67a54f
--- /dev/null
+++ b/src/vagrant/kubeadm_app/app_setup.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+static_ip=$(ifconfig eth0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
+echo "STATIC_IP is $static_ip."
+
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+
+# Set the configmaps
+kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local
+
+# Generate the yamls
+cd clearwater-docker/kubernetes/
+./k8s-gencfg --image_path=enriquetaso --image_tag=latest
+
+# Expose Ellis
+# The Ellis provisioning interface can then be accessed on static_ip:30080
+cat ellis-svc.yaml | sed "s/clusterIP: None/type: NodePort/" > ellis-svc.yaml.new
+cat ellis-svc.yaml.new | sed "s/port: 80/port: 80\n nodePort: 30080/" > ellis-svc.yaml
+rm ellis-svc.yaml.new
+
+# Bono configuration
+# Have a static external IP address available that the load balancer can use
+cp /vagrant/custom-bono-svc/bono-svc.yaml .
+sed -ie "6s/$/\n - $static_ip/" bono-svc.yaml
+sed -ie "7s/$/\n loadBalancerIP: $static_ip/" bono-svc.yaml
+
+cd
+kubectl apply -f clearwater-docker/kubernetes
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+sleep 60
+
+r="1"
+while [ $r != "0" ]
+do
+ kubectl get pods
+ r=$( kubectl get pods | grep Pending | wc -l)
+ sleep 60
+done
+
+q="1"
+while [ $q != "0" ]
+do
+ kubectl get pods
+ q=$( kubectl get pods | grep ContainerCreating | wc -l)
+ sleep 60
+done
diff --git a/src/vagrant/kubeadm_app/create_images.sh b/src/vagrant/kubeadm_app/create_images.sh
new file mode 100755
index 0000000..12b28a3
--- /dev/null
+++ b/src/vagrant/kubeadm_app/create_images.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Build images
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+cd clearwater-docker
+for i in base astaire cassandra chronos bono ellis homer homestead homestead-prov ralf sprout
+do
+ docker build -t clearwater/$i $i
+done
+
diff --git a/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml b/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml
new file mode 100644
index 0000000..9280b0f
--- /dev/null
+++ b/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ externalIPs:
+ ports:
+ - name: "3478"
+ port: 3478
+ protocol: TCP
+ targetPort: 3478
+ - name: "5060"
+ port: 5060
+ protocol: TCP
+ targetPort: 5060
+ - name: "5062"
+ port: 5062
+ protocol: TCP
+ targetPort: 5062
+ selector:
+ service: bono
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}
diff --git a/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml b/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml
new file mode 100644
index 0000000..cde909b
--- /dev/null
+++ b/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml
@@ -0,0 +1,82 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: busybox
+spec:
+ strategy:
+ rollingUpdate:
+ maxSurge: 10%
+ maxUnavailable: 0
+ selector:
+ matchLabels:
+ app: busybox
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: busybox
+ annotations:
+ networks: '[
+ { "name": "calico"},
+ { "name": "weave"}
+ ]'
+ spec:
+ containers:
+ - name: busybox
+ image: bcmt-registry:5000/busybox:latest
+ command: ["top"]
+ stdin: true
+ tty: true
+ dnsPolicy: Default
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+
+---
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: nginx
+ template:
+ metadata:
+ labels:
+ k8s-app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:2
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ args:
+---
+# ------------------- Dashboard Service ------------------- #
+
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ nodePort: 31001
+ selector:
+ k8s-app: nginx
diff --git a/src/vagrant/kubeadm_app/deploy.sh b/src/vagrant/kubeadm_app/deploy.sh
new file mode 100755
index 0000000..54644a3
--- /dev/null
+++ b/src/vagrant/kubeadm_app/deploy.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -ex
+DIR="$(dirname `readlink -f $0`)"
+
+cd $DIR
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/clearwater_setup.sh"
+
+# Run tests
+vagrant ssh master -c "/vagrant/tests/clearwater-live-test.sh"
diff --git a/src/vagrant/kubeadm_istio/host_setup.sh b/src/vagrant/kubeadm_app/host_setup.sh
index 524a967..524a967 100644
--- a/src/vagrant/kubeadm_istio/host_setup.sh
+++ b/src/vagrant/kubeadm_app/host_setup.sh
diff --git a/src/vagrant/kubeadm_app/master_setup.sh b/src/vagrant/kubeadm_app/master_setup.sh
new file mode 100644
index 0000000..b181582
--- /dev/null
+++ b/src/vagrant/kubeadm_app/master_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir -p ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f http://git.io/weave-kube-1.6
diff --git a/src/vagrant/kubeadm_app/setup_vagrant.sh b/src/vagrant/kubeadm_app/setup_vagrant.sh
new file mode 100755
index 0000000..23fdcd2
--- /dev/null
+++ b/src/vagrant/kubeadm_app/setup_vagrant.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+DIR="$(dirname `readlink -f $0`)"
+
+usage() {
+ echo "Usage: $0 -b virtualbox|libvirt"
+}
+
+install_packages()
+{
+ cat << EOF | sudo tee /etc/sudoers.d/${USER}
+${USER} ALL = (root) NOPASSWD:ALL
+EOF
+ sudo apt-get update -y
+ sudo apt-get install -y git unzip
+ wget https://releases.hashicorp.com/vagrant/2.0.2/vagrant_2.0.2_x86_64.deb
+ sudo dpkg -i vagrant_2.0.2_x86_64.deb
+ rm -rf vagrant_2.0.2_x86_64.deb
+
+ sudo apt-get install -y virtualbox
+
+ #refer to https://github.com/vagrant-libvirt/vagrant-libvirt
+ sudo sed -i 's/^# deb-src/deb-src/g' /etc/apt/sources.list
+ sudo apt-get update
+ sudo apt-get build-dep vagrant ruby-libvirt -y
+ sudo apt-get install -y bridge-utils qemu libvirt-bin ebtables dnsmasq
+ sudo apt-get install -y libffi-dev libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
+ vagrant plugin install vagrant-libvirt
+ sudo adduser ${USER} libvirtd
+ sudo service libvirtd restart
+}
+
+install_box_builder()
+{
+ # Thanks Bento's great effort
+ # Bento project(https://github.com/chef/bento) is released by Apache 2.0 License
+ cd $DIR
+ rm -rf bento
+ git clone https://github.com/chef/bento
+ cd bento; git checkout 05d98910d835b503e7be3d2e4071956f66fbbbc4
+ cp ../update.sh ubuntu/scripts/
+ wget https://releases.hashicorp.com/packer/1.1.2/packer_1.1.2_linux_amd64.zip
+ unzip packer_1.1.2_linux_amd64.zip
+ cd ubuntu
+ sed -i 's/"disk_size": "40960"/"disk_size": "409600"/' ubuntu-16.04-amd64.json
+}
+
+build_virtualbox() {
+ cd $DIR/bento/ubuntu
+ rm -rf ~/'VirtualBox VMs'/ubuntu-16.04-amd64
+ ../packer build -var 'headless=true' -only=virtualbox-iso ubuntu-16.04-amd64.json
+ vagrant box remove -f opnfv/container4nfv --all || true
+ vagrant box add opnfv/container4nfv ../builds/ubuntu-16.04.virtualbox.box
+}
+
+build_libvirtbox() {
+ cd $DIR/bento/ubuntu
+ ../packer build -var 'headless=true' -only=qemu ubuntu-16.04-amd64.json
+ vagrant box remove -f opnfv/container4nfv.kvm --all || true
+ vagrant box add opnfv/container4nfv.kvm ../builds/ubuntu-16.04.libvirt.box
+}
+
+install_packages
+
+set +x
+while getopts "b:h" OPTION; do
+ case $OPTION in
+ b)
+ if [ ${OPTARG} == "virtualbox" ]; then
+ install_box_builder
+ build_virtualbox
+ elif [ ${OPTARG} == "libvirt" ]; then
+ install_box_builder
+ build_libvirtbox
+ fi
+ ;;
+ h)
+ usage;
+ ;;
+ esac
+done
diff --git a/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh b/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh
new file mode 100755
index 0000000..6e5238e
--- /dev/null
+++ b/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+# http://clearwater.readthedocs.io/en/latest/Running_the_live_tests.html
+sudo apt-get install build-essential bundler git --yes
+sudo apt install gnupg2 --yes
+gpg2 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+curl -L https://get.rvm.io | bash -s stable
+
+source ~/.rvm/scripts/rvm
+rvm autolibs enable
+rvm install 1.9.3
+rvm use 1.9.3
+
+
+# Setup ruby and gems
+git clone https://github.com/Metaswitch/clearwater-live-test.git
+cd clearwater-live-test/
+cd quaff/ && git clone https://github.com/Metaswitch/quaff.git
+cd ..
+bundle install
+
+# Get Ellis ip
+ellisip=$(kubectl get services ellis -o json | grep clusterIP | cut -f4 -d'"')
+
+# Get Bono ip
+bonoip=$(kubectl get services bono -o json | grep clusterIP | cut -f4 -d'"')
+
+# Run the tests
+rake test[default.svc.cluster.local] SIGNUP_CODE=secret PROXY=$bonoip ELLIS=$ellisip
diff --git a/src/vagrant/kubeadm_istio/worker_setup.sh b/src/vagrant/kubeadm_app/worker_setup.sh
index 74e4178..74e4178 100644
--- a/src/vagrant/kubeadm_istio/worker_setup.sh
+++ b/src/vagrant/kubeadm_app/worker_setup.sh
diff --git a/src/vagrant/kubeadm_basic/Vagrantfile b/src/vagrant/kubeadm_basic/Vagrantfile
index 9320074..54b6b59 100644
--- a/src/vagrant/kubeadm_basic/Vagrantfile
+++ b/src/vagrant/kubeadm_basic/Vagrantfile
@@ -3,13 +3,13 @@ $num_workers=2
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
- config.vm.box = "ceph/ubuntu-xenial"
+ config.vm.box = "generic/ubuntu1804"
config.vm.provider :libvirt do |libvirt|
libvirt.memory = 4096
libvirt.cpus = 4
end
- config.vm.synced_folder "../..", "/src"
+ config.vm.synced_folder ".", "/vagrant"
config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.define "master" do |config|
diff --git a/src/vagrant/kubeadm_basic/host_setup.sh b/src/vagrant/kubeadm_basic/host_setup.sh
index 524a967..2094628 100644
--- a/src/vagrant/kubeadm_basic/host_setup.sh
+++ b/src/vagrant/kubeadm_basic/host_setup.sh
@@ -2,6 +2,11 @@
set -ex
+sudo systemctl stop systemd-resolved
+cat << EOF | sudo tee /etc/resolv.conf
+nameserver 8.8.8.8
+EOF
+
cat << EOF | sudo tee /etc/hosts
127.0.0.1 localhost
192.168.1.10 master
@@ -10,19 +15,21 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
+sudo apt-get update
+sudo apt-get install -y docker.io
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated kubelet=1.15.2-00 kubeadm=1.15.2-00 kubectl=1.15.2-00 kubernetes-cni=0.7.5-00
+sudo sed -i '9i\Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
sudo swapoff -a
sudo systemctl daemon-reload
sudo systemctl stop kubelet
diff --git a/src/vagrant/kubeadm_basic/worker_setup.sh b/src/vagrant/kubeadm_basic/worker_setup.sh
index 74e4178..42477e6 100644
--- a/src/vagrant/kubeadm_basic/worker_setup.sh
+++ b/src/vagrant/kubeadm_basic/worker_setup.sh
@@ -1,4 +1,5 @@
#!/bin/bash
set -ex
+sleep 120
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
diff --git a/src/vagrant/kubeadm_istio/deploy.sh b/src/vagrant/kubeadm_istio/deploy.sh
deleted file mode 100755
index d947645..0000000
--- a/src/vagrant/kubeadm_istio/deploy.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-DIR="$(dirname `readlink -f $0`)"
-
-cd $DIR
-../cleanup.sh
-vagrant up
-vagrant ssh master -c "/vagrant/istio/deploy.sh"
-vagrant ssh master -c "/vagrant/istio/bookinfo.sh"
-vagrant ssh master -c "/vagrant/istio/clean_bookinfo.sh"
-
diff --git a/src/vagrant/kubeadm_istio/master_setup.sh b/src/vagrant/kubeadm_istio/master_setup.sh
deleted file mode 100644
index f308244..0000000
--- a/src/vagrant/kubeadm_istio/master_setup.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NodeRestriction,ResourceQuota"
-KUBE_APISERVER_CONF="/etc/kubernetes/manifests/kube-apiserver.yaml"
-
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-
-kubectl apply -f http://git.io/weave-kube-1.6
-
-# Enable mutating webhook admission controller
-# kube-apiserver will be automatically restarted by kubelet when its manifest file update.
-# https://istio.io/docs/setup/kubernetes/sidecar-injection.html
-sudo sed -i "s/admission-control=.*/admission-control=$ADMISSION_CONTROL/g" $KUBE_APISERVER_CONF
-
-set +e
-# wait for kube-apiserver restart
-r="1"
-while [ $r -ne "0" ]
-do
- sleep 2
- kubectl version > /dev/null
- r=$?
-done
-set -e
-
-# check if admissionregistration.k8s.io/v1beta1 API is enabled
-kubectl api-versions | grep admissionregistration
-
diff --git a/src/vagrant/kubeadm_kata/host_setup.sh b/src/vagrant/kubeadm_kata/host_setup.sh
index d2af951..02bb296 100644
--- a/src/vagrant/kubeadm_kata/host_setup.sh
+++ b/src/vagrant/kubeadm_kata/host_setup.sh
@@ -30,10 +30,37 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
+sudo apt-get install -y --allow-unauthenticated kubelet=1.10.5-00 kubeadm=1.10.5-00 kubectl=1.10.5-00 kubernetes-cni=0.6.0-00
+
sudo swapoff -a
sudo systemctl stop kubelet
sudo rm -rf /var/lib/kubelet
sudo systemctl daemon-reload
sudo systemctl start kubelet
+
+
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 5EDB1B62EC4926EA
+sudo apt-get update -y
+sudo apt-get install software-properties-common -y
+sudo apt-add-repository cloud-archive:queens -y
+sudo apt-get update -y
+
+#sudo apt-get build-dep dkms -y
+sudo apt-get install python-six openssl python-pip -y
+sudo -H pip install --upgrade pip
+sudo -H pip install ovs
+#sudo apt-get install openvswitch-datapath-dkms -y
+sudo apt-get install openvswitch-switch openvswitch-common -y
+sudo apt-get install ovn-central ovn-common ovn-host -y
+sudo modprobe vport-geneve
+
+wget https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz
+sudo tar -xvf go1.8.3.linux-amd64.tar.gz -C /usr/local/
+mkdir -p $HOME/go/src
+export GOPATH=$HOME/go
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+git clone https://github.com/openvswitch/ovn-kubernetes -b v0.3.0
+cd ovn-kubernetes/go-controller
+make
+sudo make install
diff --git a/src/vagrant/kubeadm_kata/kata_setup.sh b/src/vagrant/kubeadm_kata/kata_setup.sh
index 53a2bbf..18c4cd1 100644
--- a/src/vagrant/kubeadm_kata/kata_setup.sh
+++ b/src/vagrant/kubeadm_kata/kata_setup.sh
@@ -17,7 +17,7 @@
set -ex
-sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/x86_64:/master/xUbuntu_16.04/ /' > /etc/apt/sources.list.d/kata-containers.list"
curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
sudo -E apt-get update
sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
diff --git a/src/vagrant/kubeadm_kata/master_setup.sh b/src/vagrant/kubeadm_kata/master_setup.sh
index 3f1177e..42b3aee 100644
--- a/src/vagrant/kubeadm_kata/master_setup.sh
+++ b/src/vagrant/kubeadm_kata/master_setup.sh
@@ -17,16 +17,11 @@
set -ex
-sudo kubeadm init --skip-preflight-checks --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0
+sudo kubeadm init --skip-preflight-checks --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf .kube/config
sudo chown $(id -u):$(id -g) ~/.kube/config
-kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+nohup /usr/bin/kubectl proxy --address=0.0.0.0 --accept-hosts=.* --port=8080 & sleep 1
-r=0
-while [ "$r" -ne "1" ]
-do
- sleep 30
- r=$(kubectl get pods -n kube-system | grep -v Running | wc -l)
-done
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -net-controller -loglevel=4 -k8s-apiserver=http://192.168.1.10:8080 -logfile=/var/log/openvswitch/ovnkube.log -init-master=master -cluster-subnet=10.32.0.0/12 -service-cluster-ip-range=10.96.0.0/16 -nodeport -nb-address=tcp://192.168.1.10:6631 -sb-address=tcp://192.168.1.10:6632 &
diff --git a/src/vagrant/kubeadm_kata/worker_setup.sh b/src/vagrant/kubeadm_kata/worker_setup.sh
index b717291..63d42a5 100644
--- a/src/vagrant/kubeadm_kata/worker_setup.sh
+++ b/src/vagrant/kubeadm_kata/worker_setup.sh
@@ -19,3 +19,22 @@ set -ex
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification \
--token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 \
--ignore-preflight-errors=SystemVerification,CRI,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
+
+sudo apt-get install -y putty-tools
+mkdir ~/.kube
+echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
+
+CENTRAL_IP=192.168.1.10
+NODE_NAME=$(hostname)
+TOKEN="8c5adc.1cec8dbf339093f0"
+
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -loglevel=4 \
+ -logfile="/var/log/openvswitch/ovnkube.log" \
+ -k8s-apiserver="http://$CENTRAL_IP:8080" \
+ -init-node="$NODE_NAME" \
+ -nodeport \
+ -nb-address="tcp://$CENTRAL_IP:6631" \
+ -sb-address="tcp://$CENTRAL_IP:6632" -k8s-token="$TOKEN" \
+ -init-gateways \
+ -service-cluster-ip-range=10.96.0.0/16 \
+ -cluster-subnet=10.32.0.0/12 &
diff --git a/src/vagrant/kubeadm_multus/deploy.sh b/src/vagrant/kubeadm_multus/deploy.sh
deleted file mode 100755
index 9c9e51e..0000000
--- a/src/vagrant/kubeadm_multus/deploy.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-set -ex
-DIR="$(dirname `readlink -f $0`)"
-
-cd $DIR
-../cleanup.sh
-vagrant up
-vagrant ssh master -c "/vagrant/examples/multus.sh"
diff --git a/src/vagrant/kubeadm_multus/host_setup.sh b/src/vagrant/kubeadm_multus/host_setup.sh
deleted file mode 100644
index 524a967..0000000
--- a/src/vagrant/kubeadm_multus/host_setup.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-cat << EOF | sudo tee /etc/hosts
-127.0.0.1 localhost
-192.168.1.10 master
-192.168.1.21 worker1
-192.168.1.22 worker2
-192.168.1.23 worker3
-EOF
-
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
-
-curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
-cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
-deb http://apt.kubernetes.io/ kubernetes-xenial main
-EOF
-sudo apt-get update
-sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
-
-sudo swapoff -a
-sudo systemctl daemon-reload
-sudo systemctl stop kubelet
-sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_multus/master_setup.sh b/src/vagrant/kubeadm_multus/master_setup.sh
deleted file mode 100644
index dfc3d05..0000000
--- a/src/vagrant/kubeadm_multus/master_setup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-
-mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-
-kubectl apply -f http://git.io/weave-kube-1.6
-kubectl apply -f /src/cni/multus/kube_cni_multus.yml
diff --git a/src/vagrant/kubeadm_multus/worker_setup.sh b/src/vagrant/kubeadm_multus/worker_setup.sh
deleted file mode 100644
index 74e4178..0000000
--- a/src/vagrant/kubeadm_multus/worker_setup.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-set -ex
-sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
diff --git a/src/vagrant/kubeadm_onap/Vagrantfile b/src/vagrant/kubeadm_onap/Vagrantfile
index 73c442d..699f607 100644
--- a/src/vagrant/kubeadm_onap/Vagrantfile
+++ b/src/vagrant/kubeadm_onap/Vagrantfile
@@ -1,4 +1,4 @@
-$num_workers=3
+$num_workers=4
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
@@ -23,8 +23,8 @@ Vagrant.configure("2") do |config|
config.vm.provision "shell", path: "worker_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.#{i+20}"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 51200
- libvirt.cpus = 32
+ libvirt.memory = 40960
+ libvirt.cpus = 16
end
end
end
diff --git a/src/vagrant/kubeadm_onap/host_setup.sh b/src/vagrant/kubeadm_onap/host_setup.sh
index 7778e3d..9cfd266 100755
--- a/src/vagrant/kubeadm_onap/host_setup.sh
+++ b/src/vagrant/kubeadm_onap/host_setup.sh
@@ -8,15 +8,10 @@ cat << EOF | sudo tee /etc/hosts
192.168.0.21 worker1
192.168.0.22 worker2
192.168.0.23 worker3
+192.168.0.24 worker4
EOF
-cat << EOF | sudo tee /etc/resolv.conf
-search svc.cluster.local cluster.local
-nameserver 10.96.0.10
-nameserver 8.8.8.8
-nameserver 8.8.4.4
-options ndots:5 timeout:1 attempts:1
-EOF
+sudo ifconfig eth1 mtu 1400
sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
diff --git a/src/vagrant/kubeadm_onap/master_setup.sh b/src/vagrant/kubeadm_onap/master_setup.sh
index 6d06159..8840541 100755
--- a/src/vagrant/kubeadm_onap/master_setup.sh
+++ b/src/vagrant/kubeadm_onap/master_setup.sh
@@ -1,7 +1,13 @@
#!/bin/bash
-
set -ex
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+server 127.127.1.0
+fudge 127.127.1.0 stratum 10
+EOF
+sudo service ntp restart
+
sudo apt install nfs-kernel-server -y
sudo mkdir /dockerdata-nfs
sudo chmod 777 /dockerdata-nfs
@@ -15,6 +21,8 @@ mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
-kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+sed -i "s/kube-subnet-mgr/kube-subnet-mgr\n - --iface=eth1/" kube-flannel.yml
+kubectl apply -f kube-flannel.yml
/vagrant/onap_setup.sh
diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh
index d8e3dcf..e4edd8f 100755
--- a/src/vagrant/kubeadm_onap/onap_setup.sh
+++ b/src/vagrant/kubeadm_onap/onap_setup.sh
@@ -11,9 +11,8 @@ helm serve &
helm repo remove stable
helm repo add local http://127.0.0.1:8879
-git clone http://gerrit.onap.org/r/oom
+git clone -b beijing http://gerrit.onap.org/r/oom
cd oom/kubernetes
-cp /vagrant/values.yaml onap
sudo apt-get install make -y
make all
diff --git a/src/vagrant/kubeadm_onap/setup_tunnel.sh b/src/vagrant/kubeadm_onap/setup_tunnel.sh
new file mode 100644
index 0000000..3a6ef75
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_tunnel.sh
@@ -0,0 +1,3 @@
+sudo ip link add tunnel0 type gretap local <local> remote <remote>
+sudo ifconfig tunnel0 up
+sudo brctl addif <br> tunnel0
diff --git a/src/vagrant/kubeadm_onap/values.yaml b/src/vagrant/kubeadm_onap/values.yaml
deleted file mode 100644
index 3071609..0000000
--- a/src/vagrant/kubeadm_onap/values.yaml
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright © 2017 Amdocs, Bell Canada
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#################################################################
-# Global configuration overrides.
-#
-# These overrides will affect all helm charts (ie. applications)
-# that are listed below and are 'enabled'.
-#################################################################
-global:
- # Change to an unused port prefix range to prevent port conflicts
- # with other instances running within the same k8s cluster
- nodePortPrefix: 302
-
- # ONAP Repository
- # Uncomment the following to enable the use of a single docker
- # repository but ONLY if your repository mirrors all ONAP
- # docker images. This includes all images from dockerhub and
- # any other repository that hosts images for ONAP components.
- #repository: nexus3.onap.org:10001
- repositoryCred:
- user: docker
- password: docker
-
- # readiness check - temporary repo until images migrated to nexus3
- readinessRepository: oomk8s
- # logging agent - temporary repo until images migrated to nexus3
- loggingRepository: docker.elastic.co
-
- # image pull policy
- pullPolicy: Always
-
- # default mount path root directory referenced
- # by persistent volumes and log files
- persistence:
- mountPath: /dockerdata-nfs
-
- # flag to enable debugging - application support required
- debugEnabled: false
-
-# Repository for creation of nexus3.onap.org secret
-repository: nexus3.onap.org:10001
-
-
-#################################################################
-# Enable/disable and configure helm charts (ie. applications)
-# to customize the ONAP deployment.
-#################################################################
-aaf:
- enabled: true
-aai:
- enabled: true
-appc:
- enabled: true
- config:
- openStackType: OpenStackProvider
- openStackName: OpenStack
- openStackKeyStoneUrl: http://localhost:8181/apidoc/explorer/index.html
- openStackServiceTenantName: default
- openStackDomain: default
- openStackUserName: admin
- openStackEncryptedPassword: admin
-clamp:
- enabled: false
-cli:
- enabled: true
-consul:
- enabled: true
-dcaegen2:
- enabled: false
-dmaap:
- enabled: true
-esr:
- enabled: true
-log:
- enabled: true
-sniro-emulator:
- enabled: true
-oof:
- enabled: true
-msb:
- enabled: true
-multicloud:
- enabled: true
-nbi:
- enabled: true
- config:
- # openstack configuration
- openStackRegion: "Yolo"
- openStackVNFTenantId: "1234"
-policy:
- enabled: true
-portal:
- enabled: true
-robot:
- enabled: true
-sdc:
- enabled: true
-sdnc:
- enabled: true
-
- replicaCount: 1
-
- mysql:
- replicaCount: 1
-so:
- enabled: true
-
- replicaCount: 1
-
- liveness:
- # necessary to disable liveness probe when setting breakpoints
- # in debugger so K8s doesn't restart unresponsive container
- enabled: true
-
- # so server configuration
- config:
- # message router configuration
- dmaapTopic: "AUTO"
- # openstack configuration
- openStackUserName: "vnf_user"
- openStackRegion: "RegionOne"
- openStackKeyStoneUrl: "http://1.2.3.4:5000"
- openStackServiceTenantName: "service"
- openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
-
- # configure embedded mariadb
- mariadb:
- config:
- mariadbRootPassword: password
-uui:
- enabled: true
-vfc:
- enabled: true
-vid:
- enabled: true
-vnfsdk:
- enabled: true
diff --git a/src/vagrant/kubeadm_onap/worker_setup.sh b/src/vagrant/kubeadm_onap/worker_setup.sh
index a088619..e65a65c 100755
--- a/src/vagrant/kubeadm_onap/worker_setup.sh
+++ b/src/vagrant/kubeadm_onap/worker_setup.sh
@@ -1,6 +1,12 @@
#!/bin/bash
set -ex
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+pool master
+EOF
+sudo service ntp restart
+
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
sudo apt-get install nfs-common -y
diff --git a/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml b/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
index 8beb03f..334142b 100644
--- a/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
@@ -4,21 +4,14 @@ metadata:
name: cirros-vm
annotations:
# This tells CRI Proxy that this pod belongs to Virtlet runtime
- kubernetes.io/target-runtime: virtlet
- # An optional annotation specifying the count of virtual CPUs.
- # Note that annotation values must always be strings,
- # thus numeric values need to be quoted.
- # Defaults to "1".
- VirtletVCPUCount: "1"
+ kubernetes.io/target-runtime: virtlet.cloud
# CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
VirtletDiskDriver: virtio
# inject ssh keys via cloud-init
VirtletSSHKeys: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
- # cloud-init user data
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo "Hi there"
+ # set root volume size
+ VirtletRootVolumeSize: 1Gi
spec:
# This nodeAffinity specification tells Kubernetes to run this
# pod only on the nodes that have extraRuntime=virtlet label.
@@ -36,17 +29,9 @@ spec:
containers:
- name: cirros-vm
# This specifies the image to use.
- # virtlet/ prefix is used by CRI proxy, the remaining part
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
# of the image name is prepended with https:// and used to download the image
- image: virtlet/cirros
- # Virtlet currently ignores image tags, but their meaning may change
- # in future, so it’s better not to set them for VM pods. If there’s no tag
- # provided in the image specification kubelet defaults to
- # imagePullPolicy: Always, which means that the image is always
- # redownloaded when the pod is created. In order to make pod creation
- # faster and more reliable, we set imagePullPolicy to IfNotPresent here
- # so a previously downloaded image is reused if there is one
- # in Virtlet’s image store
+ image: virtlet.cloud/cirros
imagePullPolicy: IfNotPresent
# tty and stdin required for `kubectl attach -t` to work
tty: true
diff --git a/src/vagrant/kubeadm_virtlet/examples/images.yaml b/src/vagrant/kubeadm_virtlet/examples/images.yaml
index 3a84585..1541ca7 100644
--- a/src/vagrant/kubeadm_virtlet/examples/images.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/images.yaml
@@ -1,3 +1,3 @@
translations:
- name: cirros
- url: http://github.com/mirantis/virtlet/releases/download/v0.8.2/cirros.img
+ url: https://github.com/mirantis/virtlet/releases/download/v0.9.3/cirros.img
diff --git a/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml b/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
index ed037d9..1bb4882 100644
--- a/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
@@ -1,25 +1,21 @@
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: DaemonSet
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
spec:
+ selector:
+ matchLabels:
+ runtime: virtlet
template:
metadata:
- name: virtlet
+ creationTimestamp: null
labels:
runtime: virtlet
+ name: virtlet
spec:
- hostNetwork: true
- dnsPolicy: ClusterFirstWithHostNet
- # hostPID is true to (1) enable VMs to survive virtlet container restart
- # (to be checked) and (2) to enable the use of nsenter in init container
- hostPID: true
- # bootstrap procedure needs to create a configmap in kube-system namespace
- serviceAccountName: virtlet
-
- # only run Virtlet pods on the nodes with extraRuntime=virtlet label
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -29,50 +25,21 @@ spec:
operator: In
values:
- virtlet
-
- initContainers:
- # The init container first copies virtlet's flexvolume driver
- # to the default kubelet plugin dir to have it in the proper place by the
- # time kubelet is restarted by CRI proxy bootstrap procedure.
- # After that it checks if there's already saved kubelet config
- # and considers that CRI proxy bootstrap is already done if it exists.
- # If it doesn't, it drops criproxy binary into /opt/criproxy/bin
- # if it's not already there and then starts criproxy installation.
- # The possibility to put criproxy binary in advance into
- # /opt/criproxy/bin may be helpful for the purpose of
- # debugging criproxy
- # At the end it ensures that /var/lib/libvirt/images exists on node.
- - name: prepare-node
- image: openretriever/virtlet
+ containers:
+ - command:
+ - /libvirt.sh
+ image: mirantis/virtlet:v1.4.1
imagePullPolicy: IfNotPresent
- command:
- - /prepare-node.sh
- volumeMounts:
- - name: k8s-flexvolume-plugins-dir
- mountPath: /kubelet-volume-plugins
- - name: criproxybin
- mountPath: /opt/criproxy/bin
- - name: run
- mountPath: /run
- - name: dockersock
- mountPath: /var/run/docker.sock
- - name: criproxyconf
- mountPath: /etc/criproxy
- - name: log
- mountPath: /hostlog
- # for ensuring that /var/lib/libvirt/images exists on node
- - name: var-lib
- mountPath: /host-var-lib
+ name: libvirt
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+ resources: {}
securityContext:
privileged: true
-
- containers:
- - name: libvirt
- image: openretriever/virtlet
- # In case we inject local virtlet image we want to use it not officially available one
- imagePullPolicy: IfNotPresent
- command:
- - /libvirt.sh
volumeMounts:
- mountPath: /sys/fs/cgroup
name: cgroup
@@ -90,117 +57,176 @@ spec:
name: libvirt
- mountPath: /var/run/libvirt
name: libvirt-sockets
- # the log dir is needed here because otherwise libvirt will produce errors
- # like this:
- # Unable to pre-create chardev file '/var/log/vms/afd75bbb-8e97-11e7-9561-02420ac00002/cirros-vm_0.log': No such file or directory
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /dev
+ name: dev
+ - image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: virtlet
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/run/virtlet.sock </dev/null
+ resources: {}
securityContext:
privileged: true
- env:
- - name: VIRTLET_DISABLE_KVM
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: disable_kvm
- optional: true
- - name: virtlet
- image: openretriever/virtlet
- # In case we inject local virtlet image we want to use it not officially available one
- imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /run
name: run
- # /boot and /lib/modules are required by supermin
- mountPath: /lib/modules
name: modules
readOnly: true
- mountPath: /boot
name: boot
readOnly: true
+ - mountPath: /dev
+ name: dev
- mountPath: /var/lib/virtlet
+ mountPropagation: Bidirectional
name: virtlet
- mountPath: /var/lib/libvirt
name: libvirt
- - mountPath: /etc/cni
- name: cniconf
- - mountPath: /opt/cni/bin
- name: cnibin
- mountPath: /var/run/libvirt
name: libvirt-sockets
- - mountPath: /var/lib/cni
- name: cnidata
- mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
name: k8s-flexvolume-plugins-dir
- # below `:shared` is unofficial way to pass this option docker
- # which then will allow virtlet to see what kubelet mounts in
- # underlaying directories, after virtlet container is created
- - mountPath: /var/lib/kubelet/pods:shared
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: Bidirectional
name: k8s-pods-dir
- - name: vms-log
- mountPath: /var/log/vms
+ - mountPath: /var/log/vms
+ name: vms-log
- mountPath: /etc/virtlet/images
name: image-name-translations
- - name: pods-log
- mountPath: /kubernetes-log
- securityContext:
- privileged: true
+ - mountPath: /var/log/pods
+ name: pods-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /var/run/netns
+ mountPropagation: Bidirectional
+ name: netns-dir
+ - command:
+ - /vms.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: vms
+ resources: {}
+ volumeMounts:
+ - mountPath: /var/lib/virtlet
+ mountPropagation: HostToContainer
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: HostToContainer
+ name: k8s-pods-dir
+ - mountPath: /dev
+ name: dev
+ - mountPath: /lib/modules
+ name: modules
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ initContainers:
+ - command:
+ - /prepare-node.sh
env:
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
- name: VIRTLET_DISABLE_KVM
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: disable_kvm
+ name: virtlet-config
optional: true
- - name: VIRTLET_DOWNLOAD_PROTOCOL
+ - name: VIRTLET_SRIOV_SUPPORT
valueFrom:
configMapKeyRef:
+ key: sriov_support
name: virtlet-config
+ optional: true
+ - name: VIRTLET_DOWNLOAD_PROTOCOL
+ valueFrom:
+ configMapKeyRef:
key: download_protocol
+ name: virtlet-config
optional: true
- name: VIRTLET_LOGLEVEL
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: loglevel
+ name: virtlet-config
optional: true
- name: VIRTLET_CALICO_SUBNET
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: calico-subnet
+ name: virtlet-config
optional: true
- name: IMAGE_REGEXP_TRANSLATION
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: image_regexp_translation
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_RAW_DEVICES
+ valueFrom:
+ configMapKeyRef:
+ key: raw_devices
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DISABLE_LOGGING
+ valueFrom:
+ configMapKeyRef:
+ key: disable_logging
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CPU_MODEL
+ valueFrom:
+ configMapKeyRef:
+ key: cpu-model
+ name: virtlet-config
+ optional: true
+ - name: KUBELET_ROOT_DIR
+ valueFrom:
+ configMapKeyRef:
+ key: kubelet_root_dir
+ name: virtlet-config
optional: true
- - name: IMAGE_TRANSLATIONS_DIR
+ - name: VIRTLET_IMAGE_TRANSLATIONS_DIR
value: /etc/virtlet/images
- - name: KUBERNETES_POD_LOGS
- value: "/kubernetes-log"
- # TODO: should we rename it?
- - name: VIRTLET_VM_LOG_LOCATION
- value: "1"
- - name: vms
- image: openretriever/virtlet
+ image: mirantis/virtlet:v1.4.1
imagePullPolicy: IfNotPresent
- command:
- - /vms.sh
+ name: prepare-node
+ resources: {}
+ securityContext:
+ privileged: true
volumeMounts:
+ - mountPath: /kubelet-volume-plugins
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /run
+ name: run
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ - mountPath: /hostlog
+ name: log
+ - mountPath: /host-var-lib
+ name: var-lib
+ - mountPath: /dev
+ name: dev
- mountPath: /var/lib/virtlet
name: virtlet
- - mountPath: /var/lib/libvirt
- name: libvirt
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
+ serviceAccountName: virtlet
volumes:
- # /dev is needed for host raw device access
- hostPath:
path: /dev
name: dev
@@ -216,9 +242,6 @@ spec:
- hostPath:
path: /run
name: run
- # TODO: don't hardcode docker socket location here
- # This will require CRI proxy installation to run
- # in host mount namespace.
- hostPath:
path: /var/run/docker.sock
name: dockersock
@@ -229,21 +252,6 @@ spec:
path: /var/lib/libvirt
name: libvirt
- hostPath:
- path: /etc/cni
- name: cniconf
- - hostPath:
- path: /opt/cni/bin
- name: cnibin
- - hostPath:
- path: /var/lib/cni
- name: cnidata
- - hostPath:
- path: /opt/criproxy/bin
- name: criproxybin
- - hostPath:
- path: /etc/criproxy
- name: criproxyconf
- - hostPath:
path: /var/log
name: log
- hostPath:
@@ -259,18 +267,27 @@ spec:
path: /var/log/virtlet/vms
name: vms-log
- hostPath:
+ path: /var/log/libvirt
+ name: libvirt-log
+ - hostPath:
path: /var/run/libvirt
name: libvirt-sockets
- hostPath:
path: /var/log/pods
name: pods-log
+ - hostPath:
+ path: /var/run/netns
+ name: netns-dir
- configMap:
name: virtlet-image-translations
name: image-name-translations
+ updateStrategy: {}
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: virtlet
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -280,23 +297,29 @@ subjects:
- kind: ServiceAccount
name: virtlet
namespace: kube-system
+
---
-kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
rules:
- - apiGroups:
- - ""
- resources:
- - configmaps
- verbs:
- - create
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ verbs:
+ - create
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
+ creationTimestamp: null
name: configmap-reader
rules:
- apiGroups:
@@ -307,10 +330,27 @@ rules:
- get
- list
- watch
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-userdata-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: kubelet-node-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -320,29 +360,49 @@ subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
+
---
-kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
+ name: vm-userdata-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-userdata-reader
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
name: virtlet-crd
rules:
- - apiGroups:
- - "apiextensions.k8s.io"
- resources:
- - customresourcedefinitions
- verbs:
- - create
- - apiGroups:
- - "virtlet.k8s"
- resources:
- - virtletimagemappings
- verbs:
- - list
- - get
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+- apiGroups:
+ - virtlet.k8s
+ resources:
+ - virtletimagemappings
+ - virtletconfigmappings
+ verbs:
+ - list
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: virtlet-crd
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -352,9 +412,110 @@ subjects:
- kind: ServiceAccount
name: virtlet
namespace: kube-system
+
---
apiVersion: v1
kind: ServiceAccount
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletimagemappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletImageMapping
+ plural: virtletimagemappings
+ shortNames:
+ - vim
+ singular: virtletimagemapping
+ scope: Namespaced
+ version: v1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletconfigmappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletConfigMapping
+ plural: virtletconfigmappings
+ shortNames:
+ - vcm
+ singular: virtletconfigmapping
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ config:
+ properties:
+ calicoSubnetSize:
+ maximum: 32
+ minimum: 0
+ type: integer
+ cniConfigDir:
+ type: string
+ cniPluginDir:
+ type: string
+ cpuModel:
+ type: string
+ criSocketPath:
+ type: string
+ databasePath:
+ type: string
+ disableKVM:
+ type: boolean
+ disableLogging:
+ type: boolean
+ downloadProtocol:
+ pattern: ^https?$
+ type: string
+ enableRegexpImageTranslation:
+ type: boolean
+ enableSriov:
+ type: boolean
+ fdServerSocketPath:
+ type: string
+ imageDir:
+ type: string
+ imageTranslationConfigsDir:
+ type: string
+ kubeletRootDir:
+ type: string
+ libvirtURI:
+ type: string
+ logLevel:
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ rawDevices:
+ type: string
+ skipImageTranslation:
+ type: boolean
+ streamPort:
+ maximum: 65535
+ minimum: 1
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ type: object
+ priority:
+ type: integer
+ version: v1
+
diff --git a/src/vagrant/kubeadm_virtlet/host_setup.sh b/src/vagrant/kubeadm_virtlet/host_setup.sh
index b2ee85c..f211f19 100644
--- a/src/vagrant/kubeadm_virtlet/host_setup.sh
+++ b/src/vagrant/kubeadm_virtlet/host_setup.sh
@@ -10,20 +10,33 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
+sudo apt-get update
+sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ software-properties-common
+
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce=18.03.1~ce-0~ubuntu
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated kubelet=1.12.2-00 kubeadm=1.12.2-00 kubectl=1.12.2-00 kubernetes-cni=0.6.0-00
-sudo rm -rf /var/lib/kubelet
-sudo systemctl stop kubelet
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service
deleted file mode 100644
index bb2f1de..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=CRI Proxy
-
-[Service]
-ExecStart=/usr/local/bin/criproxy -v 3 -alsologtostderr -connect /var/run/dockershim.sock,virtlet:/run/virtlet.sock -listen /run/criproxy.sock
-Restart=always
-StartLimitInterval=0
-RestartSec=10
-
-[Install]
-WantedBy=kubelet.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service
deleted file mode 100644
index c629a4b..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=dockershim for criproxy
-
-[Service]
-ExecStart=/usr/local/bin/dockershim ......
-Restart=always
-StartLimitInterval=0
-RestartSec=10
-
-[Install]
-RequiredBy=criproxy.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf
deleted file mode 100644
index 412a48d..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[Service]
-Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/run/criproxy.sock --image-service-endpoint=/run/criproxy.sock --enable-controller-attach-detach=false"
diff --git a/src/vagrant/kubeadm_virtlet/worker_setup.sh b/src/vagrant/kubeadm_virtlet/worker_setup.sh
index 4472874..bc37fb3 100644
--- a/src/vagrant/kubeadm_virtlet/worker_setup.sh
+++ b/src/vagrant/kubeadm_virtlet/worker_setup.sh
@@ -1,18 +1,12 @@
#!/bin/bash
set -ex
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443
-sudo docker pull openretriever/virtlet
-sudo docker run --rm openretriever/virtlet tar -c /criproxy | sudo tar -C /usr/local/bin -xv
-sudo ln -s /usr/local/bin/criproxy /usr/local/bin/dockershim
-
-sudo mkdir /etc/criproxy
-sudo touch /etc/criproxy/node.conf
-sudo cp -r /vagrant/virtlet/etc/systemd/system/* /etc/systemd/system/
-sudo systemctl stop kubelet
-sudo systemctl daemon-reload
-sudo systemctl enable criproxy dockershim
-sudo systemctl start criproxy dockershim
+wget https://github.com/Mirantis/criproxy/releases/download/v0.12.0/criproxy_0.12.0_amd64.deb
+sudo dpkg -i criproxy_0.12.0_amd64.deb
+sudo sed -i "s/EnvironmentFile/#EnvironmentFile/" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
sudo systemctl daemon-reload
-sudo systemctl start kubelet
+sudo systemctl restart dockershim
+sudo systemctl restart criproxy
+sudo systemctl restart kubelet
diff --git a/src/vagrant/setup_vagrant.sh b/src/vagrant/setup_vagrant.sh
index fcde052..23fdcd2 100755
--- a/src/vagrant/setup_vagrant.sh
+++ b/src/vagrant/setup_vagrant.sh
@@ -1,6 +1,5 @@
#!/bin/bash
#
-# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.