summaryrefslogtreecommitdiffstats
path: root/src/vagrant
diff options
context:
space:
mode:
Diffstat (limited to 'src/vagrant')
-rw-r--r--src/vagrant/kubeadm/Vagrantfile34
-rwxr-xr-xsrc/vagrant/kubeadm/deploy.sh10
-rw-r--r--src/vagrant/kubeadm/host_setup.sh32
-rwxr-xr-xsrc/vagrant/kubeadm/istio/bookinfo.sh (renamed from src/vagrant/kubeadm_istio/istio/bookinfo.sh)7
-rwxr-xr-xsrc/vagrant/kubeadm/istio/clean_bookinfo.sh (renamed from src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh)6
-rwxr-xr-xsrc/vagrant/kubeadm/istio/deploy.sh (renamed from src/vagrant/kubeadm_istio/istio/deploy.sh)20
-rwxr-xr-xsrc/vagrant/kubeadm/istio/istio.sh6
-rw-r--r--src/vagrant/kubeadm/kata/containerd.service22
-rw-r--r--src/vagrant/kubeadm/kata/kata_setup.sh54
-rwxr-xr-xsrc/vagrant/kubeadm/kata/nginx-app.sh33
-rw-r--r--src/vagrant/kubeadm/kata/nginx-app.yaml33
-rw-r--r--src/vagrant/kubeadm/master_setup.sh32
-rw-r--r--src/vagrant/kubeadm/multus/Dockerfile10
-rw-r--r--src/vagrant/kubeadm/multus/busybox.yaml (renamed from src/vagrant/kubeadm_multus/examples/busybox.yaml)0
-rw-r--r--src/vagrant/kubeadm/multus/cni_multus.yml88
-rwxr-xr-xsrc/vagrant/kubeadm/multus/multus.sh (renamed from src/vagrant/kubeadm_multus/examples/multus.sh)2
-rw-r--r--src/vagrant/kubeadm/registry_setup.sh23
-rw-r--r--src/vagrant/kubeadm/virtlet/cirros-vm.yaml42
-rw-r--r--src/vagrant/kubeadm/virtlet/images.yaml3
-rw-r--r--src/vagrant/kubeadm/virtlet/virtlet-ds.yaml521
-rwxr-xr-xsrc/vagrant/kubeadm/virtlet/virtlet.sh21
-rw-r--r--src/vagrant/kubeadm/virtlet/virtlet_setup.sh10
-rw-r--r--src/vagrant/kubeadm/worker_setup.sh8
-rw-r--r--src/vagrant/kubeadm_app/Vagrantfile (renamed from src/vagrant/kubeadm_multus/Vagrantfile)2
-rwxr-xr-xsrc/vagrant/kubeadm_app/app_setup.sh65
-rwxr-xr-xsrc/vagrant/kubeadm_app/create_images.sh10
-rw-r--r--src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml25
-rw-r--r--src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml82
-rwxr-xr-xsrc/vagrant/kubeadm_app/deploy.sh12
-rw-r--r--src/vagrant/kubeadm_app/host_setup.sh (renamed from src/vagrant/kubeadm_istio/host_setup.sh)2
-rw-r--r--src/vagrant/kubeadm_app/master_setup.sh10
-rwxr-xr-xsrc/vagrant/kubeadm_app/setup_vagrant.sh97
-rwxr-xr-xsrc/vagrant/kubeadm_app/tests/clearwater-live-test.sh46
-rw-r--r--src/vagrant/kubeadm_app/worker_setup.sh (renamed from src/vagrant/kubeadm_istio/worker_setup.sh)0
-rw-r--r--src/vagrant/kubeadm_basic/Vagrantfile4
-rw-r--r--src/vagrant/kubeadm_basic/host_setup.sh19
-rw-r--r--src/vagrant/kubeadm_basic/worker_setup.sh1
-rw-r--r--src/vagrant/kubeadm_clearwater/host_setup.sh2
-rwxr-xr-xsrc/vagrant/kubeadm_istio/deploy.sh12
-rw-r--r--src/vagrant/kubeadm_istio/master_setup.sh33
-rwxr-xr-xsrc/vagrant/kubeadm_kata/examples/nginx-app.sh7
-rw-r--r--src/vagrant/kubeadm_kata/examples/nginx-app.yaml2
-rw-r--r--src/vagrant/kubeadm_kata/host_setup.sh29
-rw-r--r--src/vagrant/kubeadm_kata/kata_setup.sh42
-rw-r--r--src/vagrant/kubeadm_kata/master_setup.sh11
-rw-r--r--src/vagrant/kubeadm_kata/worker_setup.sh25
-rw-r--r--src/vagrant/kubeadm_multus/master_setup.sh12
-rw-r--r--src/vagrant/kubeadm_onap/Vagrantfile23
-rwxr-xr-xsrc/vagrant/kubeadm_onap/host_setup.sh15
-rwxr-xr-xsrc/vagrant/kubeadm_onap/master_setup.sh31
-rwxr-xr-xsrc/vagrant/kubeadm_onap/onap_setup.sh53
-rw-r--r--src/vagrant/kubeadm_onap/registry_setup.sh30
-rw-r--r--src/vagrant/kubeadm_onap/setup_swap.sh5
-rw-r--r--src/vagrant/kubeadm_onap/setup_tunnel.sh3
-rwxr-xr-xsrc/vagrant/kubeadm_onap/worker_setup.sh18
-rw-r--r--src/vagrant/kubeadm_ovsdpdk/host_setup.sh2
-rw-r--r--src/vagrant/kubeadm_snort/Vagrantfile (renamed from src/vagrant/kubeadm_istio/Vagrantfile)0
-rwxr-xr-xsrc/vagrant/kubeadm_snort/deploy.sh (renamed from src/vagrant/kubeadm_multus/deploy.sh)2
-rw-r--r--src/vagrant/kubeadm_snort/host_setup.sh (renamed from src/vagrant/kubeadm_multus/host_setup.sh)2
-rw-r--r--src/vagrant/kubeadm_snort/master_setup.sh10
-rwxr-xr-xsrc/vagrant/kubeadm_snort/snort/snort-setup.sh31
-rw-r--r--src/vagrant/kubeadm_snort/snort/snort.yaml32
-rw-r--r--src/vagrant/kubeadm_snort/worker_setup.sh (renamed from src/vagrant/kubeadm_multus/worker_setup.sh)0
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml25
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/images.yaml2
-rw-r--r--src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml457
-rw-r--r--src/vagrant/kubeadm_virtlet/host_setup.sh29
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service11
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service11
-rw-r--r--src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf2
-rw-r--r--src/vagrant/kubeadm_virtlet/worker_setup.sh20
-rwxr-xr-xsrc/vagrant/setup_vagrant.sh1
72 files changed, 1960 insertions, 422 deletions
diff --git a/src/vagrant/kubeadm/Vagrantfile b/src/vagrant/kubeadm/Vagrantfile
new file mode 100644
index 0000000..dc5efb1
--- /dev/null
+++ b/src/vagrant/kubeadm/Vagrantfile
@@ -0,0 +1,34 @@
+$num_workers=2
+
+Vagrant.require_version ">= 1.8.6"
+Vagrant.configure("2") do |config|
+
+ config.vm.box = "ceph/ubuntu-xenial"
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.memory = 4096
+ libvirt.cpus = 4
+ end
+
+ config.vm.define "registry" do |config|
+ config.vm.hostname = "registry"
+ #config.vm.provision "shell", path: "registry_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.5"
+ end
+
+ config.vm.define "master" do |config|
+ config.vm.hostname = "master"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.provision "shell", path: "master_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.10"
+ end
+
+ (1 .. $num_workers).each do |i|
+ config.vm.define vm_name = "worker%d" % [i] do |config|
+ config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.provision "shell", path: "worker_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.#{i+20}"
+ end
+ end
+
+end
diff --git a/src/vagrant/kubeadm/deploy.sh b/src/vagrant/kubeadm/deploy.sh
new file mode 100755
index 0000000..eb61ad8
--- /dev/null
+++ b/src/vagrant/kubeadm/deploy.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+DIR="$(dirname `readlink -f $0`)"
+
+cd $DIR
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/kata/nginx-app.sh"
+vagrant ssh master -c "/vagrant/virtlet/virtlet.sh"
diff --git a/src/vagrant/kubeadm/host_setup.sh b/src/vagrant/kubeadm/host_setup.sh
new file mode 100644
index 0000000..1cb46f6
--- /dev/null
+++ b/src/vagrant/kubeadm/host_setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.5 registry
+192.168.1.10 master
+192.168.1.21 worker1
+192.168.1.22 worker2
+192.168.1.23 worker3
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y --allow-unauthenticated kubelet=1.12.2-00 kubeadm=1.12.2-00 kubectl=1.12.2-00 kubernetes-cni=0.6.0-00
+echo 'Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"' | sudo tee /etc/default/kubelet
+echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
+sudo modprobe br_netfilter
+sudo modprobe nf_conntrack_ipv4
+
+sudo swapoff -a
+sudo systemctl daemon-reload
+sudo systemctl stop kubelet
+sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_istio/istio/bookinfo.sh b/src/vagrant/kubeadm/istio/bookinfo.sh
index cc09167..c4eef11 100755
--- a/src/vagrant/kubeadm_istio/istio/bookinfo.sh
+++ b/src/vagrant/kubeadm/istio/bookinfo.sh
@@ -21,7 +21,10 @@ cd /vagrant/istio-source/
export PATH=$PWD/bin:$PATH
# Run the test application: bookinfo
-kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml)
+kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/platform/kube/bookinfo.yaml)
+
+# Define the ingress gateway for the application
+kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
# Wait for bookinfo deployed
kubectl get services
@@ -36,6 +39,6 @@ do
done
# Validate the bookinfo app
-export GATEWAY_URL=$(kubectl get po -l istio=ingress -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingress -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
+export GATEWAY_URL=$(kubectl get po -l istio=ingressgateway -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage
diff --git a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh b/src/vagrant/kubeadm/istio/clean_bookinfo.sh
index ede825f..7c539c0 100755
--- a/src/vagrant/kubeadm_istio/istio/clean_bookinfo.sh
+++ b/src/vagrant/kubeadm/istio/clean_bookinfo.sh
@@ -21,7 +21,9 @@ cd /vagrant/istio-source/
export PATH=$PWD/bin:$PATH
# Clean up bookinfo
-echo "" | samples/bookinfo/kube/cleanup.sh
+echo "" | samples/bookinfo/platform/kube/cleanup.sh
-istioctl get routerules
+kubectl get virtualservices
+kubectl get destinationrules
+kubectl get gateway
kubectl get pods
diff --git a/src/vagrant/kubeadm_istio/istio/deploy.sh b/src/vagrant/kubeadm/istio/deploy.sh
index 4abc856..e896580 100755
--- a/src/vagrant/kubeadm_istio/istio/deploy.sh
+++ b/src/vagrant/kubeadm/istio/deploy.sh
@@ -35,21 +35,13 @@ echo 'export PATH="$PATH:/vagrant/istio-source/bin"' >> ~/.bashrc
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
-kubectl apply -f install/kubernetes/istio.yaml
+# Install Istio’s Custom Resource Definitions first
+kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml
-# Install the sidecar injection configmap
-./install/kubernetes/webhook-create-signed-cert.sh \
- --service istio-sidecar-injector \
- --namespace istio-system \
- --secret sidecar-injector-certs
-kubectl apply -f install/kubernetes/istio-sidecar-injector-configmap-release.yaml
+# Wait 30s for Kubernetes to register the Istio CRDs
+sleep 30
-# Install the sidecar injector webhook
-cat install/kubernetes/istio-sidecar-injector.yaml | \
- ./install/kubernetes/webhook-patch-ca-bundle.sh > \
- install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-kubectl apply -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-kubectl -n istio-system get deployment -listio=sidecar-injector
+kubectl apply -f install/kubernetes/istio-demo.yaml
# Validate the installation
kubectl get svc -n istio-system
@@ -61,6 +53,6 @@ while [ $r -ne "0" ]
do
sleep 30
kubectl get pods -n istio-system
- r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running' | wc -l)
+ r=$(kubectl get pods -n istio-system | egrep -v 'NAME|Running|Completed' | wc -l)
done
diff --git a/src/vagrant/kubeadm/istio/istio.sh b/src/vagrant/kubeadm/istio/istio.sh
new file mode 100755
index 0000000..9c2caf6
--- /dev/null
+++ b/src/vagrant/kubeadm/istio/istio.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+/vagrant/istio/deploy.sh
+/vagrant/istio/bookinfo.sh
+/vagrant/istio/clean_bookinfo.sh
+
diff --git a/src/vagrant/kubeadm/kata/containerd.service b/src/vagrant/kubeadm/kata/containerd.service
new file mode 100644
index 0000000..1ae7fe8
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/containerd.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=containerd container runtime
+Documentation=https://containerd.io
+After=network.target
+
+[Service]
+ExecStartPre=-/sbin/modprobe overlay
+ExecStart=/usr/local/bin/containerd
+
+Delegate=yes
+KillMode=process
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNPROC=infinity
+LimitCORE=infinity
+LimitNOFILE=infinity
+# Comment TasksMax if your systemd version does not supports it.
+# Only systemd 226 and above support this version.
+TasksMax=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/src/vagrant/kubeadm/kata/kata_setup.sh b/src/vagrant/kubeadm/kata/kata_setup.sh
new file mode 100644
index 0000000..1fd77b5
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/kata_setup.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+ARCH=$(arch)
+BRANCH="${BRANCH:-stable-1.7}"
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
+curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
+sudo -E apt-get update
+sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
+
+wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc6/runc.amd64
+sudo cp runc.amd64 /usr/sbin/runc
+sudo chmod 755 /usr/sbin/runc
+wget http://github.com/containerd/containerd/releases/download/v1.2.2/containerd-1.2.2.linux-amd64.tar.gz >& /dev/null
+sudo tar -C /usr/local -xzf containerd-1.2.2.linux-amd64.tar.gz
+wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz >& /dev/null
+sudo tar -C /usr/local/bin -xzf crictl-v1.13.0-linux-amd64.tar.gz
+echo "runtime-endpoint: unix:///run/containerd/containerd.sock" | sudo tee /etc/crictl.yaml
+wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/critest-v1.13.0-linux-amd64.tar.gz >& /dev/null
+sudo tar C /usr/local/bin -xzf critest-v1.13.0-linux-amd64.tar.gz
+sudo cp /vagrant/kata/containerd.service /etc/systemd/system/
+sudo systemctl start containerd
+sudo mkdir -p /opt/cni/bin
+sudo mkdir -p /etc/cni/net.d
+sudo mkdir -p /etc/containerd
+containerd config default | sudo tee /etc/containerd/config.toml
+sudo sed -i "s,\[plugins.cri.registry.mirrors\],\[plugins.cri.registry.mirrors\]\n \[plugins.cri.registry.mirrors.\"registry:5000\"\]\n endpoint = \[\"http://registry:5000\"\]," /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_type.*/runtime_type=\"io.containerd.runtime.v1.linux\"/" /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_engine.*/runtime_engine=\"kata-runtime\"/" /etc/containerd/config.toml
+sudo systemctl restart containerd
+
+cat << EOF | sudo tee /etc/systemd/system/kubelet.service.d/0-containerd.conf
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+EOF
+
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm/kata/nginx-app.sh b/src/vagrant/kubeadm/kata/nginx-app.sh
new file mode 100755
index 0000000..fb9540e
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/nginx-app.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+kubectl delete services --all
+kubectl delete rc --all
+kubectl delete pods --all
+kubectl create -f /vagrant/kata/nginx-app.yaml
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+r=0
+while [ "$r" -eq "0" ]
+do
+ sleep 30
+ r=$(kubectl get pods | grep Running | wc -l)
+done
+svcip=$(kubectl get services nginx -o json | grep clusterIP | cut -f4 -d'"')
+wget http://$svcip
diff --git a/src/vagrant/kubeadm/kata/nginx-app.yaml b/src/vagrant/kubeadm/kata/nginx-app.yaml
new file mode 100644
index 0000000..9de4ef4
--- /dev/null
+++ b/src/vagrant/kubeadm/kata/nginx-app.yaml
@@ -0,0 +1,33 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: nginx
+ labels:
+ app: nginx
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ protocol: TCP
+ name: http
+ selector:
+ app: nginx
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: nginx
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: nginx
+ annotations:
+ io.kubernetes.cri.untrusted-workload: "true"
+ spec:
+ containers:
+ - name: nginx
+ image: nginx
+ ports:
+ - containerPort: 80
diff --git a/src/vagrant/kubeadm/master_setup.sh b/src/vagrant/kubeadm/master_setup.sh
new file mode 100644
index 0000000..cec8877
--- /dev/null
+++ b/src/vagrant/kubeadm/master_setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -ex
+
+sudo apt-get update
+sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ software-properties-common
+
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce=18.03.1~ce-0~ubuntu
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries": ["registry:5000"]
+}
+EOF
+sudo service docker restart
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f http://git.io/weave-kube-1.6
+kubectl apply -f /vagrant/multus/cni_multus.yml
diff --git a/src/vagrant/kubeadm/multus/Dockerfile b/src/vagrant/kubeadm/multus/Dockerfile
new file mode 100644
index 0000000..7923d0d
--- /dev/null
+++ b/src/vagrant/kubeadm/multus/Dockerfile
@@ -0,0 +1,10 @@
+FROM ubuntu:16.04
+ENV PATH="/usr/local/go/bin:$PATH"
+WORKDIR /go/src/
+RUN apt-get update && apt-get install -y wget git gcc
+RUN wget -qO- https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz
+RUN git clone https://github.com/Intel-Corp/multus-cni
+RUN cd multus-cni; bash ./build
+
+FROM busybox
+COPY --from=0 /go/src/multus-cni/bin/multus /root
diff --git a/src/vagrant/kubeadm_multus/examples/busybox.yaml b/src/vagrant/kubeadm/multus/busybox.yaml
index 7fd1b8d..7fd1b8d 100644
--- a/src/vagrant/kubeadm_multus/examples/busybox.yaml
+++ b/src/vagrant/kubeadm/multus/busybox.yaml
diff --git a/src/vagrant/kubeadm/multus/cni_multus.yml b/src/vagrant/kubeadm/multus/cni_multus.yml
new file mode 100644
index 0000000..123392b
--- /dev/null
+++ b/src/vagrant/kubeadm/multus/cni_multus.yml
@@ -0,0 +1,88 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnimultus
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: cnimultus-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ app: cnimultus
+data:
+ cni-conf.json: |
+ {
+ "name": "multus",
+ "type": "multus",
+ "delegates": [
+ {
+ "name": "weave",
+ "type": "weave-net",
+ "hairpinMode": true,
+ "masterplugin": true
+ },
+ {
+ "name": "mynet",
+ "type": "bridge",
+ "bridge": "cni0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "10.22.0.0/16",
+ "routes": [
+ { "dst": "0.0.0.0/0" }
+ ]
+ }
+ }
+ ]
+ }
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: cnimultus-ds
+ namespace: kube-system
+ labels:
+ tier: node
+ app: cnimultus
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: cnimultus
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: cnimultus
+ containers:
+ - name: run-cni
+ image: registry:5000/multus-cni:latest
+ command: ['sh', '-c', 'cp /multus/cni-conf.json /etc/cni/net.d/05-multus.conf; cp /root/multus /opt/cni/bin; while true; do sleep 10000; done' ]
+ volumeMounts:
+ - name: cni-bin
+ mountPath: /opt/cni/bin
+ - name: etc-cni
+ mountPath: /etc/cni/net.d
+ - name: cnimultus-cfg
+ mountPath: /multus/
+ volumes:
+ - name: cni-bin
+ hostPath:
+ path: /opt/cni/bin
+ - name: etc-cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnimultus-cfg
+ configMap:
+ name: cnimultus-cfg
diff --git a/src/vagrant/kubeadm_multus/examples/multus.sh b/src/vagrant/kubeadm/multus/multus.sh
index d7b39a0..9461a6f 100755
--- a/src/vagrant/kubeadm_multus/examples/multus.sh
+++ b/src/vagrant/kubeadm/multus/multus.sh
@@ -24,7 +24,7 @@ do
done
kubectl delete rc --all
-kubectl apply -f /vagrant/examples/busybox.yaml
+kubectl apply -f /vagrant/multus/busybox.yaml
r="0"
while [ $r -ne "2" ]
do
diff --git a/src/vagrant/kubeadm/registry_setup.sh b/src/vagrant/kubeadm/registry_setup.sh
new file mode 100644
index 0000000..5466f1c
--- /dev/null
+++ b/src/vagrant/kubeadm/registry_setup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.5 registry
+EOF
+
+sudo apt-get update
+sudo apt-get install -y docker.io
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries": ["registry:5000"]
+}
+EOF
+sudo service docker restart
+
+sudo docker pull registry:2
+sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+sudo docker build . -f /vagrant/multus/Dockerfile -t multus-cni
+sudo docker tag multus-cni localhost:5000/multus-cni
+sudo docker push localhost:5000/multus-cni
diff --git a/src/vagrant/kubeadm/virtlet/cirros-vm.yaml b/src/vagrant/kubeadm/virtlet/cirros-vm.yaml
new file mode 100644
index 0000000..334142b
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/cirros-vm.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: cirros-vm
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ # CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
+ VirtletDiskDriver: virtio
+ # inject ssh keys via cloud-init
+ VirtletSSHKeys: |
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
+ # set root volume size
+ VirtletRootVolumeSize: 1Gi
+spec:
+ # This nodeAffinity specification tells Kubernetes to run this
+ # pod only on the nodes that have extraRuntime=virtlet label.
+ # This label is used by Virtlet DaemonSet to select nodes
+ # that must have Virtlet runtime
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: cirros-vm
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: virtlet.cloud/cirros
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for `kubectl attach -t` to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi
diff --git a/src/vagrant/kubeadm/virtlet/images.yaml b/src/vagrant/kubeadm/virtlet/images.yaml
new file mode 100644
index 0000000..1541ca7
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/images.yaml
@@ -0,0 +1,3 @@
+translations:
+ - name: cirros
+ url: https://github.com/mirantis/virtlet/releases/download/v0.9.3/cirros.img
diff --git a/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml b/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml
new file mode 100644
index 0000000..1bb4882
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet-ds.yaml
@@ -0,0 +1,521 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ runtime: virtlet
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ runtime: virtlet
+ name: virtlet
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - command:
+ - /libvirt.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: libvirt
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /sys/fs/cgroup
+ name: cgroup
+ - mountPath: /lib/modules
+ name: modules
+ readOnly: true
+ - mountPath: /boot
+ name: boot
+ readOnly: true
+ - mountPath: /run
+ name: run
+ - mountPath: /var/lib/virtlet
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/run/libvirt
+ name: libvirt-sockets
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /dev
+ name: dev
+ - image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: virtlet
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/run/virtlet.sock </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /run
+ name: run
+ - mountPath: /lib/modules
+ name: modules
+ readOnly: true
+ - mountPath: /boot
+ name: boot
+ readOnly: true
+ - mountPath: /dev
+ name: dev
+ - mountPath: /var/lib/virtlet
+ mountPropagation: Bidirectional
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/run/libvirt
+ name: libvirt-sockets
+ - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: Bidirectional
+ name: k8s-pods-dir
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /etc/virtlet/images
+ name: image-name-translations
+ - mountPath: /var/log/pods
+ name: pods-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /var/run/netns
+ mountPropagation: Bidirectional
+ name: netns-dir
+ - command:
+ - /vms.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: vms
+ resources: {}
+ volumeMounts:
+ - mountPath: /var/lib/virtlet
+ mountPropagation: HostToContainer
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: HostToContainer
+ name: k8s-pods-dir
+ - mountPath: /dev
+ name: dev
+ - mountPath: /lib/modules
+ name: modules
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ initContainers:
+ - command:
+ - /prepare-node.sh
+ env:
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ - name: VIRTLET_DISABLE_KVM
+ valueFrom:
+ configMapKeyRef:
+ key: disable_kvm
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_SRIOV_SUPPORT
+ valueFrom:
+ configMapKeyRef:
+ key: sriov_support
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DOWNLOAD_PROTOCOL
+ valueFrom:
+ configMapKeyRef:
+ key: download_protocol
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_LOGLEVEL
+ valueFrom:
+ configMapKeyRef:
+ key: loglevel
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CALICO_SUBNET
+ valueFrom:
+ configMapKeyRef:
+ key: calico-subnet
+ name: virtlet-config
+ optional: true
+ - name: IMAGE_REGEXP_TRANSLATION
+ valueFrom:
+ configMapKeyRef:
+ key: image_regexp_translation
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_RAW_DEVICES
+ valueFrom:
+ configMapKeyRef:
+ key: raw_devices
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DISABLE_LOGGING
+ valueFrom:
+ configMapKeyRef:
+ key: disable_logging
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CPU_MODEL
+ valueFrom:
+ configMapKeyRef:
+ key: cpu-model
+ name: virtlet-config
+ optional: true
+ - name: KUBELET_ROOT_DIR
+ valueFrom:
+ configMapKeyRef:
+ key: kubelet_root_dir
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_IMAGE_TRANSLATIONS_DIR
+ value: /etc/virtlet/images
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: prepare-node
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /kubelet-volume-plugins
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /run
+ name: run
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ - mountPath: /hostlog
+ name: log
+ - mountPath: /host-var-lib
+ name: var-lib
+ - mountPath: /dev
+ name: dev
+ - mountPath: /var/lib/virtlet
+ name: virtlet
+ serviceAccountName: virtlet
+ volumes:
+ - hostPath:
+ path: /dev
+ name: dev
+ - hostPath:
+ path: /sys/fs/cgroup
+ name: cgroup
+ - hostPath:
+ path: /lib/modules
+ name: modules
+ - hostPath:
+ path: /boot
+ name: boot
+ - hostPath:
+ path: /run
+ name: run
+ - hostPath:
+ path: /var/run/docker.sock
+ name: dockersock
+ - hostPath:
+ path: /var/lib/virtlet
+ name: virtlet
+ - hostPath:
+ path: /var/lib/libvirt
+ name: libvirt
+ - hostPath:
+ path: /var/log
+ name: log
+ - hostPath:
+ path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+ name: k8s-flexvolume-plugins-dir
+ - hostPath:
+ path: /var/lib/kubelet/pods
+ name: k8s-pods-dir
+ - hostPath:
+ path: /var/lib
+ name: var-lib
+ - hostPath:
+ path: /var/log/virtlet/vms
+ name: vms-log
+ - hostPath:
+ path: /var/log/libvirt
+ name: libvirt-log
+ - hostPath:
+ path: /var/run/libvirt
+ name: libvirt-sockets
+ - hostPath:
+ path: /var/log/pods
+ name: pods-log
+ - hostPath:
+ path: /var/run/netns
+ name: netns-dir
+ - configMap:
+ name: virtlet-image-translations
+ name: image-name-translations
+ updateStrategy: {}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ verbs:
+ - create
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: configmap-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-userdata-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: kubelet-node-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: configmap-reader
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:nodes
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: vm-userdata-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-userdata-reader
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+- apiGroups:
+ - virtlet.k8s
+ resources:
+ - virtletimagemappings
+ - virtletconfigmappings
+ verbs:
+ - list
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-crd
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletimagemappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletImageMapping
+ plural: virtletimagemappings
+ shortNames:
+ - vim
+ singular: virtletimagemapping
+ scope: Namespaced
+ version: v1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletconfigmappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletConfigMapping
+ plural: virtletconfigmappings
+ shortNames:
+ - vcm
+ singular: virtletconfigmapping
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ config:
+ properties:
+ calicoSubnetSize:
+ maximum: 32
+ minimum: 0
+ type: integer
+ cniConfigDir:
+ type: string
+ cniPluginDir:
+ type: string
+ cpuModel:
+ type: string
+ criSocketPath:
+ type: string
+ databasePath:
+ type: string
+ disableKVM:
+ type: boolean
+ disableLogging:
+ type: boolean
+ downloadProtocol:
+ pattern: ^https?$
+ type: string
+ enableRegexpImageTranslation:
+ type: boolean
+ enableSriov:
+ type: boolean
+ fdServerSocketPath:
+ type: string
+ imageDir:
+ type: string
+ imageTranslationConfigsDir:
+ type: string
+ kubeletRootDir:
+ type: string
+ libvirtURI:
+ type: string
+ logLevel:
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ rawDevices:
+ type: string
+ skipImageTranslation:
+ type: boolean
+ streamPort:
+ maximum: 65535
+ minimum: 1
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ type: object
+ priority:
+ type: integer
+ version: v1
+
diff --git a/src/vagrant/kubeadm/virtlet/virtlet.sh b/src/vagrant/kubeadm/virtlet/virtlet.sh
new file mode 100755
index 0000000..4ed527e
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -ex
+
+kubectl label node worker1 extraRuntime=virtlet
+kubectl label node worker2 extraRuntime=virtlet
+kubectl create configmap -n kube-system virtlet-config --from-literal=download_protocol=http --from-literal=image_regexp_translation=1 --from-literal=disable_kvm=y
+kubectl create configmap -n kube-system virtlet-image-translations --from-file /vagrant/virtlet/images.yaml
+kubectl create -f /vagrant/virtlet/virtlet-ds.yaml
+
+kubectl delete pod --all
+kubectl create -f /vagrant/virtlet/cirros-vm.yaml
+r="0"
+while [ $r -ne "1" ]
+do
+ r=$(kubectl get pods cirros-vm | grep Running | wc -l)
+ sleep 60
+done
+sleep 360
+kubectl get pods cirros-vm -o custom-columns=:.status.podIP | xargs ping -c 4
+echo 'login by user:cirros & password:gocubsgo'
diff --git a/src/vagrant/kubeadm/virtlet/virtlet_setup.sh b/src/vagrant/kubeadm/virtlet/virtlet_setup.sh
new file mode 100644
index 0000000..b2dfaa0
--- /dev/null
+++ b/src/vagrant/kubeadm/virtlet/virtlet_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+wget https://github.com/Mirantis/criproxy/releases/download/v0.14.0/criproxy_0.14.0_amd64.deb
+echo "criproxy criproxy/primary_cri select containerd" | sudo debconf-set-selections
+sudo dpkg -i criproxy_0.14.0_amd64.deb
+sudo sed -i "s/EnvironmentFile/#EnvironmentFile/" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm/worker_setup.sh b/src/vagrant/kubeadm/worker_setup.sh
new file mode 100644
index 0000000..6b08712
--- /dev/null
+++ b/src/vagrant/kubeadm/worker_setup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+set -ex
+
+bash /vagrant/kata/kata_setup.sh
+bash /vagrant/virtlet/virtlet_setup.sh
+sleep 120
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443
diff --git a/src/vagrant/kubeadm_multus/Vagrantfile b/src/vagrant/kubeadm_app/Vagrantfile
index 9320074..3ed02d5 100644
--- a/src/vagrant/kubeadm_multus/Vagrantfile
+++ b/src/vagrant/kubeadm_app/Vagrantfile
@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
config.vm.box = "ceph/ubuntu-xenial"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
diff --git a/src/vagrant/kubeadm_app/app_setup.sh b/src/vagrant/kubeadm_app/app_setup.sh
new file mode 100755
index 0000000..a67a54f
--- /dev/null
+++ b/src/vagrant/kubeadm_app/app_setup.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+static_ip=$(ifconfig eth0 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
+echo "STATIC_IP is $static_ip."
+
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+
+# Set the configmaps
+kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local
+
+# Generate the yamls
+cd clearwater-docker/kubernetes/
+./k8s-gencfg --image_path=enriquetaso --image_tag=latest
+
+# Expose Ellis
+# The Ellis provisioning interface can then be accessed on static_ip:30080
+cat ellis-svc.yaml | sed "s/clusterIP: None/type: NodePort/" > ellis-svc.yaml.new
+cat ellis-svc.yaml.new | sed "s/port: 80/port: 80\n nodePort: 30080/" > ellis-svc.yaml
+rm ellis-svc.yaml.new
+
+# Bono configuration
+# Have a static external IP address available that the load balancer can use
+cp /vagrant/custom-bono-svc/bono-svc.yaml .
+sed -ie "6s/$/\n - $static_ip/" bono-svc.yaml
+sed -ie "7s/$/\n loadBalancerIP: $static_ip/" bono-svc.yaml
+
+cd
+kubectl apply -f clearwater-docker/kubernetes
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+sleep 60
+
+r="1"
+while [ $r != "0" ]
+do
+ kubectl get pods
+ r=$( kubectl get pods | grep Pending | wc -l)
+ sleep 60
+done
+
+q="1"
+while [ $q != "0" ]
+do
+ kubectl get pods
+ q=$( kubectl get pods | grep ContainerCreating | wc -l)
+ sleep 60
+done
diff --git a/src/vagrant/kubeadm_app/create_images.sh b/src/vagrant/kubeadm_app/create_images.sh
new file mode 100755
index 0000000..12b28a3
--- /dev/null
+++ b/src/vagrant/kubeadm_app/create_images.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Build images
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+cd clearwater-docker
+for i in base astaire cassandra chronos bono ellis homer homestead homestead-prov ralf sprout
+do
+ docker build -t clearwater/$i $i
+done
+
diff --git a/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml b/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml
new file mode 100644
index 0000000..9280b0f
--- /dev/null
+++ b/src/vagrant/kubeadm_app/custom-bono-svc/bono-svc.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: bono
+spec:
+ externalIPs:
+ ports:
+ - name: "3478"
+ port: 3478
+ protocol: TCP
+ targetPort: 3478
+ - name: "5060"
+ port: 5060
+ protocol: TCP
+ targetPort: 5060
+ - name: "5062"
+ port: 5062
+ protocol: TCP
+ targetPort: 5062
+ selector:
+ service: bono
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}
diff --git a/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml b/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml
new file mode 100644
index 0000000..cde909b
--- /dev/null
+++ b/src/vagrant/kubeadm_app/custom-bono-svc/deployment-svc.yaml
@@ -0,0 +1,82 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: busybox
+spec:
+ strategy:
+ rollingUpdate:
+ maxSurge: 10%
+ maxUnavailable: 0
+ selector:
+ matchLabels:
+ app: busybox
+ replicas: 3
+ template:
+ metadata:
+ labels:
+ app: busybox
+ annotations:
+ networks: '[
+ { "name": "calico"},
+ { "name": "weave"}
+ ]'
+ spec:
+ containers:
+ - name: busybox
+ image: bcmt-registry:5000/busybox:latest
+ command: ["top"]
+ stdin: true
+ tty: true
+ dnsPolicy: Default
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+
+---
+
+kind: Deployment
+apiVersion: apps/v1beta2
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+spec:
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ k8s-app: nginx
+ template:
+ metadata:
+ labels:
+ k8s-app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:2
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ args:
+---
+# ------------------- Nginx Service ------------------- #
+
+kind: Service
+apiVersion: v1
+metadata:
+ labels:
+ k8s-app: nginx
+ name: nginx
+ namespace: nginx
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ nodePort: 31001
+ selector:
+ k8s-app: nginx
diff --git a/src/vagrant/kubeadm_app/deploy.sh b/src/vagrant/kubeadm_app/deploy.sh
new file mode 100755
index 0000000..54644a3
--- /dev/null
+++ b/src/vagrant/kubeadm_app/deploy.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -ex
+DIR="$(dirname `readlink -f $0`)"
+
+cd $DIR
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/clearwater_setup.sh"
+
+# Run tests
+vagrant ssh master -c "/vagrant/tests/clearwater-live-test.sh"
diff --git a/src/vagrant/kubeadm_istio/host_setup.sh b/src/vagrant/kubeadm_app/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_istio/host_setup.sh
+++ b/src/vagrant/kubeadm_app/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_app/master_setup.sh b/src/vagrant/kubeadm_app/master_setup.sh
new file mode 100644
index 0000000..b181582
--- /dev/null
+++ b/src/vagrant/kubeadm_app/master_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f http://git.io/weave-kube-1.6
diff --git a/src/vagrant/kubeadm_app/setup_vagrant.sh b/src/vagrant/kubeadm_app/setup_vagrant.sh
new file mode 100755
index 0000000..23fdcd2
--- /dev/null
+++ b/src/vagrant/kubeadm_app/setup_vagrant.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+DIR="$(dirname `readlink -f $0`)"
+
+usage() {
+ echo "Usage: $0 -b virtualbox|libvirt"
+}
+
+install_packages()
+{
+ cat << EOF | sudo tee /etc/sudoers.d/${USER}
+${USER} ALL = (root) NOPASSWD:ALL
+EOF
+ sudo apt-get update -y
+ sudo apt-get install -y git unzip
+ wget https://releases.hashicorp.com/vagrant/2.0.2/vagrant_2.0.2_x86_64.deb
+ sudo dpkg -i vagrant_2.0.2_x86_64.deb
+ rm -rf vagrant_2.0.2_x86_64.deb
+
+ sudo apt-get install -y virtualbox
+
+ #refer to https://github.com/vagrant-libvirt/vagrant-libvirt
+ sudo sed -i 's/^# deb-src/deb-src/g' /etc/apt/sources.list
+ sudo apt-get update
+ sudo apt-get build-dep vagrant ruby-libvirt -y
+ sudo apt-get install -y bridge-utils qemu libvirt-bin ebtables dnsmasq
+ sudo apt-get install -y libffi-dev libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
+ vagrant plugin install vagrant-libvirt
+ sudo adduser ${USER} libvirtd
+ sudo service libvirtd restart
+}
+
+install_box_builder()
+{
+ # Thanks to the Bento project's great effort
+ # Bento project(https://github.com/chef/bento) is released by Apache 2.0 License
+ cd $DIR
+ rm -rf bento
+ git clone https://github.com/chef/bento
+ cd bento; git checkout 05d98910d835b503e7be3d2e4071956f66fbbbc4
+ cp ../update.sh ubuntu/scripts/
+ wget https://releases.hashicorp.com/packer/1.1.2/packer_1.1.2_linux_amd64.zip
+ unzip packer_1.1.2_linux_amd64.zip
+ cd ubuntu
+ sed -i 's/"disk_size": "40960"/"disk_size": "409600"/' ubuntu-16.04-amd64.json
+}
+
+build_virtualbox() {
+ cd $DIR/bento/ubuntu
+ rm -rf ~/'VirtualBox VMs'/ubuntu-16.04-amd64
+ ../packer build -var 'headless=true' -only=virtualbox-iso ubuntu-16.04-amd64.json
+ vagrant box remove -f opnfv/container4nfv --all || true
+ vagrant box add opnfv/container4nfv ../builds/ubuntu-16.04.virtualbox.box
+}
+
+build_libvirtbox() {
+ cd $DIR/bento/ubuntu
+ ../packer build -var 'headless=true' -only=qemu ubuntu-16.04-amd64.json
+ vagrant box remove -f opnfv/container4nfv.kvm --all || true
+ vagrant box add opnfv/container4nfv.kvm ../builds/ubuntu-16.04.libvirt.box
+}
+
+install_packages
+
+set +x
+while getopts "b:h" OPTION; do
+ case $OPTION in
+ b)
+ if [ ${OPTARG} == "virtualbox" ]; then
+ install_box_builder
+ build_virtualbox
+ elif [ ${OPTARG} == "libvirt" ]; then
+ install_box_builder
+ build_libvirtbox
+ fi
+ ;;
+ h)
+ usage;
+ ;;
+ esac
+done
diff --git a/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh b/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh
new file mode 100755
index 0000000..6e5238e
--- /dev/null
+++ b/src/vagrant/kubeadm_app/tests/clearwater-live-test.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+# http://clearwater.readthedocs.io/en/latest/Running_the_live_tests.html
+sudo apt-get install build-essential bundler git --yes
+sudo apt install gnupg2 --yes
+gpg2 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+curl -L https://get.rvm.io | bash -s stable
+
+source ~/.rvm/scripts/rvm
+rvm autolibs enable
+rvm install 1.9.3
+rvm use 1.9.3
+
+
+# Setup ruby and gems
+git clone https://github.com/Metaswitch/clearwater-live-test.git
+cd clearwater-live-test/
+cd quaff/ && git clone https://github.com/Metaswitch/quaff.git
+cd ..
+bundle install
+
+# Get Ellis ip
+ellisip=$(kubectl get services ellis -o json | grep clusterIP | cut -f4 -d'"')
+
+# Get Bono ip
+bonoip=$(kubectl get services bono -o json | grep clusterIP | cut -f4 -d'"')
+
+# Run the tests
+rake test[default.svc.cluster.local] SIGNUP_CODE=secret PROXY=$bonoip ELLIS=$ellisip
diff --git a/src/vagrant/kubeadm_istio/worker_setup.sh b/src/vagrant/kubeadm_app/worker_setup.sh
index 74e4178..74e4178 100644
--- a/src/vagrant/kubeadm_istio/worker_setup.sh
+++ b/src/vagrant/kubeadm_app/worker_setup.sh
diff --git a/src/vagrant/kubeadm_basic/Vagrantfile b/src/vagrant/kubeadm_basic/Vagrantfile
index 9320074..54b6b59 100644
--- a/src/vagrant/kubeadm_basic/Vagrantfile
+++ b/src/vagrant/kubeadm_basic/Vagrantfile
@@ -3,13 +3,13 @@ $num_workers=2
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
- config.vm.box = "ceph/ubuntu-xenial"
+ config.vm.box = "generic/ubuntu1804"
config.vm.provider :libvirt do |libvirt|
libvirt.memory = 4096
libvirt.cpus = 4
end
- config.vm.synced_folder "../..", "/src"
+ config.vm.synced_folder ".", "/vagrant"
config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.define "master" do |config|
diff --git a/src/vagrant/kubeadm_basic/host_setup.sh b/src/vagrant/kubeadm_basic/host_setup.sh
index c1a23eb..2094628 100644
--- a/src/vagrant/kubeadm_basic/host_setup.sh
+++ b/src/vagrant/kubeadm_basic/host_setup.sh
@@ -2,6 +2,11 @@
set -ex
+sudo systemctl stop systemd-resolved
+cat << EOF | sudo tee /etc/resolv.conf
+nameserver 8.8.8.8
+EOF
+
cat << EOF | sudo tee /etc/hosts
127.0.0.1 localhost
192.168.1.10 master
@@ -10,19 +15,21 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
+sudo apt-get update
+sudo apt-get install -y docker.io
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated kubelet=1.15.2-00 kubeadm=1.15.2-00 kubectl=1.15.2-00 kubernetes-cni=0.7.5-00
+sudo sed -i '9i\Environment="KUBELET_EXTRA_ARGS=--feature-gates=DevicePlugins=true"' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
sudo swapoff -a
sudo systemctl daemon-reload
sudo systemctl stop kubelet
diff --git a/src/vagrant/kubeadm_basic/worker_setup.sh b/src/vagrant/kubeadm_basic/worker_setup.sh
index 74e4178..42477e6 100644
--- a/src/vagrant/kubeadm_basic/worker_setup.sh
+++ b/src/vagrant/kubeadm_basic/worker_setup.sh
@@ -1,4 +1,5 @@
#!/bin/bash
set -ex
+sleep 120
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
diff --git a/src/vagrant/kubeadm_clearwater/host_setup.sh b/src/vagrant/kubeadm_clearwater/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_clearwater/host_setup.sh
+++ b/src/vagrant/kubeadm_clearwater/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_istio/deploy.sh b/src/vagrant/kubeadm_istio/deploy.sh
deleted file mode 100755
index d947645..0000000
--- a/src/vagrant/kubeadm_istio/deploy.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-DIR="$(dirname `readlink -f $0`)"
-
-cd $DIR
-../cleanup.sh
-vagrant up
-vagrant ssh master -c "/vagrant/istio/deploy.sh"
-vagrant ssh master -c "/vagrant/istio/bookinfo.sh"
-vagrant ssh master -c "/vagrant/istio/clean_bookinfo.sh"
-
diff --git a/src/vagrant/kubeadm_istio/master_setup.sh b/src/vagrant/kubeadm_istio/master_setup.sh
deleted file mode 100644
index f308244..0000000
--- a/src/vagrant/kubeadm_istio/master_setup.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NodeRestriction,ResourceQuota"
-KUBE_APISERVER_CONF="/etc/kubernetes/manifests/kube-apiserver.yaml"
-
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-
-kubectl apply -f http://git.io/weave-kube-1.6
-
-# Enable mutating webhook admission controller
-# kube-apiserver will be automatically restarted by kubelet when its manifest file update.
-# https://istio.io/docs/setup/kubernetes/sidecar-injection.html
-sudo sed -i "s/admission-control=.*/admission-control=$ADMISSION_CONTROL/g" $KUBE_APISERVER_CONF
-
-set +e
-# wait for kube-apiserver restart
-r="1"
-while [ $r -ne "0" ]
-do
- sleep 2
- kubectl version > /dev/null
- r=$?
-done
-set -e
-
-# check if admissionregistration.k8s.io/v1beta1 API is enabled
-kubectl api-versions | grep admissionregistration
-
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.sh b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
index 96d776c..a66b7ca 100755
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.sh
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
@@ -20,6 +20,11 @@ kubectl get nodes
kubectl get services
kubectl get pods
kubectl get rc
-sleep 180
+r=0
+while [ "$r" -eq "0" ]
+do
+ sleep 30
+ r=$(kubectl get pods | grep Running | wc -l)
+done
svcip=$(kubectl get services nginx -o json | grep clusterIP | cut -f4 -d'"')
wget http://$svcip
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
index f80881a..9de4ef4 100644
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
@@ -23,6 +23,8 @@ spec:
metadata:
labels:
app: nginx
+ annotations:
+ io.kubernetes.cri.untrusted-workload: "true"
spec:
containers:
- name: nginx
diff --git a/src/vagrant/kubeadm_kata/host_setup.sh b/src/vagrant/kubeadm_kata/host_setup.sh
index d2af951..02bb296 100644
--- a/src/vagrant/kubeadm_kata/host_setup.sh
+++ b/src/vagrant/kubeadm_kata/host_setup.sh
@@ -30,10 +30,37 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
+sudo apt-get install -y --allow-unauthenticated kubelet=1.10.5-00 kubeadm=1.10.5-00 kubectl=1.10.5-00 kubernetes-cni=0.6.0-00
+
sudo swapoff -a
sudo systemctl stop kubelet
sudo rm -rf /var/lib/kubelet
sudo systemctl daemon-reload
sudo systemctl start kubelet
+
+
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 5EDB1B62EC4926EA
+sudo apt-get update -y
+sudo apt-get install software-properties-common -y
+sudo apt-add-repository cloud-archive:queens -y
+sudo apt-get update -y
+
+#sudo apt-get build-dep dkms -y
+sudo apt-get install python-six openssl python-pip -y
+sudo -H pip install --upgrade pip
+sudo -H pip install ovs
+#sudo apt-get install openvswitch-datapath-dkms -y
+sudo apt-get install openvswitch-switch openvswitch-common -y
+sudo apt-get install ovn-central ovn-common ovn-host -y
+sudo modprobe vport-geneve
+
+wget https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz
+sudo tar -xvf go1.8.3.linux-amd64.tar.gz -C /usr/local/
+mkdir -p $HOME/go/src
+export GOPATH=$HOME/go
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+git clone https://github.com/openvswitch/ovn-kubernetes -b v0.3.0
+cd ovn-kubernetes/go-controller
+make
+sudo make install
diff --git a/src/vagrant/kubeadm_kata/kata_setup.sh b/src/vagrant/kubeadm_kata/kata_setup.sh
index c14d844..18c4cd1 100644
--- a/src/vagrant/kubeadm_kata/kata_setup.sh
+++ b/src/vagrant/kubeadm_kata/kata_setup.sh
@@ -17,27 +17,27 @@
set -ex
-cat << EOF | sudo tee /etc/apt/sources.list.d/cc-oci-runtime.list
-deb http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/ /
-EOF
-curl -fsSL http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get install -y cc-oci-runtime
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/x86_64:/master/xUbuntu_16.04/ /' > /etc/apt/sources.list.d/kata-containers.list"
+curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
+sudo -E apt-get update
+sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
+sudo -E apt-get -y install libseccomp2
-echo | sudo add-apt-repository ppa:projectatomic/ppa
-sudo apt-get update
-sudo apt-get install -y cri-o
-sudo sed -i 's,runtime_untrusted_workload.*,runtime_untrusted_workload = "/usr/bin/cc-runtime",' /etc/crio/crio.conf
-sudo sed -i 's,cgroup_manager.*,cgroup_manager = "cgroupfs",' /etc/crio/crio.conf
-sudo sed -i 's,default_workload_trust.*,default_workload_trust = "untrusted",' /etc/crio/crio.conf
-sudo sed -i 's,^registries.*,registries = [ "docker.io",' /etc/crio/crio.conf
-sudo systemctl enable crio
-sudo systemctl daemon-reload
-sudo systemctl restart crio
+wget http://storage.googleapis.com/cri-containerd-release/cri-containerd-1.1.0.linux-amd64.tar.gz >& /dev/null
+sudo tar -C / -xzf cri-containerd-1.1.0.linux-amd64.tar.gz
+sudo systemctl start containerd
+sudo mkdir -p /opt/cni/bin
+sudo mkdir -p /etc/cni/net.d
+sudo mkdir -p /etc/containerd
+containerd config default | sudo tee /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_type.*/runtime_type=\"io.containerd.runtime.v1.linux\"/" /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_engine.*/runtime_engine=\"kata-runtime\"/" /etc/containerd/config.toml
+sudo systemctl restart containerd
+
+cat << EOF | sudo tee /etc/systemd/system/kubelet.service.d/0-containerd.conf
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+EOF
-sudo systemctl stop kubelet
-echo "Modify kubelet systemd configuration to use CRI-O"
-k8s_systemd_file="/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
-sudo sed -i '/KUBELET_AUTHZ_ARGS/a Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --runtime-request-timeout=30m"' "$k8s_systemd_file"
sudo systemctl daemon-reload
-sudo systemctl start kubelet
+sudo systemctl restart kubelet
diff --git a/src/vagrant/kubeadm_kata/master_setup.sh b/src/vagrant/kubeadm_kata/master_setup.sh
index 41dadf0..42b3aee 100644
--- a/src/vagrant/kubeadm_kata/master_setup.sh
+++ b/src/vagrant/kubeadm_kata/master_setup.sh
@@ -22,13 +22,6 @@ mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf .kube/config
sudo chown $(id -u):$(id -g) ~/.kube/config
-kubectl apply -f http://git.io/weave-kube-1.6
+nohup /usr/bin/kubectl proxy --address=0.0.0.0 --accept-hosts=.* --port=8080 & sleep 1
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
-
-sudo systemctl restart crio
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -net-controller -loglevel=4 -k8s-apiserver=http://192.168.1.10:8080 -logfile=/var/log/openvswitch/ovnkube.log -init-master=master -cluster-subnet=10.32.0.0/12 -service-cluster-ip-range=10.96.0.0/16 -nodeport -nb-address=tcp://192.168.1.10:6631 -sb-address=tcp://192.168.1.10:6632 &
diff --git a/src/vagrant/kubeadm_kata/worker_setup.sh b/src/vagrant/kubeadm_kata/worker_setup.sh
index 6145793..63d42a5 100644
--- a/src/vagrant/kubeadm_kata/worker_setup.sh
+++ b/src/vagrant/kubeadm_kata/worker_setup.sh
@@ -18,16 +18,23 @@
set -ex
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification \
--token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 \
- --ignore-preflight-errors=SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
+ --ignore-preflight-errors=SystemVerification,CRI,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
sudo apt-get install -y putty-tools
mkdir ~/.kube
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
+echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
-sudo systemctl restart crio
+CENTRAL_IP=192.168.1.10
+NODE_NAME=$(hostname)
+TOKEN="8c5adc.1cec8dbf339093f0"
+
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -loglevel=4 \
+ -logfile="/var/log/openvswitch/ovnkube.log" \
+ -k8s-apiserver="http://$CENTRAL_IP:8080" \
+ -init-node="$NODE_NAME" \
+ -nodeport \
+ -nb-address="tcp://$CENTRAL_IP:6631" \
+ -sb-address="tcp://$CENTRAL_IP:6632" -k8s-token="$TOKEN" \
+ -init-gateways \
+ -service-cluster-ip-range=10.96.0.0/16 \
+ -cluster-subnet=10.32.0.0/12 &
diff --git a/src/vagrant/kubeadm_multus/master_setup.sh b/src/vagrant/kubeadm_multus/master_setup.sh
deleted file mode 100644
index dfc3d05..0000000
--- a/src/vagrant/kubeadm_multus/master_setup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
-
-mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-
-kubectl apply -f http://git.io/weave-kube-1.6
-kubectl apply -f /src/cni/multus/kube_cni_multus.yml
diff --git a/src/vagrant/kubeadm_onap/Vagrantfile b/src/vagrant/kubeadm_onap/Vagrantfile
index fe24252..699f607 100644
--- a/src/vagrant/kubeadm_onap/Vagrantfile
+++ b/src/vagrant/kubeadm_onap/Vagrantfile
@@ -1,17 +1,17 @@
-$num_workers=1
+$num_workers=4
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
- config.vm.box = "yk0/ubuntu-xenial"
- config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.box = "ceph/ubuntu-xenial"
config.vm.define "master" do |config|
config.vm.hostname = "master"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "master_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.10"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
end
@@ -19,23 +19,14 @@ Vagrant.configure("2") do |config|
(1 .. $num_workers).each do |i|
config.vm.define vm_name = "worker%d" % [i] do |config|
config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "worker_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.#{i+20}"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 81920
- libvirt.cpus = 32
+ libvirt.memory = 40960
+ libvirt.cpus = 16
end
end
end
- config.vm.define "onap" do |config|
- config.vm.hostname = "onap"
- config.vm.provision "shell", path: "onap_setup.sh", privileged: false
- config.vm.network :private_network, ip: "192.168.0.5"
- config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 2048
- libvirt.cpus = 1
- end
- end
-
end
diff --git a/src/vagrant/kubeadm_onap/host_setup.sh b/src/vagrant/kubeadm_onap/host_setup.sh
index 87b0062..9cfd266 100755
--- a/src/vagrant/kubeadm_onap/host_setup.sh
+++ b/src/vagrant/kubeadm_onap/host_setup.sh
@@ -4,13 +4,15 @@ set -ex
cat << EOF | sudo tee /etc/hosts
127.0.0.1 localhost
-192.168.0.5 onap
192.168.0.10 master
192.168.0.21 worker1
192.168.0.22 worker2
192.168.0.23 worker3
+192.168.0.24 worker4
EOF
+sudo ifconfig eth1 mtu 1400
+
sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
@@ -22,18 +24,17 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
-sudo systemctl stop docker
cat << EOF | sudo tee /etc/docker/daemon.json
{
- "storage-driver": "overlay"
+ "insecure-registries" : [ "nexus3.onap.org:10001" ]
}
EOF
sudo systemctl daemon-reload
-sudo systemctl start docker
+sudo systemctl restart docker
-sudo systemctl stop kubelet
-sudo rm -rf /var/lib/kubelet
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_onap/master_setup.sh b/src/vagrant/kubeadm_onap/master_setup.sh
index fa451a2..8840541 100755
--- a/src/vagrant/kubeadm_onap/master_setup.sh
+++ b/src/vagrant/kubeadm_onap/master_setup.sh
@@ -1,13 +1,28 @@
#!/bin/bash
-
set -ex
-sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/24 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+server 127.127.1.0
+fudge 127.127.1.0 stratum 10
+EOF
+sudo service ntp restart
+
+sudo apt install nfs-kernel-server -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+cat << EOF | sudo tee /etc/exports
+/dockerdata-nfs *(rw,sync,no_subtree_check,no_root_squash)
+EOF
+sudo systemctl restart nfs-kernel-server.service
+
+sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf ~/.kube/config
-sudo chown $(id -u):$(id -g) ~/.kube/config
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+sed -i "s/kube-subnet-mgr/kube-subnet-mgr\n - --iface=eth1/" kube-flannel.yml
+kubectl apply -f kube-flannel.yml
-kubectl apply -f http://git.io/weave-kube-1.6
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-helm init
-kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+/vagrant/onap_setup.sh
diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh
index 4dfe1e1..e4edd8f 100755
--- a/src/vagrant/kubeadm_onap/onap_setup.sh
+++ b/src/vagrant/kubeadm_onap/onap_setup.sh
@@ -2,42 +2,19 @@
set -ex
-sudo apt-get install -y putty-tools python-openstackclient
-mkdir ~/.kube
-r=0
-while [ "$r" == "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep "tiller-deploy.*Run" | wc -l)
-done
+kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+wget https://storage.googleapis.com/kubernetes-helm/helm-v2.8.2-linux-amd64.tar.gz
+tar xzvf helm-v2.8.2-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/
+helm init
+helm serve &
+helm repo remove stable
+helm repo add local http://127.0.0.1:8879
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-git clone http://gerrit.onap.org/r/oom
-cd oom; git checkout amsterdam
-source /vagrant/openstack/openrc
-cat <<EOF | tee ~/oom/kubernetes/config/onap-parameters.yaml
-OPENSTACK_UBUNTU_14_IMAGE: "ubuntu1404"
-OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
-OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
-OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
-OPENSTACK_OAM_NETWORK_CIDR: "10.0.0.0/16"
-OPENSTACK_USERNAME: "admin"
-OPENSTACK_API_KEY: "adim"
-OPENSTACK_TENANT_NAME: "admin"
-OPENSTACK_TENANT_ID: "47899782ed714295b1151681fdfd51f5"
-OPENSTACK_REGION: "RegionOne"
-OPENSTACK_KEYSTONE_URL: "http://192.168.0.30:5000/v2.0"
-OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
-OPENSTACK_SERVICE_TENANT_NAME: "service"
-DMAAP_TOPIC: "AUTO"
-DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
-EOF
-cd ~/oom/kubernetes/oneclick && ./deleteAll.bash -n onap || true
-(kubectl delete ns onap; helm del --purge onap-config) || true
-echo "y\n" | plink -ssh -pw vagrant vagrant@worker1 "sudo rm -rf /dockerdata-nfs/onap"
-cd ~/oom/kubernetes/config && ./createConfig.sh -n onap
-while true; do sleep 30; kubectl get pods --all-namespaces | grep onap | wc -l | grep "^0$" && break; done
-source ~/oom/kubernetes/oneclick/setenv.bash
-sed -i "s/aaiServiceClusterIp:.*/aaiServiceClusterIp: 10.96.0.254/" ~/oom/kubernetes/aai/values.yaml
-cd ~/oom/kubernetes/oneclick && ./createAll.bash -n onap
+git clone -b beijing http://gerrit.onap.org/r/oom
+cd oom/kubernetes
+
+sudo apt-get install make -y
+make all
+sleep 300
+helm install local/onap -n dev --namespace onap
diff --git a/src/vagrant/kubeadm_onap/registry_setup.sh b/src/vagrant/kubeadm_onap/registry_setup.sh
new file mode 100644
index 0000000..669268b
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/registry_setup.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -ex
+
+sudo apt-get update -y
+sudo apt install -y jq docker.io
+
+NEXUS_REPO=nexus3.onap.org:10001
+LOCAL_REPO=192.168.0.2:5000
+
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries" : [ "$LOCAL_REPO" ]
+}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+
+sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+
+dockers=$(curl -X GET https://$NEXUS_REPO/v2/_catalog | jq -r ".repositories[]")
+for d in $dockers
+do
+ tags=$(curl -X GET https://$NEXUS_REPO/v2/$d/tags/list | jq -r ".tags[]")
+ for t in $tags
+ do
+ sudo docker pull $NEXUS_REPO/$d:$t
+ sudo docker tag $NEXUS_REPO/$d:$t $LOCAL_REPO/$d:$t
+ sudo docker push $LOCAL_REPO/$d:$t
+ done
+done
diff --git a/src/vagrant/kubeadm_onap/setup_swap.sh b/src/vagrant/kubeadm_onap/setup_swap.sh
new file mode 100644
index 0000000..c2432b7
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_swap.sh
@@ -0,0 +1,5 @@
+sudo swapoff -a
+sudo fallocate -l 50G /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+sudo swapon --show
diff --git a/src/vagrant/kubeadm_onap/setup_tunnel.sh b/src/vagrant/kubeadm_onap/setup_tunnel.sh
new file mode 100644
index 0000000..3a6ef75
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_tunnel.sh
@@ -0,0 +1,3 @@
+sudo ip link add tunnel0 type gretap local <local> remote <remote>
+sudo ifconfig tunnel0 up
+sudo brctl addif <br> tunnel0
diff --git a/src/vagrant/kubeadm_onap/worker_setup.sh b/src/vagrant/kubeadm_onap/worker_setup.sh
index aa60df3..e65a65c 100755
--- a/src/vagrant/kubeadm_onap/worker_setup.sh
+++ b/src/vagrant/kubeadm_onap/worker_setup.sh
@@ -1,11 +1,15 @@
#!/bin/bash
-
set -ex
-sudo mkdir /dockerdata-nfs
-sudo chmod 755 /dockerdata-nfs
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+pool master
+EOF
+sudo service ntp restart
-sudo apt-get install -y putty-tools
-mkdir ~/.kube
-echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+
+sudo apt-get install nfs-common -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+sudo mount master:/dockerdata-nfs /dockerdata-nfs
diff --git a/src/vagrant/kubeadm_ovsdpdk/host_setup.sh b/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
index b86a618..b2ee85c 100644
--- a/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
+++ b/src/vagrant/kubeadm_ovsdpdk/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
sudo rm -rf /var/lib/kubelet
sudo systemctl stop kubelet
diff --git a/src/vagrant/kubeadm_istio/Vagrantfile b/src/vagrant/kubeadm_snort/Vagrantfile
index 9320074..9320074 100644
--- a/src/vagrant/kubeadm_istio/Vagrantfile
+++ b/src/vagrant/kubeadm_snort/Vagrantfile
diff --git a/src/vagrant/kubeadm_multus/deploy.sh b/src/vagrant/kubeadm_snort/deploy.sh
index 9c9e51e..e1e16d6 100755
--- a/src/vagrant/kubeadm_multus/deploy.sh
+++ b/src/vagrant/kubeadm_snort/deploy.sh
@@ -6,4 +6,4 @@ DIR="$(dirname `readlink -f $0`)"
cd $DIR
../cleanup.sh
vagrant up
-vagrant ssh master -c "/vagrant/examples/multus.sh"
+vagrant ssh master -c "/vagrant/snort/snort-setup.sh"
diff --git a/src/vagrant/kubeadm_multus/host_setup.sh b/src/vagrant/kubeadm_snort/host_setup.sh
index c1a23eb..524a967 100644
--- a/src/vagrant/kubeadm_multus/host_setup.sh
+++ b/src/vagrant/kubeadm_snort/host_setup.sh
@@ -21,7 +21,7 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
sudo swapoff -a
sudo systemctl daemon-reload
diff --git a/src/vagrant/kubeadm_snort/master_setup.sh b/src/vagrant/kubeadm_snort/master_setup.sh
new file mode 100644
index 0000000..972768f
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/master_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f https://raw.githubusercontent.com/weaveworks/weave/master/prog/weave-kube/weave-daemonset-k8s-1.6.yaml
diff --git a/src/vagrant/kubeadm_snort/snort/snort-setup.sh b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
new file mode 100755
index 0000000..08ae663
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+kubectl create -f /vagrant/snort/snort.yaml
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+
+r="0"
+while [ $r -ne "2" ]
+do
+ r=$(kubectl get pods | grep Running | wc -l)
+ sleep 60
+done
diff --git a/src/vagrant/kubeadm_snort/snort/snort.yaml b/src/vagrant/kubeadm_snort/snort/snort.yaml
new file mode 100644
index 0000000..60dede2
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/snort/snort.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: snort-service
+ labels:
+ app: snort
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ protocol: TCP
+ name: http
+ selector:
+ app: snort
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: snort-pod
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: snort
+ spec:
+ containers:
+ - name: snort
+ image: frapsoft/snort
+ args: ["-v"]
+ ports:
+ - containerPort: 80
diff --git a/src/vagrant/kubeadm_multus/worker_setup.sh b/src/vagrant/kubeadm_snort/worker_setup.sh
index 74e4178..74e4178 100644
--- a/src/vagrant/kubeadm_multus/worker_setup.sh
+++ b/src/vagrant/kubeadm_snort/worker_setup.sh
diff --git a/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml b/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
index 8beb03f..334142b 100644
--- a/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
@@ -4,21 +4,14 @@ metadata:
name: cirros-vm
annotations:
# This tells CRI Proxy that this pod belongs to Virtlet runtime
- kubernetes.io/target-runtime: virtlet
- # An optional annotation specifying the count of virtual CPUs.
- # Note that annotation values must always be strings,
- # thus numeric values need to be quoted.
- # Defaults to "1".
- VirtletVCPUCount: "1"
+ kubernetes.io/target-runtime: virtlet.cloud
# CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
VirtletDiskDriver: virtio
# inject ssh keys via cloud-init
VirtletSSHKeys: |
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
- # cloud-init user data
- VirtletCloudInitUserDataScript: |
- #!/bin/sh
- echo "Hi there"
+ # set root volume size
+ VirtletRootVolumeSize: 1Gi
spec:
# This nodeAffinity specification tells Kubernetes to run this
# pod only on the nodes that have extraRuntime=virtlet label.
@@ -36,17 +29,9 @@ spec:
containers:
- name: cirros-vm
# This specifies the image to use.
- # virtlet/ prefix is used by CRI proxy, the remaining part
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
# of the image name is prepended with https:// and used to download the image
- image: virtlet/cirros
- # Virtlet currently ignores image tags, but their meaning may change
- # in future, so it’s better not to set them for VM pods. If there’s no tag
- # provided in the image specification kubelet defaults to
- # imagePullPolicy: Always, which means that the image is always
- # redownloaded when the pod is created. In order to make pod creation
- # faster and more reliable, we set imagePullPolicy to IfNotPresent here
- # so a previously downloaded image is reused if there is one
- # in Virtlet’s image store
+ image: virtlet.cloud/cirros
imagePullPolicy: IfNotPresent
# tty and stdin required for `kubectl attach -t` to work
tty: true
diff --git a/src/vagrant/kubeadm_virtlet/examples/images.yaml b/src/vagrant/kubeadm_virtlet/examples/images.yaml
index 3a84585..1541ca7 100644
--- a/src/vagrant/kubeadm_virtlet/examples/images.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/images.yaml
@@ -1,3 +1,3 @@
translations:
- name: cirros
- url: http://github.com/mirantis/virtlet/releases/download/v0.8.2/cirros.img
+ url: https://github.com/mirantis/virtlet/releases/download/v0.9.3/cirros.img
diff --git a/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml b/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
index ed037d9..1bb4882 100644
--- a/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
+++ b/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
@@ -1,25 +1,21 @@
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: DaemonSet
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
spec:
+ selector:
+ matchLabels:
+ runtime: virtlet
template:
metadata:
- name: virtlet
+ creationTimestamp: null
labels:
runtime: virtlet
+ name: virtlet
spec:
- hostNetwork: true
- dnsPolicy: ClusterFirstWithHostNet
- # hostPID is true to (1) enable VMs to survive virtlet container restart
- # (to be checked) and (2) to enable the use of nsenter in init container
- hostPID: true
- # bootstrap procedure needs to create a configmap in kube-system namespace
- serviceAccountName: virtlet
-
- # only run Virtlet pods on the nodes with extraRuntime=virtlet label
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -29,50 +25,21 @@ spec:
operator: In
values:
- virtlet
-
- initContainers:
- # The init container first copies virtlet's flexvolume driver
- # to the default kubelet plugin dir to have it in the proper place by the
- # time kubelet is restarted by CRI proxy bootstrap procedure.
- # After that it checks if there's already saved kubelet config
- # and considers that CRI proxy bootstrap is already done if it exists.
- # If it doesn't, it drops criproxy binary into /opt/criproxy/bin
- # if it's not already there and then starts criproxy installation.
- # The possibility to put criproxy binary in advance into
- # /opt/criproxy/bin may be helpful for the purpose of
- # debugging criproxy
- # At the end it ensures that /var/lib/libvirt/images exists on node.
- - name: prepare-node
- image: openretriever/virtlet
+ containers:
+ - command:
+ - /libvirt.sh
+ image: mirantis/virtlet:v1.4.1
imagePullPolicy: IfNotPresent
- command:
- - /prepare-node.sh
- volumeMounts:
- - name: k8s-flexvolume-plugins-dir
- mountPath: /kubelet-volume-plugins
- - name: criproxybin
- mountPath: /opt/criproxy/bin
- - name: run
- mountPath: /run
- - name: dockersock
- mountPath: /var/run/docker.sock
- - name: criproxyconf
- mountPath: /etc/criproxy
- - name: log
- mountPath: /hostlog
- # for ensuring that /var/lib/libvirt/images exists on node
- - name: var-lib
- mountPath: /host-var-lib
+ name: libvirt
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+ resources: {}
securityContext:
privileged: true
-
- containers:
- - name: libvirt
- image: openretriever/virtlet
- # In case we inject local virtlet image we want to use it not officially available one
- imagePullPolicy: IfNotPresent
- command:
- - /libvirt.sh
volumeMounts:
- mountPath: /sys/fs/cgroup
name: cgroup
@@ -90,117 +57,176 @@ spec:
name: libvirt
- mountPath: /var/run/libvirt
name: libvirt-sockets
- # the log dir is needed here because otherwise libvirt will produce errors
- # like this:
- # Unable to pre-create chardev file '/var/log/vms/afd75bbb-8e97-11e7-9561-02420ac00002/cirros-vm_0.log': No such file or directory
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /dev
+ name: dev
+ - image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: virtlet
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/run/virtlet.sock </dev/null
+ resources: {}
securityContext:
privileged: true
- env:
- - name: VIRTLET_DISABLE_KVM
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: disable_kvm
- optional: true
- - name: virtlet
- image: openretriever/virtlet
- # In case we inject local virtlet image we want to use it not officially available one
- imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /run
name: run
- # /boot and /lib/modules are required by supermin
- mountPath: /lib/modules
name: modules
readOnly: true
- mountPath: /boot
name: boot
readOnly: true
+ - mountPath: /dev
+ name: dev
- mountPath: /var/lib/virtlet
+ mountPropagation: Bidirectional
name: virtlet
- mountPath: /var/lib/libvirt
name: libvirt
- - mountPath: /etc/cni
- name: cniconf
- - mountPath: /opt/cni/bin
- name: cnibin
- mountPath: /var/run/libvirt
name: libvirt-sockets
- - mountPath: /var/lib/cni
- name: cnidata
- mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
name: k8s-flexvolume-plugins-dir
- # below `:shared` is unofficial way to pass this option docker
- # which then will allow virtlet to see what kubelet mounts in
- # underlaying directories, after virtlet container is created
- - mountPath: /var/lib/kubelet/pods:shared
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: Bidirectional
name: k8s-pods-dir
- - name: vms-log
- mountPath: /var/log/vms
+ - mountPath: /var/log/vms
+ name: vms-log
- mountPath: /etc/virtlet/images
name: image-name-translations
- - name: pods-log
- mountPath: /kubernetes-log
- securityContext:
- privileged: true
+ - mountPath: /var/log/pods
+ name: pods-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /var/run/netns
+ mountPropagation: Bidirectional
+ name: netns-dir
+ - command:
+ - /vms.sh
+ image: mirantis/virtlet:v1.4.1
+ imagePullPolicy: IfNotPresent
+ name: vms
+ resources: {}
+ volumeMounts:
+ - mountPath: /var/lib/virtlet
+ mountPropagation: HostToContainer
+ name: virtlet
+ - mountPath: /var/lib/libvirt
+ name: libvirt
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/lib/kubelet/pods
+ mountPropagation: HostToContainer
+ name: k8s-pods-dir
+ - mountPath: /dev
+ name: dev
+ - mountPath: /lib/modules
+ name: modules
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ initContainers:
+ - command:
+ - /prepare-node.sh
env:
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
- name: VIRTLET_DISABLE_KVM
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: disable_kvm
+ name: virtlet-config
optional: true
- - name: VIRTLET_DOWNLOAD_PROTOCOL
+ - name: VIRTLET_SRIOV_SUPPORT
valueFrom:
configMapKeyRef:
+ key: sriov_support
name: virtlet-config
+ optional: true
+ - name: VIRTLET_DOWNLOAD_PROTOCOL
+ valueFrom:
+ configMapKeyRef:
key: download_protocol
+ name: virtlet-config
optional: true
- name: VIRTLET_LOGLEVEL
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: loglevel
+ name: virtlet-config
optional: true
- name: VIRTLET_CALICO_SUBNET
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: calico-subnet
+ name: virtlet-config
optional: true
- name: IMAGE_REGEXP_TRANSLATION
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: image_regexp_translation
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_RAW_DEVICES
+ valueFrom:
+ configMapKeyRef:
+ key: raw_devices
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DISABLE_LOGGING
+ valueFrom:
+ configMapKeyRef:
+ key: disable_logging
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CPU_MODEL
+ valueFrom:
+ configMapKeyRef:
+ key: cpu-model
+ name: virtlet-config
+ optional: true
+ - name: KUBELET_ROOT_DIR
+ valueFrom:
+ configMapKeyRef:
+ key: kubelet_root_dir
+ name: virtlet-config
optional: true
- - name: IMAGE_TRANSLATIONS_DIR
+ - name: VIRTLET_IMAGE_TRANSLATIONS_DIR
value: /etc/virtlet/images
- - name: KUBERNETES_POD_LOGS
- value: "/kubernetes-log"
- # TODO: should we rename it?
- - name: VIRTLET_VM_LOG_LOCATION
- value: "1"
- - name: vms
- image: openretriever/virtlet
+ image: mirantis/virtlet:v1.4.1
imagePullPolicy: IfNotPresent
- command:
- - /vms.sh
+ name: prepare-node
+ resources: {}
+ securityContext:
+ privileged: true
volumeMounts:
+ - mountPath: /kubelet-volume-plugins
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /run
+ name: run
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ - mountPath: /hostlog
+ name: log
+ - mountPath: /host-var-lib
+ name: var-lib
+ - mountPath: /dev
+ name: dev
- mountPath: /var/lib/virtlet
name: virtlet
- - mountPath: /var/lib/libvirt
- name: libvirt
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
+ serviceAccountName: virtlet
volumes:
- # /dev is needed for host raw device access
- hostPath:
path: /dev
name: dev
@@ -216,9 +242,6 @@ spec:
- hostPath:
path: /run
name: run
- # TODO: don't hardcode docker socket location here
- # This will require CRI proxy installation to run
- # in host mount namespace.
- hostPath:
path: /var/run/docker.sock
name: dockersock
@@ -229,21 +252,6 @@ spec:
path: /var/lib/libvirt
name: libvirt
- hostPath:
- path: /etc/cni
- name: cniconf
- - hostPath:
- path: /opt/cni/bin
- name: cnibin
- - hostPath:
- path: /var/lib/cni
- name: cnidata
- - hostPath:
- path: /opt/criproxy/bin
- name: criproxybin
- - hostPath:
- path: /etc/criproxy
- name: criproxyconf
- - hostPath:
path: /var/log
name: log
- hostPath:
@@ -259,18 +267,27 @@ spec:
path: /var/log/virtlet/vms
name: vms-log
- hostPath:
+ path: /var/log/libvirt
+ name: libvirt-log
+ - hostPath:
path: /var/run/libvirt
name: libvirt-sockets
- hostPath:
path: /var/log/pods
name: pods-log
+ - hostPath:
+ path: /var/run/netns
+ name: netns-dir
- configMap:
name: virtlet-image-translations
name: image-name-translations
+ updateStrategy: {}
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: virtlet
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -280,23 +297,29 @@ subjects:
- kind: ServiceAccount
name: virtlet
namespace: kube-system
+
---
-kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
rules:
- - apiGroups:
- - ""
- resources:
- - configmaps
- verbs:
- - create
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ verbs:
+ - create
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
+ creationTimestamp: null
name: configmap-reader
rules:
- apiGroups:
@@ -307,10 +330,27 @@ rules:
- get
- list
- watch
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-userdata-reader
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: kubelet-node-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -320,29 +360,49 @@ subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
+
---
-kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
+ name: vm-userdata-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-userdata-reader
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
name: virtlet-crd
rules:
- - apiGroups:
- - "apiextensions.k8s.io"
- resources:
- - customresourcedefinitions
- verbs:
- - create
- - apiGroups:
- - "virtlet.k8s"
- resources:
- - virtletimagemappings
- verbs:
- - list
- - get
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+- apiGroups:
+ - virtlet.k8s
+ resources:
+ - virtletimagemappings
+ - virtletconfigmappings
+ verbs:
+ - list
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: virtlet-crd
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -352,9 +412,110 @@ subjects:
- kind: ServiceAccount
name: virtlet
namespace: kube-system
+
---
apiVersion: v1
kind: ServiceAccount
metadata:
+ creationTimestamp: null
name: virtlet
namespace: kube-system
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletimagemappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletImageMapping
+ plural: virtletimagemappings
+ shortNames:
+ - vim
+ singular: virtletimagemapping
+ scope: Namespaced
+ version: v1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ virtlet.cloud: ""
+ name: virtletconfigmappings.virtlet.k8s
+spec:
+ group: virtlet.k8s
+ names:
+ kind: VirtletConfigMapping
+ plural: virtletconfigmappings
+ shortNames:
+ - vcm
+ singular: virtletconfigmapping
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ config:
+ properties:
+ calicoSubnetSize:
+ maximum: 32
+ minimum: 0
+ type: integer
+ cniConfigDir:
+ type: string
+ cniPluginDir:
+ type: string
+ cpuModel:
+ type: string
+ criSocketPath:
+ type: string
+ databasePath:
+ type: string
+ disableKVM:
+ type: boolean
+ disableLogging:
+ type: boolean
+ downloadProtocol:
+ pattern: ^https?$
+ type: string
+ enableRegexpImageTranslation:
+ type: boolean
+ enableSriov:
+ type: boolean
+ fdServerSocketPath:
+ type: string
+ imageDir:
+ type: string
+ imageTranslationConfigsDir:
+ type: string
+ kubeletRootDir:
+ type: string
+ libvirtURI:
+ type: string
+ logLevel:
+ maximum: 2147483647
+ minimum: 0
+ type: integer
+ rawDevices:
+ type: string
+ skipImageTranslation:
+ type: boolean
+ streamPort:
+ maximum: 65535
+ minimum: 1
+ type: integer
+ nodeName:
+ type: string
+ nodeSelector:
+ type: object
+ priority:
+ type: integer
+ version: v1
+
diff --git a/src/vagrant/kubeadm_virtlet/host_setup.sh b/src/vagrant/kubeadm_virtlet/host_setup.sh
index b86a618..f211f19 100644
--- a/src/vagrant/kubeadm_virtlet/host_setup.sh
+++ b/src/vagrant/kubeadm_virtlet/host_setup.sh
@@ -10,20 +10,33 @@ cat << EOF | sudo tee /etc/hosts
192.168.1.23 worker3
EOF
-sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
-sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
-cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
-deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
-EOF
+sudo apt-get update
+sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ software-properties-common
+
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+sudo apt-get update
+sudo apt-get install -y docker-ce=18.03.1~ce-0~ubuntu
curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated kubelet=1.12.2-00 kubeadm=1.12.2-00 kubectl=1.12.2-00 kubernetes-cni=0.6.0-00
-sudo rm -rf /var/lib/kubelet
-sudo systemctl stop kubelet
+sudo modprobe ip_vs
+sudo modprobe ip_vs_rr
+sudo modprobe ip_vs_wrr
+sudo modprobe ip_vs_sh
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service
deleted file mode 100644
index bb2f1de..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=CRI Proxy
-
-[Service]
-ExecStart=/usr/local/bin/criproxy -v 3 -alsologtostderr -connect /var/run/dockershim.sock,virtlet:/run/virtlet.sock -listen /run/criproxy.sock
-Restart=always
-StartLimitInterval=0
-RestartSec=10
-
-[Install]
-WantedBy=kubelet.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service
deleted file mode 100644
index c629a4b..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=dockershim for criproxy
-
-[Service]
-ExecStart=/usr/local/bin/dockershim ......
-Restart=always
-StartLimitInterval=0
-RestartSec=10
-
-[Install]
-RequiredBy=criproxy.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf
deleted file mode 100644
index 412a48d..0000000
--- a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-[Service]
-Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/run/criproxy.sock --image-service-endpoint=/run/criproxy.sock --enable-controller-attach-detach=false"
diff --git a/src/vagrant/kubeadm_virtlet/worker_setup.sh b/src/vagrant/kubeadm_virtlet/worker_setup.sh
index 4472874..bc37fb3 100644
--- a/src/vagrant/kubeadm_virtlet/worker_setup.sh
+++ b/src/vagrant/kubeadm_virtlet/worker_setup.sh
@@ -1,18 +1,12 @@
#!/bin/bash
set -ex
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443
-sudo docker pull openretriever/virtlet
-sudo docker run --rm openretriever/virtlet tar -c /criproxy | sudo tar -C /usr/local/bin -xv
-sudo ln -s /usr/local/bin/criproxy /usr/local/bin/dockershim
-
-sudo mkdir /etc/criproxy
-sudo touch /etc/criproxy/node.conf
-sudo cp -r /vagrant/virtlet/etc/systemd/system/* /etc/systemd/system/
-sudo systemctl stop kubelet
-sudo systemctl daemon-reload
-sudo systemctl enable criproxy dockershim
-sudo systemctl start criproxy dockershim
+wget https://github.com/Mirantis/criproxy/releases/download/v0.12.0/criproxy_0.12.0_amd64.deb
+sudo dpkg -i criproxy_0.12.0_amd64.deb
+sudo sed -i "s/EnvironmentFile/#EnvironmentFile/" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
sudo systemctl daemon-reload
-sudo systemctl start kubelet
+sudo systemctl restart dockershim
+sudo systemctl restart criproxy
+sudo systemctl restart kubelet
diff --git a/src/vagrant/setup_vagrant.sh b/src/vagrant/setup_vagrant.sh
index fcde052..23fdcd2 100755
--- a/src/vagrant/setup_vagrant.sh
+++ b/src/vagrant/setup_vagrant.sh
@@ -1,6 +1,5 @@
#!/bin/bash
#
-# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.