Diffstat (limited to 'src/vagrant')
-rwxr-xr-x  src/vagrant/kubeadm_istio/istio/bookinfo.sh       |  5
-rwxr-xr-x  src/vagrant/kubeadm_istio/istio/deploy.sh         | 16
-rwxr-xr-x  src/vagrant/kubeadm_kata/examples/nginx-app.sh    |  7
-rw-r--r--  src/vagrant/kubeadm_kata/examples/nginx-app.yaml  |  2
-rw-r--r--  src/vagrant/kubeadm_kata/host_setup.sh            | 29
-rw-r--r--  src/vagrant/kubeadm_kata/kata_setup.sh            | 42
-rw-r--r--  src/vagrant/kubeadm_kata/master_setup.sh          | 11
-rw-r--r--  src/vagrant/kubeadm_kata/worker_setup.sh          | 25
-rw-r--r--  src/vagrant/kubeadm_onap/Vagrantfile              | 23
-rwxr-xr-x  src/vagrant/kubeadm_onap/host_setup.sh            | 15
-rwxr-xr-x  src/vagrant/kubeadm_onap/master_setup.sh          | 31
-rwxr-xr-x  src/vagrant/kubeadm_onap/onap_setup.sh            | 53
-rw-r--r--  src/vagrant/kubeadm_onap/registry_setup.sh        | 30
-rw-r--r--  src/vagrant/kubeadm_onap/setup_swap.sh            |  5
-rw-r--r--  src/vagrant/kubeadm_onap/setup_tunnel.sh          |  3
-rwxr-xr-x  src/vagrant/kubeadm_onap/worker_setup.sh          | 18
-rw-r--r--  src/vagrant/kubeadm_snort/Vagrantfile             | 29
-rwxr-xr-x  src/vagrant/kubeadm_snort/deploy.sh               |  9
-rw-r--r--  src/vagrant/kubeadm_snort/host_setup.sh           | 29
-rw-r--r--  src/vagrant/kubeadm_snort/master_setup.sh         | 10
-rwxr-xr-x  src/vagrant/kubeadm_snort/snort/snort-setup.sh    | 31
-rw-r--r--  src/vagrant/kubeadm_snort/snort/snort.yaml        | 32
-rw-r--r--  src/vagrant/kubeadm_snort/worker_setup.sh         |  4
23 files changed, 326 insertions(+), 133 deletions(-)
diff --git a/src/vagrant/kubeadm_istio/istio/bookinfo.sh b/src/vagrant/kubeadm_istio/istio/bookinfo.sh
index cc09167..ad8c120 100755
--- a/src/vagrant/kubeadm_istio/istio/bookinfo.sh
+++ b/src/vagrant/kubeadm_istio/istio/bookinfo.sh
@@ -23,6 +23,9 @@ export PATH=$PWD/bin:$PATH
# Run the test application: bookinfo
kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml)
+# Define the ingress gateway for the application
+istioctl create -f samples/bookinfo/routing/bookinfo-gateway.yaml
+
# Wait for bookinfo deployed
kubectl get services
kubectl get pods
@@ -36,6 +39,6 @@ do
done
# Validate the bookinfo app
-export GATEWAY_URL=$(kubectl get po -l istio=ingress -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingress -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
+export GATEWAY_URL=$(kubectl get po -l istio=ingressgateway -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
curl -o /dev/null -s -w "%{http_code}\n" http://${GATEWAY_URL}/productpage
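
The updated GATEWAY_URL now targets the istio-ingressgateway service. As a sketch, not part of this change, the final curl could be wrapped in a bounded poll so the test does not fail while the gateway route is still propagating (same GATEWAY_URL construction as above; the retry count is an arbitrary choice):

    GATEWAY_URL=$(kubectl get po -l istio=ingressgateway -n istio-system -o 'jsonpath={.items[0].status.hostIP}'):$(kubectl get svc istio-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[0].nodePort}')
    for i in $(seq 1 20); do
        code=$(curl -o /dev/null -s -w "%{http_code}" http://${GATEWAY_URL}/productpage)
        if [ "$code" = "200" ]; then break; fi
        sleep 15
    done
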
diff --git a/src/vagrant/kubeadm_istio/istio/deploy.sh b/src/vagrant/kubeadm_istio/istio/deploy.sh
index 4abc856..84af41b 100755
--- a/src/vagrant/kubeadm_istio/istio/deploy.sh
+++ b/src/vagrant/kubeadm_istio/istio/deploy.sh
@@ -35,21 +35,7 @@ echo 'export PATH="$PATH:/vagrant/istio-source/bin"' >> ~/.bashrc
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
-kubectl apply -f install/kubernetes/istio.yaml
-
-# Install the sidecar injection configmap
-./install/kubernetes/webhook-create-signed-cert.sh \
- --service istio-sidecar-injector \
- --namespace istio-system \
- --secret sidecar-injector-certs
-kubectl apply -f install/kubernetes/istio-sidecar-injector-configmap-release.yaml
-
-# Install the sidecar injector webhook
-cat install/kubernetes/istio-sidecar-injector.yaml | \
- ./install/kubernetes/webhook-patch-ca-bundle.sh > \
- install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-kubectl apply -f install/kubernetes/istio-sidecar-injector-with-ca-bundle.yaml
-kubectl -n istio-system get deployment -listio=sidecar-injector
+kubectl apply -f install/kubernetes/istio-demo.yaml
# Validate the installation
kubectl get svc -n istio-system
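
istio-demo.yaml installs the whole control plane in one apply, so bookinfo.sh can start before the istio-system pods are ready. A wait loop in the same style as the rest of these scripts, offered as a sketch rather than part of the patch:

    r=1
    while [ "$r" -ne 0 ]; do
        sleep 30
        r=$(kubectl get pods -n istio-system --no-headers | grep -v -e Running -e Completed | wc -l)
    done
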
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.sh b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
index 96d776c..a66b7ca 100755
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.sh
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.sh
@@ -20,6 +20,11 @@ kubectl get nodes
kubectl get services
kubectl get pods
kubectl get rc
-sleep 180
+r=0
+while [ "$r" -eq "0" ]
+do
+ sleep 30
+ r=$(kubectl get pods | grep Running | wc -l)
+done
svcip=$(kubectl get services nginx -o json | grep clusterIP | cut -f4 -d'"')
wget http://$svcip
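
The new loop exits as soon as any pod in the default namespace reports Running, which may be a different pod than nginx. A slightly stricter variant, assuming the app=nginx label that nginx-app.yaml applies, would count only the nginx replicas:

    r=0
    while [ "$r" -eq 0 ]; do
        sleep 30
        r=$(kubectl get pods -l app=nginx --no-headers | grep Running | wc -l)
    done
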
diff --git a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
index f80881a..9de4ef4 100644
--- a/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
+++ b/src/vagrant/kubeadm_kata/examples/nginx-app.yaml
@@ -23,6 +23,8 @@ spec:
metadata:
labels:
app: nginx
+ annotations:
+ io.kubernetes.cri.untrusted-workload: "true"
spec:
containers:
- name: nginx
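
The untrusted-workload annotation is what routes these pods to kata-runtime through the containerd mapping set up in kata_setup.sh. One possible out-of-band check that a pod really landed in a Kata sandbox rather than plain runc (the second command is run on the worker hosting the pod):

    kubectl get pod -l app=nginx -o jsonpath='{.items[0].metadata.annotations}'
    ps aux | grep [q]emu | wc -l    # one qemu process per Kata pod sandbox is expected
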
diff --git a/src/vagrant/kubeadm_kata/host_setup.sh b/src/vagrant/kubeadm_kata/host_setup.sh
index d2af951..02bb296 100644
--- a/src/vagrant/kubeadm_kata/host_setup.sh
+++ b/src/vagrant/kubeadm_kata/host_setup.sh
@@ -30,10 +30,37 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
+sudo apt-get install -y --allow-unauthenticated kubelet=1.10.5-00 kubeadm=1.10.5-00 kubectl=1.10.5-00 kubernetes-cni=0.6.0-00
+
sudo swapoff -a
sudo systemctl stop kubelet
sudo rm -rf /var/lib/kubelet
sudo systemctl daemon-reload
sudo systemctl start kubelet
+
+
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 5EDB1B62EC4926EA
+sudo apt-get update -y
+sudo apt-get install software-properties-common -y
+sudo apt-add-repository cloud-archive:queens -y
+sudo apt-get update -y
+
+#sudo apt-get build-dep dkms -y
+sudo apt-get install python-six openssl python-pip -y
+sudo -H pip install --upgrade pip
+sudo -H pip install ovs
+#sudo apt-get install openvswitch-datapath-dkms -y
+sudo apt-get install openvswitch-switch openvswitch-common -y
+sudo apt-get install ovn-central ovn-common ovn-host -y
+sudo modprobe vport-geneve
+
+wget https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz
+sudo tar -xvf go1.8.3.linux-amd64.tar.gz -C /usr/local/
+mkdir -p $HOME/go/src
+export GOPATH=$HOME/go
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+git clone https://github.com/openvswitch/ovn-kubernetes -b v0.3.0
+cd ovn-kubernetes/go-controller
+make
+sudo make install
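
The new block pulls OVS/OVN from the queens cloud archive and builds the ovn-kubernetes go-controller from source. A quick sanity check before kubeadm runs, assuming the package installs and make install above succeeded (ovn-central is only expected on the node that hosts the OVN databases):

    sudo ovs-vsctl show
    sudo systemctl is-active ovn-central || true
    command -v ovnkube
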
diff --git a/src/vagrant/kubeadm_kata/kata_setup.sh b/src/vagrant/kubeadm_kata/kata_setup.sh
index c14d844..53a2bbf 100644
--- a/src/vagrant/kubeadm_kata/kata_setup.sh
+++ b/src/vagrant/kubeadm_kata/kata_setup.sh
@@ -17,27 +17,27 @@
set -ex
-cat << EOF | sudo tee /etc/apt/sources.list.d/cc-oci-runtime.list
-deb http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/ /
-EOF
-curl -fsSL http://download.opensuse.org/repositories/home:/clearcontainers:/clear-containers-3/xUbuntu_16.04/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get install -y cc-oci-runtime
+sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
+curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/release/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
+sudo -E apt-get update
+sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
+sudo -E apt-get -y install libseccomp2
-echo | sudo add-apt-repository ppa:projectatomic/ppa
-sudo apt-get update
-sudo apt-get install -y cri-o
-sudo sed -i 's,runtime_untrusted_workload.*,runtime_untrusted_workload = "/usr/bin/cc-runtime",' /etc/crio/crio.conf
-sudo sed -i 's,cgroup_manager.*,cgroup_manager = "cgroupfs",' /etc/crio/crio.conf
-sudo sed -i 's,default_workload_trust.*,default_workload_trust = "untrusted",' /etc/crio/crio.conf
-sudo sed -i 's,^registries.*,registries = [ "docker.io",' /etc/crio/crio.conf
-sudo systemctl enable crio
-sudo systemctl daemon-reload
-sudo systemctl restart crio
+wget http://storage.googleapis.com/cri-containerd-release/cri-containerd-1.1.0.linux-amd64.tar.gz >& /dev/null
+sudo tar -C / -xzf cri-containerd-1.1.0.linux-amd64.tar.gz
+sudo systemctl start containerd
+sudo mkdir -p /opt/cni/bin
+sudo mkdir -p /etc/cni/net.d
+sudo mkdir -p /etc/containerd
+containerd config default | sudo tee /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_type.*/runtime_type=\"io.containerd.runtime.v1.linux\"/" /etc/containerd/config.toml
+sudo sed -i "/.*untrusted_workload_runtime.*/,+5s/runtime_engine.*/runtime_engine=\"kata-runtime\"/" /etc/containerd/config.toml
+sudo systemctl restart containerd
+
+cat << EOF | sudo tee /etc/systemd/system/kubelet.service.d/0-containerd.conf
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
+EOF
-sudo systemctl stop kubelet
-echo "Modify kubelet systemd configuration to use CRI-O"
-k8s_systemd_file="/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
-sudo sed -i '/KUBELET_AUTHZ_ARGS/a Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --runtime-request-timeout=30m"' "$k8s_systemd_file"
sudo systemctl daemon-reload
-sudo systemctl start kubelet
+sudo systemctl restart kubelet
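
With containerd as the CRI endpoint and kata-runtime wired in for untrusted workloads, checks along these lines can confirm the socket and the runtime mapping before the node joins; crictl ships in the cri-containerd tarball unpacked above:

    sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock info
    sudo kata-runtime kata-check    # verifies the host supports hardware virtualization
    grep -A5 untrusted_workload_runtime /etc/containerd/config.toml
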
diff --git a/src/vagrant/kubeadm_kata/master_setup.sh b/src/vagrant/kubeadm_kata/master_setup.sh
index 41dadf0..42b3aee 100644
--- a/src/vagrant/kubeadm_kata/master_setup.sh
+++ b/src/vagrant/kubeadm_kata/master_setup.sh
@@ -22,13 +22,6 @@ mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf .kube/config
sudo chown $(id -u):$(id -g) ~/.kube/config
-kubectl apply -f http://git.io/weave-kube-1.6
+nohup /usr/bin/kubectl proxy --address=0.0.0.0 --accept-hosts=.* --port=8080 & sleep 1
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
-
-sudo systemctl restart crio
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -net-controller -loglevel=4 -k8s-apiserver=http://192.168.1.10:8080 -logfile=/var/log/openvswitch/ovnkube.log -init-master=master -cluster-subnet=10.32.0.0/12 -service-cluster-ip-range=10.96.0.0/16 -nodeport -nb-address=tcp://192.168.1.10:6631 -sb-address=tcp://192.168.1.10:6632 &
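
ovnkube now acts as the OVN master against the kubectl proxy on :8080. If the overlay fails to come up, the NB/SB databases and the ovnkube log are the first places to look; a possible check, using the addresses configured above:

    ovn-nbctl --db=tcp:192.168.1.10:6631 show
    ovn-sbctl --db=tcp:192.168.1.10:6632 show
    sudo tail -n 50 /var/log/openvswitch/ovnkube.log
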
diff --git a/src/vagrant/kubeadm_kata/worker_setup.sh b/src/vagrant/kubeadm_kata/worker_setup.sh
index 6145793..63d42a5 100644
--- a/src/vagrant/kubeadm_kata/worker_setup.sh
+++ b/src/vagrant/kubeadm_kata/worker_setup.sh
@@ -18,16 +18,23 @@
set -ex
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification \
--token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 \
- --ignore-preflight-errors=SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
+ --ignore-preflight-errors=SystemVerification,CRI,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables
sudo apt-get install -y putty-tools
mkdir ~/.kube
-r=1
-while [ "$r" -ne "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
-done
+echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
-sudo systemctl restart crio
+CENTRAL_IP=192.168.1.10
+NODE_NAME=$(hostname)
+TOKEN="8c5adc.1cec8dbf339093f0"
+
+sudo ovnkube -k8s-kubeconfig /home/vagrant/.kube/config -loglevel=4 \
+ -logfile="/var/log/openvswitch/ovnkube.log" \
+ -k8s-apiserver="http://$CENTRAL_IP:8080" \
+ -init-node="$NODE_NAME" \
+ -nodeport \
+ -nb-address="tcp://$CENTRAL_IP:6631" \
+ -sb-address="tcp://$CENTRAL_IP:6632" -k8s-token="$TOKEN" \
+ -init-gateways \
+ -service-cluster-ip-range=10.96.0.0/16 \
+ -cluster-subnet=10.32.0.0/12 &
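
After the join and the ovnkube node agent start, the worker should eventually report Ready on the master. A wait loop in the repo's usual style, given as a sketch only:

    r=0
    while [ "$r" -eq 0 ]; do
        sleep 30
        r=$(kubectl get nodes --no-headers | grep "$(hostname)" | grep -w Ready | wc -l)
    done
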
diff --git a/src/vagrant/kubeadm_onap/Vagrantfile b/src/vagrant/kubeadm_onap/Vagrantfile
index fe24252..699f607 100644
--- a/src/vagrant/kubeadm_onap/Vagrantfile
+++ b/src/vagrant/kubeadm_onap/Vagrantfile
@@ -1,17 +1,17 @@
-$num_workers=1
+$num_workers=4
Vagrant.require_version ">= 1.8.6"
Vagrant.configure("2") do |config|
- config.vm.box = "yk0/ubuntu-xenial"
- config.vm.provision "shell", path: "host_setup.sh", privileged: false
+ config.vm.box = "ceph/ubuntu-xenial"
config.vm.define "master" do |config|
config.vm.hostname = "master"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "master_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.10"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 4096
+ libvirt.memory = 8192
libvirt.cpus = 4
end
end
@@ -19,23 +19,14 @@ Vagrant.configure("2") do |config|
(1 .. $num_workers).each do |i|
config.vm.define vm_name = "worker%d" % [i] do |config|
config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
config.vm.provision "shell", path: "worker_setup.sh", privileged: false
config.vm.network :private_network, ip: "192.168.0.#{i+20}"
config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 81920
- libvirt.cpus = 32
+ libvirt.memory = 40960
+ libvirt.cpus = 16
end
end
end
- config.vm.define "onap" do |config|
- config.vm.hostname = "onap"
- config.vm.provision "shell", path: "onap_setup.sh", privileged: false
- config.vm.network :private_network, ip: "192.168.0.5"
- config.vm.provider :libvirt do |libvirt|
- libvirt.memory = 2048
- libvirt.cpus = 1
- end
- end
-
end
diff --git a/src/vagrant/kubeadm_onap/host_setup.sh b/src/vagrant/kubeadm_onap/host_setup.sh
index 64e1733..9cfd266 100755
--- a/src/vagrant/kubeadm_onap/host_setup.sh
+++ b/src/vagrant/kubeadm_onap/host_setup.sh
@@ -4,13 +4,15 @@ set -ex
cat << EOF | sudo tee /etc/hosts
127.0.0.1 localhost
-192.168.0.5 onap
192.168.0.10 master
192.168.0.21 worker1
192.168.0.22 worker2
192.168.0.23 worker3
+192.168.0.24 worker4
EOF
+sudo ifconfig eth1 mtu 1400
+
sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
@@ -22,18 +24,17 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
-sudo systemctl stop docker
cat << EOF | sudo tee /etc/docker/daemon.json
{
- "storage-driver": "overlay"
+ "insecure-registries" : [ "nexus3.onap.org:10001" ]
}
EOF
sudo systemctl daemon-reload
-sudo systemctl start docker
+sudo systemctl restart docker
-sudo systemctl stop kubelet
-sudo rm -rf /var/lib/kubelet
+sudo swapoff -a
sudo systemctl daemon-reload
+sudo systemctl stop kubelet
sudo systemctl start kubelet
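
The daemon.json change marks the ONAP Nexus registry as insecure so image pulls succeed without TLS. A quick confirmation after the docker restart, not part of the patch:

    sudo docker info 2>/dev/null | grep -A3 "Insecure Registries"
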
diff --git a/src/vagrant/kubeadm_onap/master_setup.sh b/src/vagrant/kubeadm_onap/master_setup.sh
index fa451a2..8840541 100755
--- a/src/vagrant/kubeadm_onap/master_setup.sh
+++ b/src/vagrant/kubeadm_onap/master_setup.sh
@@ -1,13 +1,28 @@
#!/bin/bash
-
set -ex
-sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/24 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+server 127.127.1.0
+fudge 127.127.1.0 stratum 10
+EOF
+sudo service ntp restart
+
+sudo apt install nfs-kernel-server -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+cat << EOF | sudo tee /etc/exports
+/dockerdata-nfs *(rw,sync,no_subtree_check,no_root_squash)
+EOF
+sudo systemctl restart nfs-kernel-server.service
+
+sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0
mkdir ~/.kube
-sudo cp /etc/kubernetes/admin.conf ~/.kube/config
-sudo chown $(id -u):$(id -g) ~/.kube/config
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+sed -i "s/kube-subnet-mgr/kube-subnet-mgr\n - --iface=eth1/" kube-flannel.yml
+kubectl apply -f kube-flannel.yml
-kubectl apply -f http://git.io/weave-kube-1.6
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-helm init
-kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+/vagrant/onap_setup.sh
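
The master now also serves NTP and exports /dockerdata-nfs, and flannel (pinned to eth1) replaces weave. Before onap_setup.sh starts installing charts, both can be verified with something like the following, assuming the label used in kube-flannel.yml:

    showmount -e localhost                           # /dockerdata-nfs should be exported to *
    kubectl get pods -n kube-system -l app=flannel   # flannel pods should reach Running
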
diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh
index 4dfe1e1..e4edd8f 100755
--- a/src/vagrant/kubeadm_onap/onap_setup.sh
+++ b/src/vagrant/kubeadm_onap/onap_setup.sh
@@ -2,42 +2,19 @@
set -ex
-sudo apt-get install -y putty-tools python-openstackclient
-mkdir ~/.kube
-r=0
-while [ "$r" == "0" ]
-do
- sleep 30
- echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
- r=$(kubectl get pods -n kube-system | grep "tiller-deploy.*Run" | wc -l)
-done
+kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
+wget https://storage.googleapis.com/kubernetes-helm/helm-v2.8.2-linux-amd64.tar.gz
+tar xzvf helm-v2.8.2-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/
+helm init
+helm serve &
+helm repo remove stable
+helm repo add local http://127.0.0.1:8879
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-git clone http://gerrit.onap.org/r/oom
-cd oom; git checkout amsterdam
-source /vagrant/openstack/openrc
-cat <<EOF | tee ~/oom/kubernetes/config/onap-parameters.yaml
-OPENSTACK_UBUNTU_14_IMAGE: "ubuntu1404"
-OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
-OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
-OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
-OPENSTACK_OAM_NETWORK_CIDR: "10.0.0.0/16"
-OPENSTACK_USERNAME: "admin"
-OPENSTACK_API_KEY: "adim"
-OPENSTACK_TENANT_NAME: "admin"
-OPENSTACK_TENANT_ID: "47899782ed714295b1151681fdfd51f5"
-OPENSTACK_REGION: "RegionOne"
-OPENSTACK_KEYSTONE_URL: "http://192.168.0.30:5000/v2.0"
-OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
-OPENSTACK_SERVICE_TENANT_NAME: "service"
-DMAAP_TOPIC: "AUTO"
-DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
-EOF
-cd ~/oom/kubernetes/oneclick && ./deleteAll.bash -n onap || true
-(kubectl delete ns onap; helm del --purge onap-config) || true
-echo "y\n" | plink -ssh -pw vagrant vagrant@worker1 "sudo rm -rf /dockerdata-nfs/onap"
-cd ~/oom/kubernetes/config && ./createConfig.sh -n onap
-while true; do sleep 30; kubectl get pods --all-namespaces | grep onap | wc -l | grep "^0$" && break; done
-source ~/oom/kubernetes/oneclick/setenv.bash
-sed -i "s/aaiServiceClusterIp:.*/aaiServiceClusterIp: 10.96.0.254/" ~/oom/kubernetes/aai/values.yaml
-cd ~/oom/kubernetes/oneclick && ./createAll.bash -n onap
+git clone -b beijing http://gerrit.onap.org/r/oom
+cd oom/kubernetes
+
+sudo apt-get install make -y
+make all
+sleep 300
+helm install local/onap -n dev --namespace onap
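
make all packages the OOM charts into the local repo served on 127.0.0.1:8879, and the release is installed as "dev" in the onap namespace. ONAP takes far longer than the fixed sleep to settle, so a polling check in the style of the other scripts may be more useful; a sketch under that assumption:

    while [ "$(kubectl get pods -n onap --no-headers 2>/dev/null | wc -l)" -eq 0 ]; do sleep 30; done
    while [ "$(kubectl get pods -n onap --no-headers | grep -v -e Running -e Completed | wc -l)" -ne 0 ]; do sleep 60; done
    helm status dev | head -n 20
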
diff --git a/src/vagrant/kubeadm_onap/registry_setup.sh b/src/vagrant/kubeadm_onap/registry_setup.sh
new file mode 100644
index 0000000..669268b
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/registry_setup.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -ex
+
+sudo apt-get update -y
+sudo apt install -y jq docker.io
+
+NEXUS_REPO=nexus3.onap.org:10001
+LOCAL_REPO=192.168.0.2:5000
+
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+ "insecure-registries" : [ "$LOCAL_REPO" ]
+}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+
+sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+
+dockers=$(curl -X GET https://$NEXUS_REPO/v2/_catalog | jq -r ".repositories[]")
+for d in $dockers
+do
+ tags=$(curl -X GET https://$NEXUS_REPO/v2/$d/tags/list | jq -r ".tags[]")
+ for t in $tags
+ do
+ sudo docker pull $NEXUS_REPO/$d:$t
+ sudo docker tag $NEXUS_REPO/$d:$t $LOCAL_REPO/$d:$t
+ sudo docker push $LOCAL_REPO/$d:$t
+ done
+done
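
registry_setup.sh mirrors every image from the ONAP Nexus into a plain registry:2 listening on 192.168.0.2:5000. A spot check of the mirror, reusing the jq the script already installs:

    curl -s http://192.168.0.2:5000/v2/_catalog | jq -r ".repositories[]" | head
    sudo docker images | grep 192.168.0.2:5000 | head
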
diff --git a/src/vagrant/kubeadm_onap/setup_swap.sh b/src/vagrant/kubeadm_onap/setup_swap.sh
new file mode 100644
index 0000000..c2432b7
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_swap.sh
@@ -0,0 +1,5 @@
+sudo swapoff -a
+sudo fallocate -l 50G /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+sudo swapon --show
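
The swap file above only lasts until the next reboot; the script does not touch /etc/fstab, so making it persistent is an assumption rather than something the patch does:

    sudo chmod 600 /swapfile    # swapon warns if the file is world-readable
    echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
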
diff --git a/src/vagrant/kubeadm_onap/setup_tunnel.sh b/src/vagrant/kubeadm_onap/setup_tunnel.sh
new file mode 100644
index 0000000..3a6ef75
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/setup_tunnel.sh
@@ -0,0 +1,3 @@
+sudo ip link add tunnel0 type gretap local <local> remote <remote>
+sudo ifconfig tunnel0 up
+sudo brctl addif <br> tunnel0
diff --git a/src/vagrant/kubeadm_onap/worker_setup.sh b/src/vagrant/kubeadm_onap/worker_setup.sh
index aa60df3..e65a65c 100755
--- a/src/vagrant/kubeadm_onap/worker_setup.sh
+++ b/src/vagrant/kubeadm_onap/worker_setup.sh
@@ -1,11 +1,15 @@
#!/bin/bash
-
set -ex
-sudo mkdir /dockerdata-nfs
-sudo chmod 755 /dockerdata-nfs
-sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+sudo apt-get -y install ntp
+cat << EOF | sudo tee /etc/ntp.conf
+pool master
+EOF
+sudo service ntp restart
-sudo apt-get install -y putty-tools
-mkdir ~/.kube
-echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+
+sudo apt-get install nfs-common -y
+sudo mkdir /dockerdata-nfs
+sudo chmod 777 /dockerdata-nfs
+sudo mount master:/dockerdata-nfs /dockerdata-nfs
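
Workers sync time from the master and mount its /dockerdata-nfs export; ONAP pods that expect the shared volume will crash-loop if the mount silently failed. A cheap verification, assuming the export from master_setup.sh:

    mountpoint -q /dockerdata-nfs && df -h /dockerdata-nfs || echo "dockerdata-nfs mount missing"
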
diff --git a/src/vagrant/kubeadm_snort/Vagrantfile b/src/vagrant/kubeadm_snort/Vagrantfile
new file mode 100644
index 0000000..9320074
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/Vagrantfile
@@ -0,0 +1,29 @@
+$num_workers=2
+
+Vagrant.require_version ">= 1.8.6"
+Vagrant.configure("2") do |config|
+
+ config.vm.box = "ceph/ubuntu-xenial"
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.memory = 4096
+ libvirt.cpus = 4
+ end
+
+ config.vm.synced_folder "../..", "/src"
+ config.vm.provision "shell", path: "host_setup.sh", privileged: false
+
+ config.vm.define "master" do |config|
+ config.vm.hostname = "master"
+ config.vm.provision "shell", path: "master_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.10"
+ end
+
+ (1 .. $num_workers).each do |i|
+ config.vm.define vm_name = "worker%d" % [i] do |config|
+ config.vm.hostname = vm_name
+ config.vm.provision "shell", path: "worker_setup.sh", privileged: false
+ config.vm.network :private_network, ip: "192.168.1.#{i+20}"
+ end
+ end
+
+end
diff --git a/src/vagrant/kubeadm_snort/deploy.sh b/src/vagrant/kubeadm_snort/deploy.sh
new file mode 100755
index 0000000..e1e16d6
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/deploy.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -ex
+DIR="$(dirname `readlink -f $0`)"
+
+cd $DIR
+../cleanup.sh
+vagrant up
+vagrant ssh master -c "/vagrant/snort/snort-setup.sh"
diff --git a/src/vagrant/kubeadm_snort/host_setup.sh b/src/vagrant/kubeadm_snort/host_setup.sh
new file mode 100644
index 0000000..524a967
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/host_setup.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.1.10 master
+192.168.1.21 worker1
+192.168.1.22 worker2
+192.168.1.23 worker3
+EOF
+
+sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
+cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
+deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00
+
+sudo swapoff -a
+sudo systemctl daemon-reload
+sudo systemctl stop kubelet
+sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_snort/master_setup.sh b/src/vagrant/kubeadm_snort/master_setup.sh
new file mode 100644
index 0000000..972768f
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/master_setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.1.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+kubectl apply -f https://raw.githubusercontent.com/weaveworks/weave/master/prog/weave-kube/weave-daemonset-k8s-1.6.yaml
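
Unlike the other scenarios, this master applies weave and returns immediately, so snort-setup.sh can run before the CNI is up. A wait loop borrowed from the pattern used by the kata and onap scripts, offered as a sketch:

    r=1
    while [ "$r" -ne 0 ]; do
        sleep 30
        r=$(kubectl get pods -n kube-system | grep weave-net | grep -v Run | wc -l)
    done
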
diff --git a/src/vagrant/kubeadm_snort/snort/snort-setup.sh b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
new file mode 100755
index 0000000..08ae663
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/snort/snort-setup.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+kubectl create -f /vagrant/snort/snort.yaml
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+
+r="0"
+while [ $r -ne "2" ]
+do
+ r=$(kubectl get pods | grep Running | wc -l)
+ sleep 60
+done
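
The loop only waits until two pods report Running; it does not confirm snort itself started cleanly. A follow-up check one could add, assuming the app=snort label from snort.yaml:

    for p in $(kubectl get pods -l app=snort -o name); do
        kubectl logs "$p" | tail -n 20
    done
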
diff --git a/src/vagrant/kubeadm_snort/snort/snort.yaml b/src/vagrant/kubeadm_snort/snort/snort.yaml
new file mode 100644
index 0000000..60dede2
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/snort/snort.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: snort-service
+ labels:
+ app: snort
+spec:
+ type: NodePort
+ ports:
+ - port: 80
+ protocol: TCP
+ name: http
+ selector:
+ app: snort
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: snort-pod
+spec:
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: snort
+ spec:
+ containers:
+ - name: snort
+ image: frapsoft/snort
+ args: ["-v"]
+ ports:
+ - containerPort: 80
diff --git a/src/vagrant/kubeadm_snort/worker_setup.sh b/src/vagrant/kubeadm_snort/worker_setup.sh
new file mode 100644
index 0000000..74e4178
--- /dev/null
+++ b/src/vagrant/kubeadm_snort/worker_setup.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+set -ex
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.1.10:6443 || true