-rw-r--r--  docs/arm/files/kube-2flannels.yml                    |  154
-rw-r--r--  docs/arm/images/multi_flannel_intfs.PNG              |  bin 0 -> 102479 bytes
-rw-r--r--  docs/arm/index.rst                                   |    1
-rw-r--r--  docs/arm/multi_flannel_intfs_deployment.rst          |  298
-rw-r--r--  src/vagrant/kubeadm_onap/Vagrantfile                 |   41
-rwxr-xr-x  src/vagrant/kubeadm_onap/host_setup.sh               |   39
-rwxr-xr-x  src/vagrant/kubeadm_onap/master_setup.sh             |   13
-rwxr-xr-x  src/vagrant/kubeadm_onap/onap_setup.sh               |   42
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/Vagrantfile       |   37
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/bootstrap.sh      |   14
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/compute.conf      |   20
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/control.conf      |   40
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/create_vm.sh      |    5
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/openrc            |    4
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/setup_cell.sh     |    6
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/setup_compute.sh  |   19
-rw-r--r--  src/vagrant/kubeadm_onap/openstack/setup_control.sh  |   40
-rwxr-xr-x  src/vagrant/kubeadm_onap/worker_setup.sh             |   11
18 files changed, 784 insertions(+), 0 deletions(-)
diff --git a/docs/arm/files/kube-2flannels.yml b/docs/arm/files/kube-2flannels.yml
new file mode 100644
index 0000000..4d73be1
--- /dev/null
+++ b/docs/arm/files/kube-2flannels.yml
@@ -0,0 +1,154 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: flannel
+  namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: kube-flannel-cfg
+  namespace: kube-system
+  labels:
+    tier: node
+    app: flannel
+data:
+  cni-conf.json: |
+    {
+      "name": "flannel-networks",
+      "type": "multus",
+      "delegates": [
+        {
+          "type": "flannel",
+          "name": "flannel.2",
+          "subnetFile": "/run/flannel/subnet2.env",
+          "dataDir": "/var/lib/cni/flannel/2",
+          "delegate": {
+            "bridge": "kbr1",
+            "isDefaultGateway": false
+          }
+        },
+        {
+          "type": "flannel",
+          "name": "flannel.1",
+          "subnetFile": "/run/flannel/subnet.env",
+          "dataDir": "/var/lib/cni/flannel",
+          "masterplugin": true,
+          "delegate": {
+            "bridge": "kbr0",
+            "isDefaultGateway": true
+          }
+        }
+      ]
+    }
+  net-conf.json: |
+    {
+      "Network": "10.1.0.0/16",
+      "Backend": {
+        "Type": "udp",
+        "Port": 8285
+      }
+    }
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: kube-flannel2-cfg
+  namespace: kube-system
+  labels:
+    tier: node
+    app: flannel2
+data:
+  net-conf.json: |
+    {
+      "Network": "10.3.0.0/16",
+      "Backend": {
+        "Type": "udp",
+        "Port": 8286
+      }
+    }
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: kube-flannel-ds
+  namespace: kube-system
+  labels:
+    tier: node
+    app: flannel
+spec:
+  template:
+    metadata:
+      labels:
+        tier: node
+        app: flannel
+    spec:
+      hostNetwork: true
+      nodeSelector:
+        beta.kubernetes.io/arch: arm64
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+      serviceAccountName: flannel
+      containers:
+      - name: kube-flannel
+        image: quay.io/coreos/flannel:v0.8.0-arm64
+        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
+        securityContext:
+          privileged: true
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        volumeMounts:
+        - name: run
+          mountPath: /run
+        - name: flannel-cfg
+          mountPath: /etc/kube-flannel/
+      - name: kube-flannel2
+        image: quay.io/coreos/flannel:v0.8.0-arm64
+        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--subnet-file=/run/flannel/subnet2.env" ]
+        securityContext:
+          privileged: true
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        volumeMounts:
+        - name: run
+          mountPath: /run
+        - name: flannel-cfg2
+          mountPath: /etc/kube-flannel/
+      - name: install-cni
+        image: quay.io/coreos/flannel:v0.8.0-arm64
+        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-2flannels.conf; while true; do sleep 3600; done" ]
+        volumeMounts:
+        - name: cni
+          mountPath: /etc/cni/net.d
+        - name: flannel-cfg
+          mountPath: /etc/kube-flannel/
+      volumes:
+      - name: run
+        hostPath:
+          path: /run
+      - name: cni
+        hostPath:
+          path: /etc/cni/net.d
+      - name: flannel-cfg
+        configMap:
+          name: kube-flannel-cfg
+      - name: flannel-cfg2
+        configMap:
+          name: kube-flannel2-cfg
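Editor's note: the manifest above is applied like the stock kube-flannel.yml. A minimal usage sketch (not part of this patch), assuming kubectl is already configured against the target cluster and the file is saved locally:

    kubectl apply -f docs/arm/files/kube-2flannels.yml
    # Each DaemonSet pod should run three containers:
    # kube-flannel, kube-flannel2 and install-cni.
    kubectl get ds kube-flannel-ds -n kube-system
    kubectl get pods -n kube-system -l app=flannel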
diff --git a/docs/arm/images/multi_flannel_intfs.PNG b/docs/arm/images/multi_flannel_intfs.PNG
new file mode 100644
index 0000000..4fcf667
--- /dev/null
+++ b/docs/arm/images/multi_flannel_intfs.PNG
Binary files differ
diff --git a/docs/arm/index.rst b/docs/arm/index.rst
index beeec0f..eebea47 100644
--- a/docs/arm/index.rst
+++ b/docs/arm/index.rst
@@ -22,3 +22,4 @@ Container4NFV on Arm
    :numbered:
 
    container4nfv_on_arm
+   multi_flannel_intfs_deployment
diff --git a/docs/arm/multi_flannel_intfs_deployment.rst b/docs/arm/multi_flannel_intfs_deployment.rst
new file mode 100644
index 0000000..07c8ad7
--- /dev/null
+++ b/docs/arm/multi_flannel_intfs_deployment.rst
@@ -0,0 +1,298 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Arm Limited.
+
+
+
+=======================================================================
+Multiple Flannel Interfaces Deployment for Kubernetes Pod on Arm server
+=======================================================================
+
+Abstract
+========
+
+This document gives a brief introduction on how to deploy multiple Flannel interfaces for a Kubernetes
+pod on an Arm server.
+For simplicity, the word 'multiple' here stands for '2', which can easily be extended to more interfaces
+with the methods introduced in this document.
+Besides Arm servers, most of the deployment steps discussed in this document can be used on other platforms.
+
+
+Introduction
+============
+
+.. _Flannel: https://github.com/coreos/flannel
+.. _Multus: https://github.com/Intel-Corp/multus-cni
+.. _Compass: https://wiki.opnfv.org/display/compass4nfv/Compass4nfv
+.. _arm64: https://github.com/kubernetes/website/pull/6511
+.. _files: https://github.com/kubernetes/website/pull/6511/files
+
+
+In some cases we need to deploy multiple network interfaces for a single container: for example,
+one interface for external data access and the other for internal data access. Originally,
+only one interface could be deployed for a single type of Container Networking Interface (CNI). With the
+help of the Multus_ CNI, multiple CNIs can be driven with an integrated configuration.
+
+Flannel_ is a widely used solution that configures a layer 3 network fabric designed for Kubernetes. We
+introduce the methods to deploy multiple Flannel network interfaces for a Kubernetes pod under the
+following points:
+
+1. Use Case Architecture
+2. Flannel Configuration
+3. CNI Configuration
+4. Etcd Based Configuration(Optional)
+5. Contacts
+
+For the Container4NFV project, Arm's preferred installer for Kubernetes clusters is currently Compass_.
+Arm gives here a typical deployment scenario with Kubernetes networked with 2 Flannel interfaces.
+
+
+Use Case Architecture
+=====================
+
+Arm has contributed a document to the Kubernetes community on how to deploy Kubernetes on the arm64_
+platform, which uses Flannel as the networking backend. The related Flannel deployment files_ use the
+Flannel docker image to start the Flannel service.
+
+.. figure:: images/multi_flannel_intfs.PNG
+   :alt: 2 Flannel interfaces deployment scenario
+   :figclass: align-center
+
+   Fig 1. Multiple Flannel interfaces deployment architecture
+
+.. _Etcd: https://coreos.com/etcd/
+
+Figure 1 shows a typical use case for a Kubernetes pod configured with 2 Flannel interfaces.
+For this use case, we have to start 2 flanneld processes; each flanneld process listens on a different UDP
+port and serves a different subnet. With the help of the Multus_ CNI, the Flannel CNI is called twice
+to attach 2 Linux veth devices to a pod.
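Editor's note: a rough way to confirm on a node that the two flanneld processes described above are up is to look for the two subnet environment files (a hedged sketch; the file paths are the ones used later in this document):

    # Two flanneld processes, one per subnet/UDP port
    ps aux | grep [f]lanneld
    # Each flanneld writes its own subnet environment file
    cat /run/flannel/subnet.env
    cat /run/flannel/subnet2.env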
+
+Flanneld has 2 ways to get the network backend configuration data: one is from Kubernetes directly by
+setting the option "--kube-subnet-mgr", in which case it simply reads the configuration from the file
+"/etc/kube-flannel/net-conf.json"; the other is from the backend datastore Etcd_, which is the default.
+
+In this document, we mainly give the Flanneld configuration in the 1st way. Anybody interested
+in the 2nd way can refer to the section "Etcd Based Configuration".
+
+
+Flannel Configuration
+=====================
+
+As described in the Kubernetes installation guide on arm64_, Flanneld is installed as a Kubernetes
+DaemonSet in kube-flannel.yml. Here we give a revised version of this yaml file which starts 2 Flannel
+containers:
+
+.. include:: files/kube-2flannels.yml
+   :literal:
+
+kube-2flannels.yml
+
+
+ConfigMap Added
+---------------
+
+To start the 2nd Flannel container process, we add a new ConfigMap named kube-flannel2-cfg which
+includes a net-conf.json different from the 1st:
+
+::
+
+    net-conf.json: |
+      {
+        "Network": "10.3.0.0/16",
+        "Backend": {
+          "Type": "udp",
+          "Port": 8286
+        }
+      }
+
+
+2nd Flannel Container Added
+---------------------------
+
+Flanneld's default UDP listen port is 8285; we set the 2nd Flanneld to listen on port 8286 and serve a
+new subnet.
+
+For the 2nd Flannel container, we use the command:
+
+::
+
+    - name: kube-flannel2
+      image: quay.io/coreos/flannel:v0.8.0-arm64
+      command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--subnet-file=/run/flannel/subnet2.env" ]
+
+which outputs the subnet file to /run/flannel/subnet2.env for the 2nd Flannel CNI to use.
+
+And we mount the 2nd Flannel ConfigMap to /etc/kube-flannel/ for the 2nd Flanneld container process:
+
+::
+
+    volumeMounts:
+    - name: run
+      mountPath: /run
+    - name: flannel-cfg2
+      mountPath: /etc/kube-flannel/
+
+
+CNI Configuration
+=================
+
+.. _Multus: https://github.com/Intel-Corp/multus-cni
+
+To deploy 2 Flannel interfaces, besides the Flannel CNI we need to deploy Multus_, which can be built
+as follows:
+
+::
+
+    git clone https://github.com/Intel-Corp/multus-cni.git
+    cd multus-cni
+    ./build
+    cp bin/multus /opt/cni/bin
+
+To use the Multus_ CNI, we should put the Multus CNI binary into /opt/cni/bin/, where the Flannel CNI
+and the other CNI binaries reside.
+
+The following CNI configuration sample for 2 Flannel interfaces is located in /etc/cni/net.d/; here we
+name it 10-2flannels.conf:
+
+::
+
+    {
+      "name": "flannel-networks",
+      "type": "multus",
+      "delegates": [
+        {
+          "type": "flannel",
+          "name": "flannel.2",
+          "subnetFile": "/run/flannel/subnet2.env",
+          "dataDir": "/var/lib/cni/flannel/2",
+          "delegate": {
+            "bridge": "kbr1",
+            "isDefaultGateway": false
+          }
+        },
+        {
+          "type": "flannel",
+          "name": "flannel.1",
+          "subnetFile": "/run/flannel/subnet.env",
+          "dataDir": "/var/lib/cni/flannel",
+          "masterplugin": true,
+          "delegate": {
+            "bridge": "kbr0",
+            "isDefaultGateway": true
+          }
+        }
+      ]
+    }
+
+The 2nd Flannel CNI uses the subnet file /run/flannel/subnet2.env, generated by the 2nd Flanneld
+process, instead of the default /run/flannel/subnet.env, and its subnet data is output to the directory
+/var/lib/cni/flannel/2.
+
+Here we set the 1st Flannel interface as the default gateway to route traffic to the outside world.
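Editor's note: with 10-2flannels.conf in place on a node, a newly created pod should come up with one veth interface per Flannel delegate. A minimal check, assuming a plain busybox image is pullable (the pod name here is purely illustrative, and interface names may vary with the Multus version):

    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: Pod
    metadata:
      name: 2intfs-test
    spec:
      containers:
      - name: busybox
        image: busybox
        command: [ "sleep", "3600" ]
    EOF
    # Expect two interfaces, e.g. one from 10.1.0.0/16 (bridge kbr0)
    # and one from 10.3.0.0/16 (bridge kbr1)
    kubectl exec 2intfs-test -- ip a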
+
+
+Etcd Based Configuration(Optional)
+==================================
+
+.. _cluster: https://coreos.com/etcd/docs/latest/v2/clustering.html
+.. _etcdctl: https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html
+
+Etcd_ is an open-source distributed key-value store that provides shared configuration and service
+discovery. It can be run as a separate process or as a cluster_.
+For arm64 support, the environment variable or etcd option "ETCD_UNSUPPORTED_ARCH=arm64" should be set
+before running the etcd process.
+
+We use the etcdctl_ tool to set the etcd key-values used for the Flannel backend configuration.
+
+For the 1st network, the etcd prefix is '/coreos.com/network/config', and we set the subnet to
+"10.1.0.0/16":
+
+::
+
+    etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16", "Backend": {"Type":"udp", "Port":8285}}'
+
+or:
+
+::
+
+    etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16"}'
+
+
+For the 2nd network, the etcd prefix is '/coreos.com/network2/config', and we set the subnet to
+"10.3.0.0/16" with UDP port 8286:
+
+::
+
+    etcdctl set /coreos.com/network2/config '{ "Network": "10.3.0.0/16", "Backend": {"Type":"udp", "Port":8286}}'
+
+We can show the configuration with:
+
+::
+
+    etcdctl get /coreos.com/network/config
+    etcdctl get /coreos.com/network2/config
+
+
+Flanneld Configuration based on Etcd
+------------------------------------
+
+As described in the Kubernetes installation guide on arm64_, Flanneld is installed as a Kubernetes
+DaemonSet in kube-flannel.yml. For Flanneld to use the etcd backend, we change the container start
+command as follows:
+
+::
+
+    ...
+    containers:
+    - name: kube-flannel
+      image: quay.io/coreos/flannel:v0.8.0-arm64
+      command: [ "/opt/bin/flanneld", "--ip-masq", "--etcd-endpoints=http://ETCD_CLUSTER_IP1:2379", "--etcd-prefix=/coreos.com/network" ]
+      securityContext:
+        privileged: true
+      env:
+      - name: POD_NAME
+        valueFrom:
+          fieldRef:
+            fieldPath: metadata.name
+      - name: POD_NAMESPACE
+        valueFrom:
+          fieldRef:
+            fieldPath: metadata.namespace
+      volumeMounts:
+      - name: run
+        mountPath: /run
+      - name: flannel-cfg
+        mountPath: /etc/kube-flannel/
+
+As we don't use the "--kube-subnet-mgr" option here, the last 2 lines,
+
+::
+
+    - name: flannel-cfg
+      mountPath: /etc/kube-flannel/
+
+can be omitted.
+
+To start the 2nd Flanneld process, we can add the 2nd Flanneld container section to kube-flannel.yml
+just below the 1st Flanneld container:
+
+::
+
+    containers:
+    - name: kube-flannel2
+      image: quay.io/coreos/flannel:v0.8.0-arm64
+      command: [ "/opt/bin/flanneld", "--ip-masq", "--etcd-endpoints=http://ETCD_CLUSTER_IP1:2379", "--etcd-prefix=/coreos.com/network2", "--subnet-file=/run/flannel/subnet2.env" ]
+      securityContext:
+        privileged: true
+      env:
+      - name: POD_NAME
+        valueFrom:
+          fieldRef:
+            fieldPath: metadata.name
+      - name: POD_NAMESPACE
+        valueFrom:
+          fieldRef:
+            fieldPath: metadata.namespace
+      volumeMounts:
+      - name: run
+        mountPath: /run
+
+The option "--subnet-file" for the 2nd Flanneld outputs a subnet file for the 2nd Flannel subnet
+configuration of the Flannel CNI, which is called by the Multus CNI.
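Editor's note: for completeness, a sketch of running a standalone etcd on an arm64 host for the configuration above. ETCD_CLUSTER_IP1 is the same placeholder as in the yaml; the flags used are standard etcd client-URL options:

    # arm64 is not an officially supported etcd architecture, hence the override
    export ETCD_UNSUPPORTED_ARCH=arm64
    nohup etcd --listen-client-urls http://0.0.0.0:2379 \
               --advertise-client-urls http://ETCD_CLUSTER_IP1:2379 &
    # Then load the two Flannel network configurations
    etcdctl set /coreos.com/network/config  '{ "Network": "10.1.0.0/16", "Backend": {"Type":"udp", "Port":8285}}'
    etcdctl set /coreos.com/network2/config '{ "Network": "10.3.0.0/16", "Backend": {"Type":"udp", "Port":8286}}'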
+
+
+Contacts
+========
+
+Trevor Tao (Zijin Tao), Yibo Cai, Di Xu, Bin Lu, Song Zhu and Kaly Xin from Arm have made contributions
+to this document.
+
+Trevor Tao: trevor.tao@arm.com
+Yibo Cai: yibo.cai@arm.com
+Di Xu: di.xu@arm.com
+Bin Lu: bin.lu@arm.com
+Song Zhu: song.zhu@arm.com
+Kaly Xin: kaly.xin@arm.com
diff --git a/src/vagrant/kubeadm_onap/Vagrantfile b/src/vagrant/kubeadm_onap/Vagrantfile
new file mode 100644
index 0000000..fe24252
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/Vagrantfile
@@ -0,0 +1,41 @@
+$num_workers=1
+
+Vagrant.require_version ">= 1.8.6"
+Vagrant.configure("2") do |config|
+
+  config.vm.box = "yk0/ubuntu-xenial"
+  config.vm.provision "shell", path: "host_setup.sh", privileged: false
+
+  config.vm.define "master" do |config|
+    config.vm.hostname = "master"
+    config.vm.provision "shell", path: "master_setup.sh", privileged: false
+    config.vm.network :private_network, ip: "192.168.0.10"
+    config.vm.provider :libvirt do |libvirt|
+      libvirt.memory = 4096
+      libvirt.cpus = 4
+    end
+  end
+
+  (1 .. $num_workers).each do |i|
+    config.vm.define vm_name = "worker%d" % [i] do |config|
+      config.vm.hostname = vm_name
+      config.vm.provision "shell", path: "worker_setup.sh", privileged: false
+      config.vm.network :private_network, ip: "192.168.0.#{i+20}"
+      config.vm.provider :libvirt do |libvirt|
+        libvirt.memory = 81920
+        libvirt.cpus = 32
+      end
+    end
+  end
+
+  config.vm.define "onap" do |config|
+    config.vm.hostname = "onap"
+    config.vm.provision "shell", path: "onap_setup.sh", privileged: false
+    config.vm.network :private_network, ip: "192.168.0.5"
+    config.vm.provider :libvirt do |libvirt|
+      libvirt.memory = 2048
+      libvirt.cpus = 1
+    end
+  end
+
+end
diff --git a/src/vagrant/kubeadm_onap/host_setup.sh b/src/vagrant/kubeadm_onap/host_setup.sh
new file mode 100755
index 0000000..87b0062
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/host_setup.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1 localhost
+192.168.0.5 onap
+192.168.0.10 master
+192.168.0.21 worker1
+192.168.0.22 worker2
+192.168.0.23 worker3
+EOF
+
+sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D
+cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list
+deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+
+sudo systemctl stop docker
+cat << EOF | sudo tee /etc/docker/daemon.json
+{
+    "storage-driver": "overlay"
+}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl start docker
+
+sudo systemctl stop kubelet
+sudo rm -rf /var/lib/kubelet
+sudo systemctl daemon-reload
+sudo systemctl start kubelet
diff --git a/src/vagrant/kubeadm_onap/master_setup.sh b/src/vagrant/kubeadm_onap/master_setup.sh
new file mode 100755
index 0000000..fa451a2
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/master_setup.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -ex
+
+sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/24 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0
+mkdir ~/.kube
+sudo cp /etc/kubernetes/admin.conf ~/.kube/config
+sudo chown $(id -u):$(id -g) ~/.kube/config
+
+kubectl apply -f http://git.io/weave-kube-1.6
+curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
+helm init
+kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
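Editor's note: a hedged sanity check after master_setup.sh completes (plain kubectl/helm commands; the exact output depends on cluster state):

    kubectl get nodes
    kubectl get pods -n kube-system   # weave and tiller pods should reach Running
    helm version                      # client and server (tiller) should both answer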
diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh
new file mode 100755
index 0000000..3e1d9b4
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/onap_setup.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -ex
+
+sudo apt-get install -y putty-tools python-openstackclient
+mkdir ~/.kube
+r=0
+while [ "$r" == "0" ]
+do
+  sleep 30
+  echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true
+  r=$(kubectl get pods -n kube-system | grep "tiller-deploy.*Run" | wc -l)
+done
+
+curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
+git clone http://gerrit.onap.org/r/oom
+cd oom; git checkout release-1.1.0
+source /vagrant/openstack/openrc
+cat <<EOF | tee ~/oom/kubernetes/config/onap-parameters.yaml
+OPENSTACK_UBUNTU_14_IMAGE: "ubuntu1404"
+OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
+OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
+OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
+OPENSTACK_OAM_NETWORK_CIDR: "10.0.0.0/16"
+OPENSTACK_USERNAME: "admin"
+OPENSTACK_API_KEY: "admin"
+OPENSTACK_TENANT_NAME: "admin"
+OPENSTACK_TENANT_ID: "47899782ed714295b1151681fdfd51f5"
+OPENSTACK_REGION: "RegionOne"
+OPENSTACK_KEYSTONE_URL: "http://192.168.0.30:5000/v2.0"
+OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
+OPENSTACK_SERVICE_TENANT_NAME: "service"
+DMAAP_TOPIC: "AUTO"
+DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
+EOF
+cd ~/oom/kubernetes/oneclick && ./deleteAll.bash -n onap || true
+(kubectl delete ns onap; helm del --purge onap-config) || true
+echo "y\n" | plink -ssh -pw vagrant vagrant@worker1 "sudo rm -rf /dockerdata-nfs/onap"
+cd ~/oom/kubernetes/config && ./createConfig.sh -n onap
+while true; do sleep 30; kubectl get pods --all-namespaces | grep onap | wc -l | grep "^0$" && break; done
+source ~/oom/kubernetes/oneclick/setenv.bash
+cd ~/oom/kubernetes/oneclick && ./createAll.bash -n onap
diff --git a/src/vagrant/kubeadm_onap/openstack/Vagrantfile b/src/vagrant/kubeadm_onap/openstack/Vagrantfile
new file mode 100644
index 0000000..f2a806d
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/Vagrantfile
@@ -0,0 +1,37 @@
+$num_compute_nodes = 0
+
+Vagrant.configure("2") do |config|
+
+  config.vm.box = "yk0/ubuntu-xenial"
+
+  config.vm.provision "shell", path: "bootstrap.sh", privileged: false
+
+  config.vm.define "control" do |config|
+    config.vm.hostname = "control"
+    config.vm.network "private_network", ip: "192.168.0.30"
+    config.vm.network "private_network", ip: "192.168.1.30"
+    config.vm.provision "shell", path: "setup_control.sh", privileged: false
+    config.vm.provider :libvirt do |libvirt|
+      libvirt.memory = 32768
+      libvirt.cpus = 8
+      libvirt.storage :file, :type => "qcow2", :size => "500G"
+      libvirt.storage :file, :type => "qcow2", :size => "500G"
+    end
+  end
+
+  (1 .. $num_compute_nodes).each do |n|
+    config.vm.define vm_name = "compute-#{n}" do |config|
+      config.vm.hostname = vm_name
+      config.vm.network "private_network", ip: "192.168.0.#{n+40}"
+      config.vm.network "private_network", ip: "192.168.1.#{n+40}"
+      config.vm.provision "shell", path: "setup_compute.sh", privileged: false
+      config.vm.provider :libvirt do |libvirt|
+        libvirt.memory = 81920
+        libvirt.cpus = 32
+        libvirt.nested = true
+        libvirt.storage :file, :type => "qcow2", :size => "500G"
+      end
+    end
+  end
+
+end
diff --git a/src/vagrant/kubeadm_onap/openstack/bootstrap.sh b/src/vagrant/kubeadm_onap/openstack/bootstrap.sh
new file mode 100644
index 0000000..9978c81
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/bootstrap.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+set -ex
+
+sudo apt-get update -y
+sudo apt-get install git -y
+git clone https://github.com/openstack-dev/devstack
+cd devstack; git checkout stable/ocata
+sudo apt-get install openvswitch-switch -y
+sudo ovs-vsctl add-br br-ex
+inet=$(ip a | grep 'inet.*eth2' | cut -f6 -d' ')
+sudo ip addr flush eth2
+sudo ovs-vsctl add-port br-ex eth2
+sudo ifconfig br-ex $inet up
+echo "source /vagrant/openrc" >> $HOME/.bash_profile
diff --git a/src/vagrant/kubeadm_onap/openstack/compute.conf b/src/vagrant/kubeadm_onap/openstack/compute.conf
new file mode 100644
index 0000000..28b6f4e
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/compute.conf
@@ -0,0 +1,20 @@
+[[local|localrc]]
+SCREEN_LOGDIR=/opt/stack/log
+LOGFILE=stack.sh.log
+LOG_COLOR=False
+
+HOST_IP=HOSTIP
+HOST_NAME=HOSTNAME
+SERVICE_HOST=192.168.0.30
+SERVICE_HOST_NAME=control
+
+ADMIN_PASSWORD=admin
+SERVICE_PASSWORD=admin
+DATABASE_PASSWORD=mysql
+RABBIT_PASSWORD=rabbit
+
+disable_all_services
+enable_service rabbit,n-cpu,n-novnc,placement-client,q-agt,n-api-meta
+
+OVS_BRIDGE_MAPPINGS=public:br-ex
+OVS_PHYSICAL_BRIDGE=br-ex
diff --git a/src/vagrant/kubeadm_onap/openstack/control.conf b/src/vagrant/kubeadm_onap/openstack/control.conf
new file mode 100644
index 0000000..72a3609
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/control.conf
@@ -0,0 +1,40 @@
+[[local|localrc]]
+SCREEN_LOGDIR=/opt/stack/log
+LOGFILE=stack.sh.log
+LOG_COLOR=False
+
+MULTI_HOST=1
+HOST_IP=192.168.0.30
+HOST_NAME=control
+SERVICE_HOST=192.168.0.30
+SERVICE_HOST_NAME=control
+
+ADMIN_PASSWORD=admin
+SERVICE_PASSWORD=admin
+DATABASE_PASSWORD=mysql
+RABBIT_PASSWORD=rabbit
+
+enable_plugin heat https://git.openstack.org/openstack/heat stable/ocata
+enable_plugin designate https://git.openstack.org/openstack/designate stable/ocata
+
+disable_all_services
+enable_service key,rabbit,mysql
+enable_service n-api,n-cond,n-sch,n-novnc,n-crt,n-cauth,placement-api
+enable_service q-svc,q-dhcp,q-meta,q-l3,q-agt
+enable_service c-sch,c-api,c-vol
+enable_service g-api,g-reg
+enable_service h-eng,h-api,h-api-cfn,h-api-cw
+enable_service horizon
+enable_service designate,designate-central,designate-api,designate-pool-manager,designate-zone-manager,designate-mdns
+enable_service rabbit,n-cpu,n-novnc,placement-client,q-agt,n-api-meta
+
+DESIGNATE_BACKEND_DRIVER=bind9
+
+## Neutron options
+FLOATING_RANGE="192.168.1.0/24"
+PUBLIC_NETWORK_GATEWAY="192.168.1.1"
+FIXED_RANGE="10.0.0.0/16"
+Q_FLOATING_ALLOCATION_POOL=start=192.168.1.200,end=192.168.1.250
+Q_USE_SECGROUP=False
+Q_L3_ENABLED=True
+Q_USE_PROVIDERNET_FOR_PUBLIC=True
diff --git a/src/vagrant/kubeadm_onap/openstack/create_vm.sh b/src/vagrant/kubeadm_onap/openstack/create_vm.sh
new file mode 100644
index 0000000..6597ae4
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/create_vm.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+source ~/devstack/openrc admin admin
+netid=$(openstack network list --name private -f value | cut -f1 -d' ')
+openstack server create --flavor 1 --image=cirros-0.3.4-x86_64-uec --nic net-id=$netid vm1
diff --git a/src/vagrant/kubeadm_onap/openstack/openrc b/src/vagrant/kubeadm_onap/openstack/openrc
new file mode 100644
index 0000000..8e1cd1c
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/openrc
@@ -0,0 +1,4 @@
+export OS_PROJECT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=admin
+export OS_AUTH_URL=http://192.168.0.30:5000/v2.0
diff --git a/src/vagrant/kubeadm_onap/openstack/setup_cell.sh b/src/vagrant/kubeadm_onap/openstack/setup_cell.sh
new file mode 100644
index 0000000..4426501
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/setup_cell.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+set -ex
+
+source ~/devstack/openrc admin admin
+nova-manage cell_v2 discover_hosts
+nova-manage cell_v2 map_cell_and_hosts
diff --git a/src/vagrant/kubeadm_onap/openstack/setup_compute.sh b/src/vagrant/kubeadm_onap/openstack/setup_compute.sh
new file mode 100644
index 0000000..92c18cf
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/setup_compute.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+set -ex
+
+sudo pvcreate /dev/vdb
+sudo vgextend ubuntubox-vg /dev/vdb
+sudo lvextend -L+500G /dev/mapper/ubuntubox--vg-root
+sudo resize2fs /dev/mapper/ubuntubox--vg-root
+
+cd devstack
+cp /vagrant/compute.conf local.conf
+host=$(hostname)
+ip=$(ifconfig | grep 192.168.0 | cut -f2 -d: | cut -f1 -d' ')
+sed -i -e "s/HOSTIP/$ip/" -e "s/HOSTNAME/$host/" local.conf
+./stack.sh
+
+
+sudo apt-get update -y
+sudo apt-get install -y putty
+echo y | plink -ssh -l vagrant -pw vagrant 192.168.0.30 "bash /vagrant/setup_cell.sh"
diff --git a/src/vagrant/kubeadm_onap/openstack/setup_control.sh b/src/vagrant/kubeadm_onap/openstack/setup_control.sh
new file mode 100644
index 0000000..1f19a16
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/openstack/setup_control.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+set -ex
+
+sudo pvcreate /dev/vdb
+sudo vgextend ubuntubox-vg /dev/vdb
+sudo lvextend -L+500G /dev/mapper/ubuntubox--vg-root
+sudo resize2fs /dev/mapper/ubuntubox--vg-root
+
+cd devstack
+cp /vagrant/control.conf local.conf
+./stack.sh
+
+sudo pvcreate /dev/vdc
+sudo vgextend stack-volumes-lvmdriver-1 /dev/vdc
+
+source /vagrant/openrc
+
+#openstack network create public --external --provider-network-type=flat --provider-physical-network=public
+#openstack subnet create --network=public --subnet-range=192.168.1.0/24 --allocation-pool start=192.168.1.200,end=192.168.1.250 --gateway 192.168.1.1 public-subnet
+openstack security group list -f value | cut -f1 -d" " | xargs -I {} openstack security group rule create --ingress --ethertype=IPv4 --protocol=0 {}
+
+wget https://cloud-images.ubuntu.com/releases/14.04.1/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+wget https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
+wget https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+openstack image create ubuntu1404 --file ubuntu-14.04-server-cloudimg-amd64-disk1.img --disk-format qcow2
+openstack image create ubuntu1604 --file ubuntu-16.04-server-cloudimg-amd64-disk1.img --disk-format qcow2
+openstack image create centos7 --file CentOS-7-x86_64-GenericCloud.qcow2 --disk-format qcow2
+
+openstack quota set admin --instances 32
+openstack quota set admin --cores 128
+openstack quota set admin --ram 409600
+
+openstack flavor delete m1.medium || true
+openstack flavor create --public m1.medium --id auto --ram 4096 --vcpus 2 --disk 40
+openstack flavor delete m1.large || true
+openstack flavor create --public m1.large --id auto --ram 8192 --vcpus 2 --disk 40
+openstack flavor delete m1.xlarge || true
+openstack flavor create --public m1.xlarge --id auto --ram 12288 --vcpus 4 --disk 40
+openstack flavor delete m1.xxlarge || true
+openstack flavor create --public m1.xxlarge --id auto --ram 16384 --vcpus 4 --disk 40
diff --git a/src/vagrant/kubeadm_onap/worker_setup.sh b/src/vagrant/kubeadm_onap/worker_setup.sh
new file mode 100755
index 0000000..aa60df3
--- /dev/null
+++ b/src/vagrant/kubeadm_onap/worker_setup.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -ex
+
+sudo mkdir /dockerdata-nfs
+sudo chmod 755 /dockerdata-nfs
+sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true
+
+sudo apt-get install -y putty-tools
+mkdir ~/.kube
+echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config
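Editor's note: a usage sketch for bringing up the kubeadm_onap environment defined by the Vagrantfile above, assuming the vagrant-libvirt provider is installed on the host:

    cd src/vagrant/kubeadm_onap
    vagrant up                                 # brings up master, worker1 and onap
    vagrant ssh master -c "kubectl get nodes"  # workers should appear once joined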