37 files changed, 780 insertions(+), 76 deletions(-)
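[Editor's note] The cni-deploy playbook added below is driven by the new inventory/inventory.cfg and vars/global files. As a sketch (not part of the change), assuming it is run from src/arm/cni-deploy on a jump host with a recent Ansible (import_tasks and the `is failed` test imply 2.5 or newer) and inventory IPs that match the target cluster, a typical invocation would be:

    # full run: flannel fixup on the master, then Multus, SRIOV and vhostuser/VPP on all hosts
    ansible-playbook -i inventory/inventory.cfg deploy.yml

    # or deploy one CNI at a time via the tags declared in deploy.yml
    ansible-playbook -i inventory/inventory.cfg deploy.yml --tags sriov
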
@@ -4,6 +4,11 @@ project_creation_date: 'Dec 13, 2016' project_category: 'Integration & Testing' lifecycle_state: 'Incubation' project_lead: &opnfv_container4nfv_ptl + name: 'Xuan Jia' + email: 'jason.jiaxuan@gmail.com' + company: 'gmail' + id: 'xuanjia' + timezone: '' primary_contact: *opnfv_container4nfv_ptl issue_tracking: type: 'jira' diff --git a/src/arm/cni-deploy/.gitignore b/src/arm/cni-deploy/.gitignore new file mode 100644 index 0000000..a8b42eb --- /dev/null +++ b/src/arm/cni-deploy/.gitignore @@ -0,0 +1 @@ +*.retry diff --git a/src/arm/cni-deploy/deploy.yml b/src/arm/cni-deploy/deploy.yml new file mode 100644 index 0000000..c54353a --- /dev/null +++ b/src/arm/cni-deploy/deploy.yml @@ -0,0 +1,32 @@ +--- +- name: Fixup default flannel + hosts: kube-master + gather_facts: "no" + vars_files: + - "vars/global" + roles: + - {role: flannel, tags: [flannel]} + +- name: Deploy Multus CNI + hosts: all + gather_facts: "no" + vars_files: + - "vars/global" + roles: + - {role: multus, tags: [multus]} + +- name: Deploy SRIOV CNI + hosts: all + gather_facts: "no" + vars_files: + - "vars/global" + roles: + - {role: sriov, tags: [sriov]} + +- name: Deploy Vhostuser CNI and VPP + hosts: all + gather_facts: "yes" + vars_files: + - "vars/global" + roles: + - {role: vhost-vpp, tags: [vhost-vpp]} diff --git a/src/arm/cni-deploy/inventory/inventory.cfg b/src/arm/cni-deploy/inventory/inventory.cfg new file mode 100644 index 0000000..cd8bb25 --- /dev/null +++ b/src/arm/cni-deploy/inventory/inventory.cfg @@ -0,0 +1,18 @@ +# compass-tasks: /opt/kargo_k8s/inventory/inventory.cfg + +[all] +host2 ansible_ssh_host=10.1.0.51 ansible_ssh_pass=root ansible_user=root +host1 ansible_ssh_host=10.1.0.50 ansible_ssh_pass=root ansible_user=root + +[kube-master] +host1 + +[etcd] +host1 + +[kube-node] +host2 + +[k8s-cluster:children] +kube-node +kube-master diff --git a/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml new file mode 100644 index 0000000..a99983b --- /dev/null +++ b/src/arm/cni-deploy/roles/flannel/files/cni-flannel-ds.yml @@ -0,0 +1,86 @@ +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kube-flannel + namespace: "kube-system" + labels: + tier: node + k8s-app: flannel +spec: + template: + metadata: + labels: + tier: node + k8s-app: flannel + spec: + serviceAccountName: flannel + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.9.1-arm64 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 300m + memory: 500M + requests: + cpu: 150m + memory: 64M + command: ["/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + # - name: install-cni + # image: linaro/flannel-cni-arm64:v0.3.0 + # command: ["/install-cni.sh"] + # env: + # # The CNI network config to install on each node. 
+ # - name: CNI_NETWORK_CONFIG + # valueFrom: + # configMapKeyRef: + # name: kube-flannel-cfg + # key: cni-conf.json + # - name: CNI_CONF_NAME + # value: "10-flannel.conflist" + # volumeMounts: + # - name: cni + # mountPath: /host/etc/cni/net.d + # - name: host-cni-bin + # mountPath: /host/opt/cni/bin/ + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + volumes: + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + # - name: host-cni-bin + # hostPath: + # path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: 20% + type: RollingUpdate diff --git a/src/arm/cni-deploy/roles/flannel/tasks/main.yml b/src/arm/cni-deploy/roles/flannel/tasks/main.yml new file mode 100644 index 0000000..4f1a910 --- /dev/null +++ b/src/arm/cni-deploy/roles/flannel/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Copy flannel daemonset file + copy: + src: cni-flannel-ds.yml + dest: /tmp/cni-flannel-ds.yml + +- name: Apply flannel daemonset + shell: kubectl apply -f /tmp/cni-flannel-ds.yml + ignore_errors: "yes" + +- name: Sleep 10 seconds + wait_for: timeout=10 diff --git a/src/arm/cni-deploy/roles/multus/files/10-multus.conf b/src/arm/cni-deploy/roles/multus/files/10-multus.conf new file mode 100644 index 0000000..3726413 --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/files/10-multus.conf @@ -0,0 +1,13 @@ +{ + "name": "multus-cni-network", + "type": "multus", + "kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml", + "delegates": [{ + "type": "flannel", + "masterplugin": true, + "delegate": { + "isDefaultGateway": true + } + }] +} + diff --git a/src/arm/cni-deploy/roles/multus/files/clusterrole.yml b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml new file mode 100644 index 0000000..fb056d4 --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/files/clusterrole.yml @@ -0,0 +1,16 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: multus-crd-overpowered +rules: + - apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' + - nonResourceURLs: + - '*' + verbs: + - '*' diff --git a/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml new file mode 100644 index 0000000..9aefdb8 --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/files/crdnetwork.yml @@ -0,0 +1,15 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networks.kubernetes.com +spec: + group: kubernetes.com + version: v1 + scope: Namespaced + names: + plural: networks + singular: network + kind: Network + shortNames: + - net diff --git a/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml new file mode 100644 index 0000000..bd7891d --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/files/flannel-obj.yml @@ -0,0 +1,13 @@ +--- +apiVersion: "kubernetes.com/v1" +kind: Network +metadata: + name: flannel-networkobj +plugin: flannel +args: '[ + { + "delegate": { + "isDefaultGateway": true + } + } +]' diff --git a/src/arm/cni-deploy/roles/multus/handlers/main.yml b/src/arm/cni-deploy/roles/multus/handlers/main.yml new file mode 100644 index 0000000..8474d34 --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/handlers/main.yml @@ -0,0 +1,4 @@ +- name: Restart kubelet + service: + name: kubelet + state: restarted diff --git a/src/arm/cni-deploy/roles/multus/tasks/crd.yml 
b/src/arm/cni-deploy/roles/multus/tasks/crd.yml new file mode 100644 index 0000000..cacf98a --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/tasks/crd.yml @@ -0,0 +1,44 @@ +--- +- name: Copy yaml files + copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + with_items: + - clusterrole.yml + - crdnetwork.yml + - flannel-obj.yml + +- name: Copy macvlan template + template: + src: macvlan-obj.yml.j2 + dest: /tmp/macvlan-obj.yml + +- name: Copy Multus testpod template + template: + src: multus-testpod.yml.j2 + dest: /root/multus-testpod.yml + +- name: Create cluster role + shell: kubectl apply -f /tmp/clusterrole.yml + +- name: Check if role binding is created + shell: kubectl get clusterrolebinding multus-node-{{ item }} + register: check_rb + ignore_errors: "yes" + with_items: "{{ groups['all'] }}" + +- name: Create role binding + shell: > + kubectl create clusterrolebinding multus-node-{{ item }} + --clusterrole=multus-crd-overpowered + --user=system:node:{{ item }} + when: check_rb is failed + with_items: "{{ groups['all'] }}" + +- name: Create network CRD + shell: kubectl apply -f /tmp/crdnetwork.yml + +- name: Create flannel and macvlan network objects + shell: > + kubectl apply -f /tmp/flannel-obj.yml && + kubectl apply -f /tmp/macvlan-obj.yml diff --git a/src/arm/cni-deploy/roles/multus/tasks/main.yml b/src/arm/cni-deploy/roles/multus/tasks/main.yml new file mode 100644 index 0000000..a200215 --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Build Multus CNI + shell: > + docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9 + bash -c "git clone {{ multus_repo }} multus_cni && cd multus_cni && + git checkout {{ multus_commit }} && ./build && cp bin/multus /opt/cni/bin/" + args: + creates: /opt/cni/bin/multus + +- name: Remove default CNI configuration + shell: rm -f /etc/cni/net.d/* + args: + warn: "no" + +- name: Set Multus as default CNI + copy: + src: 10-multus.conf + dest: /etc/cni/net.d/ + notify: + - Restart kubelet + +- name: Import CRD task + import_tasks: crd.yml + when: inventory_hostname == groups["kube-master"][0] diff --git a/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2 new file mode 100644 index 0000000..b5a549f --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/templates/macvlan-obj.yml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: "kubernetes.com/v1" +kind: Network +metadata: + name: macvlan-networkobj +plugin: macvlan +args: '[ + { + "master": "{{ macvlan_master }}", + "mode": "vepa", + "ipam": { + "type": "host-local", + "subnet": "{{ macvlan_subnet }}", + "rangeStart": "{{ macvlan_range_start }}", + "rangeEnd": "{{ macvlan_range_end }}", + "routes": [ + { "dst": "0.0.0.0/0" } + ], + "gateway": "{{ macvlan_gateway }}" + } + } +]' diff --git a/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2 b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2 new file mode 100644 index 0000000..4884846 --- /dev/null +++ b/src/arm/cni-deploy/roles/multus/templates/multus-testpod.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: multus-test + annotations: + networks: '[ + { "name": "flannel-networkobj" }, + { "name": "macvlan-networkobj" } + ]' +spec: + containers: + - name: multus-test + image: "busybox" + command: ["sleep", "100d"] + stdin: true + tty: true + nodeSelector: + kubernetes.io/hostname: "{{ groups['kube-node'][0] }}" diff --git a/src/arm/cni-deploy/roles/sriov/tasks/crd.yml 
b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml new file mode 100644 index 0000000..5cc7892 --- /dev/null +++ b/src/arm/cni-deploy/roles/sriov/tasks/crd.yml @@ -0,0 +1,13 @@ +--- +- name: Copy SRIOV template + template: + src: sriov-obj.yml.j2 + dest: /tmp/sriov-obj.yml + +- name: Copy SRIOV testpod template + template: + src: sriov-testpod.yml.j2 + dest: /root/sriov-testpod.yml + +- name: Create SRIOV network object + shell: kubectl apply -f /tmp/sriov-obj.yml diff --git a/src/arm/cni-deploy/roles/sriov/tasks/main.yml b/src/arm/cni-deploy/roles/sriov/tasks/main.yml new file mode 100644 index 0000000..9c190ad --- /dev/null +++ b/src/arm/cni-deploy/roles/sriov/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Build SRIOV CNI + shell: > + docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9 + bash -c "git clone {{ sriov_repo }} sriov_cni && cd sriov_cni && + git checkout {{ sriov_commit }} && ./build && cp bin/sriov /opt/cni/bin/" + args: + creates: /opt/cni/bin/sriov + +- name: Import CRD task + import_tasks: crd.yml + when: inventory_hostname == groups["kube-master"][0] diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2 new file mode 100644 index 0000000..6c67968 --- /dev/null +++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-obj.yml.j2 @@ -0,0 +1,25 @@ +--- +apiVersion: "kubernetes.com/v1" +kind: Network +metadata: + name: sriov-networkobj +plugin: sriov +args: '[ + { + "master": "{{ sriov_master }}", + "pfOnly": true, + "if0name": "net0", + "ipam": { + "type": "host-local", + "subnet": "{{ sriov_subnet }}", + "rangeStart": "{{ sriov_range_start }}", + "rangeEnd": "{{ sriov_range_end }}", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "gateway": "{{ sriov_gateway }}" + } + } +]' diff --git a/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2 b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2 new file mode 100644 index 0000000..c1d01bc --- /dev/null +++ b/src/arm/cni-deploy/roles/sriov/templates/sriov-testpod.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: sriov-test + annotations: + networks: '[ + { "name": "flannel-networkobj" }, + { "name": "sriov-networkobj" } + ]' +spec: + containers: + - name: sriov-test + image: "busybox" + command: ["sleep", "100d"] + stdin: true + tty: true + nodeSelector: + kubernetes.io/hostname: "{{ groups['kube-node'][0] }}" diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch new file mode 100644 index 0000000..171ff4d --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/files/0001-net-virtio-ethdev.patch @@ -0,0 +1,16 @@ +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index e320811..c1b1640 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -1754,6 +1754,11 @@ virtio_dev_start(struct rte_eth_dev *dev) + virtqueue_notify(rxvq->vq); + } + ++ for (i = 0; i < dev->data->nb_tx_queues; i++) { ++ txvq = dev->data->tx_queues[i]; ++ virtqueue_notify(txvq->vq); ++ } ++ + PMD_INIT_LOG(DEBUG, "Notified backend at initialization"); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708 b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708 new file mode 100644 index 0000000..2f83534 --- /dev/null +++ 
b/src/arm/cni-deploy/roles/vhost-vpp/files/Dockerfile.vpp1710-dpdk1708 @@ -0,0 +1,24 @@ +FROM ubuntu:xenial + +RUN apt-get update && \ + apt-get install -y git make openssl libcrypto++-dev libnuma-dev && \ + apt-get autoclean + +RUN git clone https://gerrit.fd.io/r/vpp -b stable/1710 /root/vpp-1710 + +WORKDIR /root/vpp-1710 +COPY ./0001-net-virtio-ethdev.patch dpdk/dpdk-17.08_patches/0001-net-virtio-ethdev.patch +RUN sed -i "s/sudo -E //g" Makefile +RUN make UNATTENDED=yes install-dep + +WORKDIR /root/vpp-1710/build-root +RUN ./bootstrap.sh +RUN make PLATFORM=vpp TAG=vpp_debug vpp-install +RUN mkdir -p /etc/vpp && \ + cp /root/vpp-1710/src/vpp/conf/startup.conf /etc/vpp/startup.conf && \ + cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin && \ + ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins +RUN groupadd vpp + +ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/dpdk/bin" +ENV PATH "$PATH:/root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin" diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh new file mode 100755 index 0000000..15b0d27 --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/files/setvpp.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -x + +cid=`sed -ne '/hostname/p' /proc/1/task/1/mountinfo | awk -F '/' '{print $6}'` +cid_s=${cid:0:12} +filename=${cid_s}-net1.json +ifstring=`cat /vhost-user-net-plugin/${cid}/${cid_s}-net1.json | awk -F ',' '{print $4}'` +ifmac=`echo ${ifstring} | awk -F '\"' '{print $4}'` + +ipstr=$(cat /vhost-user-net-plugin/${cid}/${cid_s}-net1-ip4.conf |grep "ipAddr") +ipaddr=$(echo $ipstr | awk -F '\"' '{print $4}') +ipaddr1=$(echo $ipaddr | cut -d / -f 1) + +vdev_str="vdev virtio_user0,path=/vhost-user-net-plugin/$cid/$cid_s-net1,mac=$ifmac" + +sed -i.bak '/# dpdk/a\dpdk \{' /etc/vpp/startup.conf +sed -i.bak "/# vdev eth_bond1,mode=1/a\\$vdev_str" /etc/vpp/startup.conf +sed -i.bak '/# socket-mem/a\\}' /etc/vpp/startup.conf + +vpp -c /etc/vpp/startup.conf & + +sleep 40 + +vppctl set int state VirtioUser0/0/0 up +vppctl set int ip address VirtioUser0/0/0 ${ipaddr1}/24 +vppctl show int +vppctl show int address + +echo ${ipaddr1} > /vhost-user-net-plugin/$(hostname) diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf new file mode 100644 index 0000000..ae86e38 --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/files/startup.conf @@ -0,0 +1,21 @@ +unix { + nodaemon + log /tmp/vpp.log + full-coredump + cli-listen /run/vpp/cli.sock + gid vpp +} +api-trace { + on +} +api-segment { + gid vpp +} +cpu { + main-core 1 + corelist-workers 2-3 + workers 2 +} +dpdk { + uio-driver vfio-pci +} diff --git a/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml new file mode 100644 index 0000000..1e9bc66 --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/files/vhostuser-obj.yml @@ -0,0 +1,28 @@ +--- +apiVersion: "kubernetes.com/v1" +kind: Network +metadata: + name: vhostuser-networkobj +plugin: vhostuser +args: '[ + { + "type": "vhostuser", + "name": "vhostuser-network", + "if0name": "net1", + "vhost": { + "vhost_tool": "/opt/cni/bin/vpp-config.py" + }, + "ipam": { + "type": "host-local", + "subnet": "10.56.217.0/24", + "rangeStart": "10.56.217.131", + "rangeEnd": "10.56.217.190", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "gateway": "10.56.217.1" + } + } +]' diff --git 
a/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml new file mode 100644 index 0000000..ad36c90 --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/crd.yml @@ -0,0 +1,13 @@ +--- +- name: Copy Vhostuser yaml + copy: + src: vhostuser-obj.yml + dest: /tmp/vhostuser-obj.yml + +- name: Copy VPP testpod template + template: + src: vpp-testpod.yml.j2 + dest: /root/vpp-testpod.yml + +- name: Create Vhostuser network object + shell: kubectl apply -f /tmp/vhostuser-obj.yml diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml new file mode 100644 index 0000000..df890ea --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Build Vhostuser CNI + shell: > + docker run --rm --network host -v /opt/cni/bin:/opt/cni/bin golang:1.9 + bash -c "git clone {{ vhostuser_repo }} vhostuser_cni && cd vhostuser_cni + && git checkout {{ vhostuser_commit }} && ./build + && cp bin/vhostuser /opt/cni/bin/ + && cp tests/vpp-config-debug.py /opt/cni/bin/vpp-config.py" + args: + creates: /opt/cni/bin/vhostuser + +- name: Import CRD task + import_tasks: crd.yml + when: inventory_hostname == groups["kube-master"][0] + +- name: Import VPP task + import_tasks: vpp.yml + when: inventory_hostname in groups["kube-node"] diff --git a/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml new file mode 100644 index 0000000..7f5be05 --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/tasks/vpp.yml @@ -0,0 +1,47 @@ +--- +- name: Create dest directories + file: + path: "{{ item }}" + state: directory + with_items: + - /tmp/vpp1710/ + - /var/lib/cni/vhostuser/ + - /etc/vpp/ + +- name: Copy VPP files + copy: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - {src: "Dockerfile.vpp1710-dpdk1708", dest: "/tmp/vpp1710/Dockerfile"} + - {src: "0001-net-virtio-ethdev.patch", dest: "/tmp/vpp1710/0001-net-virtio-ethdev.patch"} + - {src: "setvpp.sh", dest: "/var/lib/cni/vhostuser/setvpp.sh"} + - {src: "startup.conf", dest: "/etc/vpp/startup.conf"} + +- name: Check if VPP image exists + shell: docker inspect --type=image vpp-1710:virtio-patched > /dev/null 2>&1 + ignore_errors: "yes" + register: check_vpp + +- name: Building VPP container. Be patient... + shell: docker build -t vpp-1710:virtio-patched --network host . 
+ args: + chdir: /tmp/vpp1710/ + when: check_vpp is failed + +- name: Copy VPP binaries to host + shell: > + docker run --rm -v /root/vpp-1710/build-root:/root/vpp-host vpp-1710:virtio-patched + /bin/cp -a /root/vpp-1710/build-root/install-vpp_debug-native /root/vpp-host + && /bin/cp /root/vpp-1710/build-root/install-vpp_debug-native/vpp/bin/* /usr/bin + && /bin/rm -rf /usr/lib/vpp_plugins + && ln -s /root/vpp-1710/build-root/install-vpp_debug-native/vpp/lib64/vpp_plugins /usr/lib/vpp_plugins + && (groupadd vpp || true) + +- name: Copy libcrypto.so.1.0.0 for CentOS + shell: > + docker run --rm -v /usr/lib64:/root/lib64-centos vpp-1710:virtio-patched + /bin/cp /lib/aarch64-linux-gnu/libcrypto.so.1.0.0 /root/lib64-centos/ + args: + creates: /usr/lib64/libcrypto.so.1.0.0 + when: ansible_os_family == "RedHat" diff --git a/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2 b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2 new file mode 100644 index 0000000..2efd4e0 --- /dev/null +++ b/src/arm/cni-deploy/roles/vhost-vpp/templates/vpp-testpod.yml.j2 @@ -0,0 +1,68 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: vpp-test1 + annotations: + networks: '[ + { "name": "flannel-networkobj" }, + { "name": "vhostuser-networkobj" } + ]' +spec: + containers: + - name: vpp-test1 + image: vpp-1710:virtio-patched + imagePullPolicy: "Never" + stdin: true + terminationMessagePath: /dev/termination-log + tty: true + securityContext: + privileged: true + volumeMounts: + - mountPath: /vhost-user-net-plugin + name: vhost-user-net-plugin + - mountPath: /mnt/huge + name: huge + nodeSelector: + kubernetes.io/hostname: "{{ groups['kube-node'][0] }}" + volumes: + - name: vhost-user-net-plugin + hostPath: + path: /var/lib/cni/vhostuser + - name: huge + hostPath: + path: /mnt/huge +--- +apiVersion: v1 +kind: Pod +metadata: + name: vpp-test2 + annotations: + networks: '[ + { "name": "flannel-networkobj" }, + { "name": "vhostuser-networkobj" } + ]' +spec: + containers: + - name: vpp-test2 + image: vpp-1710:virtio-patched + imagePullPolicy: "Never" + stdin: true + terminationMessagePath: /dev/termination-log + tty: true + securityContext: + privileged: true + volumeMounts: + - mountPath: /vhost-user-net-plugin + name: vhost-user-net-plugin + - mountPath: /mnt/huge + name: huge + nodeSelector: + kubernetes.io/hostname: "{{ groups['kube-node'][0] }}" + volumes: + - name: vhost-user-net-plugin + hostPath: + path: /var/lib/cni/vhostuser + - name: huge + hostPath: + path: /mnt/huge diff --git a/src/arm/cni-deploy/vars/global b/src/arm/cni-deploy/vars/global new file mode 100644 index 0000000..35d76b4 --- /dev/null +++ b/src/arm/cni-deploy/vars/global @@ -0,0 +1,20 @@ +multus_repo: https://github.com/Intel-Corp/multus-cni +multus_commit: 61959e04 + +sriov_repo: https://github.com/hustcat/sriov-cni +sriov_commit: 8b7ed984 + +vhostuser_repo: https://github.com/yibo-cai/vhost-user-net-plugin +vhostuser_commit: e8dc9d8e + +macvlan_master: eth2 +macvlan_subnet: 192.168.166.0/24 +macvlan_range_start: 192.168.166.11 +macvlan_range_end: 192.168.166.30 +macvlan_gateway: 192.168.166.1 + +sriov_master: eth2 +sriov_subnet: 192.168.166.0/24 +sriov_range_start: 192.168.166.31 +sriov_range_end: 192.168.166.50 +sriov_gateway: 192.168.166.1 diff --git a/src/vagrant/kubeadm_onap/Vagrantfile b/src/vagrant/kubeadm_onap/Vagrantfile index fe24252..699f607 100644 --- a/src/vagrant/kubeadm_onap/Vagrantfile +++ b/src/vagrant/kubeadm_onap/Vagrantfile @@ -1,17 +1,17 @@ -$num_workers=1 +$num_workers=4 Vagrant.require_version 
">= 1.8.6" Vagrant.configure("2") do |config| - config.vm.box = "yk0/ubuntu-xenial" - config.vm.provision "shell", path: "host_setup.sh", privileged: false + config.vm.box = "ceph/ubuntu-xenial" config.vm.define "master" do |config| config.vm.hostname = "master" + config.vm.provision "shell", path: "host_setup.sh", privileged: false config.vm.provision "shell", path: "master_setup.sh", privileged: false config.vm.network :private_network, ip: "192.168.0.10" config.vm.provider :libvirt do |libvirt| - libvirt.memory = 4096 + libvirt.memory = 8192 libvirt.cpus = 4 end end @@ -19,23 +19,14 @@ Vagrant.configure("2") do |config| (1 .. $num_workers).each do |i| config.vm.define vm_name = "worker%d" % [i] do |config| config.vm.hostname = vm_name + config.vm.provision "shell", path: "host_setup.sh", privileged: false config.vm.provision "shell", path: "worker_setup.sh", privileged: false config.vm.network :private_network, ip: "192.168.0.#{i+20}" config.vm.provider :libvirt do |libvirt| - libvirt.memory = 81920 - libvirt.cpus = 32 + libvirt.memory = 40960 + libvirt.cpus = 16 end end end - config.vm.define "onap" do |config| - config.vm.hostname = "onap" - config.vm.provision "shell", path: "onap_setup.sh", privileged: false - config.vm.network :private_network, ip: "192.168.0.5" - config.vm.provider :libvirt do |libvirt| - libvirt.memory = 2048 - libvirt.cpus = 1 - end - end - end diff --git a/src/vagrant/kubeadm_onap/host_setup.sh b/src/vagrant/kubeadm_onap/host_setup.sh index 64e1733..9cfd266 100755 --- a/src/vagrant/kubeadm_onap/host_setup.sh +++ b/src/vagrant/kubeadm_onap/host_setup.sh @@ -4,13 +4,15 @@ set -ex cat << EOF | sudo tee /etc/hosts 127.0.0.1 localhost -192.168.0.5 onap 192.168.0.10 master 192.168.0.21 worker1 192.168.0.22 worker2 192.168.0.23 worker3 +192.168.0.24 worker4 EOF +sudo ifconfig eth1 mtu 1400 + sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D sudo apt-key adv -k 58118E89F3A912897C070ADBF76221572C52609D cat << EOF | sudo tee /etc/apt/sources.list.d/docker.list @@ -22,18 +24,17 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list deb http://apt.kubernetes.io/ kubernetes-xenial main EOF sudo apt-get update -sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00 +sudo apt-get install -y --allow-unauthenticated --allow-downgrades docker-engine=1.12.6-0~ubuntu-xenial kubelet=1.9.1-00 kubeadm=1.9.1-00 kubectl=1.9.1-00 kubernetes-cni=0.6.0-00 -sudo systemctl stop docker cat << EOF | sudo tee /etc/docker/daemon.json { - "storage-driver": "overlay" + "insecure-registries" : [ "nexus3.onap.org:10001" ] } EOF sudo systemctl daemon-reload -sudo systemctl start docker +sudo systemctl restart docker -sudo systemctl stop kubelet -sudo rm -rf /var/lib/kubelet +sudo swapoff -a sudo systemctl daemon-reload +sudo systemctl stop kubelet sudo systemctl start kubelet diff --git a/src/vagrant/kubeadm_onap/master_setup.sh b/src/vagrant/kubeadm_onap/master_setup.sh index fa451a2..8840541 100755 --- a/src/vagrant/kubeadm_onap/master_setup.sh +++ b/src/vagrant/kubeadm_onap/master_setup.sh @@ -1,13 +1,28 @@ #!/bin/bash - set -ex -sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/24 --pod-network-cidr=10.32.0.0/12 --token 8c5adc.1cec8dbf339093f0 +sudo apt-get -y install ntp +cat << EOF | sudo tee /etc/ntp.conf +server 127.127.1.0 +fudge 127.127.1.0 stratum 10 +EOF 
+sudo service ntp restart + +sudo apt install nfs-kernel-server -y +sudo mkdir /dockerdata-nfs +sudo chmod 777 /dockerdata-nfs +cat << EOF | sudo tee /etc/exports +/dockerdata-nfs *(rw,sync,no_subtree_check,no_root_squash) +EOF +sudo systemctl restart nfs-kernel-server.service + +sudo kubeadm init --apiserver-advertise-address=192.168.0.10 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0 mkdir ~/.kube -sudo cp /etc/kubernetes/admin.conf ~/.kube/config -sudo chown $(id -u):$(id -g) ~/.kube/config +sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config + +wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml +sed -i "s/kube-subnet-mgr/kube-subnet-mgr\n - --iface=eth1/" kube-flannel.yml +kubectl apply -f kube-flannel.yml -kubectl apply -f http://git.io/weave-kube-1.6 -curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash -helm init -kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin +/vagrant/onap_setup.sh diff --git a/src/vagrant/kubeadm_onap/onap_setup.sh b/src/vagrant/kubeadm_onap/onap_setup.sh index 4dfe1e1..e4edd8f 100755 --- a/src/vagrant/kubeadm_onap/onap_setup.sh +++ b/src/vagrant/kubeadm_onap/onap_setup.sh @@ -2,42 +2,19 @@ set -ex -sudo apt-get install -y putty-tools python-openstackclient -mkdir ~/.kube -r=0 -while [ "$r" == "0" ] -do - sleep 30 - echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config || true - r=$(kubectl get pods -n kube-system | grep "tiller-deploy.*Run" | wc -l) -done +kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin +wget https://storage.googleapis.com/kubernetes-helm/helm-v2.8.2-linux-amd64.tar.gz +tar xzvf helm-v2.8.2-linux-amd64.tar.gz +sudo mv linux-amd64/helm /usr/local/bin/ +helm init +helm serve & +helm repo remove stable +helm repo add local http://127.0.0.1:8879 -curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash -git clone http://gerrit.onap.org/r/oom -cd oom; git checkout amsterdam -source /vagrant/openstack/openrc -cat <<EOF | tee ~/oom/kubernetes/config/onap-parameters.yaml -OPENSTACK_UBUNTU_14_IMAGE: "ubuntu1404" -OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc" -OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6" -OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e" -OPENSTACK_OAM_NETWORK_CIDR: "10.0.0.0/16" -OPENSTACK_USERNAME: "admin" -OPENSTACK_API_KEY: "adim" -OPENSTACK_TENANT_NAME: "admin" -OPENSTACK_TENANT_ID: "47899782ed714295b1151681fdfd51f5" -OPENSTACK_REGION: "RegionOne" -OPENSTACK_KEYSTONE_URL: "http://192.168.0.30:5000/v2.0" -OPENSTACK_FLAVOUR_MEDIUM: "m1.medium" -OPENSTACK_SERVICE_TENANT_NAME: "service" -DMAAP_TOPIC: "AUTO" -DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT" -EOF -cd ~/oom/kubernetes/oneclick && ./deleteAll.bash -n onap || true -(kubectl delete ns onap; helm del --purge onap-config) || true -echo "y\n" | plink -ssh -pw vagrant vagrant@worker1 "sudo rm -rf /dockerdata-nfs/onap" -cd ~/oom/kubernetes/config && ./createConfig.sh -n onap -while true; do sleep 30; kubectl get pods --all-namespaces | grep onap | wc -l | grep "^0$" && break; done -source ~/oom/kubernetes/oneclick/setenv.bash -sed -i "s/aaiServiceClusterIp:.*/aaiServiceClusterIp: 10.96.0.254/" ~/oom/kubernetes/aai/values.yaml -cd 
~/oom/kubernetes/oneclick && ./createAll.bash -n onap +git clone -b beijing http://gerrit.onap.org/r/oom +cd oom/kubernetes + +sudo apt-get install make -y +make all +sleep 300 +helm install local/onap -n dev --namespace onap diff --git a/src/vagrant/kubeadm_onap/registry_setup.sh b/src/vagrant/kubeadm_onap/registry_setup.sh new file mode 100644 index 0000000..669268b --- /dev/null +++ b/src/vagrant/kubeadm_onap/registry_setup.sh @@ -0,0 +1,30 @@ +#!/bin/bash +set -ex + +sudo apt-get update -y +sudo apt install -y jq docker.io + +NEXUS_REPO=nexus3.onap.org:10001 +LOCAL_REPO=192.168.0.2:5000 + +cat << EOF | sudo tee /etc/docker/daemon.json +{ + "insecure-registries" : [ "$LOCAL_REPO" ] +} +EOF +sudo systemctl daemon-reload +sudo systemctl restart docker + +sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2 + +dockers=$(curl -X GET https://$NEXUS_REPO/v2/_catalog | jq -r ".repositories[]") +for d in $dockers +do + tags=$(curl -X GET https://$NEXUS_REPO/v2/$d/tags/list | jq -r ".tags[]") + for t in $tags + do + sudo docker pull $NEXUS_REPO/$d:$t + sudo docker tag $NEXUS_REPO/$d:$t $LOCAL_REPO/$d:$t + sudo docker push $LOCAL_REPO/$d:$t + done +done diff --git a/src/vagrant/kubeadm_onap/setup_swap.sh b/src/vagrant/kubeadm_onap/setup_swap.sh new file mode 100644 index 0000000..c2432b7 --- /dev/null +++ b/src/vagrant/kubeadm_onap/setup_swap.sh @@ -0,0 +1,5 @@ +sudo swapoff -a +sudo fallocate -l 50G /swapfile +sudo mkswap /swapfile +sudo swapon /swapfile +sudo swapon --show diff --git a/src/vagrant/kubeadm_onap/setup_tunnel.sh b/src/vagrant/kubeadm_onap/setup_tunnel.sh new file mode 100644 index 0000000..3a6ef75 --- /dev/null +++ b/src/vagrant/kubeadm_onap/setup_tunnel.sh @@ -0,0 +1,3 @@ +sudo ip link add tunnel0 type gretap local <local> remote <remote> +sudo ifconfig tunnel0 up +sudo brctl addif <br> tunnel0 diff --git a/src/vagrant/kubeadm_onap/worker_setup.sh b/src/vagrant/kubeadm_onap/worker_setup.sh index aa60df3..e65a65c 100755 --- a/src/vagrant/kubeadm_onap/worker_setup.sh +++ b/src/vagrant/kubeadm_onap/worker_setup.sh @@ -1,11 +1,15 @@ #!/bin/bash - set -ex -sudo mkdir /dockerdata-nfs -sudo chmod 755 /dockerdata-nfs -sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true +sudo apt-get -y install ntp +cat << EOF | sudo tee /etc/ntp.conf +pool master +EOF +sudo service ntp restart -sudo apt-get install -y putty-tools -mkdir ~/.kube -echo "y\n" | plink -ssh -pw vagrant vagrant@master "cat ~/.kube/config" > ~/.kube/config +sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token 8c5adc.1cec8dbf339093f0 192.168.0.10:6443 || true + +sudo apt-get install nfs-common -y +sudo mkdir /dockerdata-nfs +sudo chmod 777 /dockerdata-nfs +sudo mount master:/dockerdata-nfs /dockerdata-nfs |
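
[Editor's note] The multus and sriov roles render test pod manifests to /root on the first kube-master. A minimal smoke test, assuming the templates rendered as-is and the busybox image ships the `ip` applet:

    kubectl apply -f /root/multus-testpod.yml
    kubectl get pod multus-test -o wide
    # expect the flannel delegate on eth0 plus a second, macvlan-backed interface
    kubectl exec multus-test -- ip -o addr

The SRIOV network object can be exercised the same way with /root/sriov-testpod.yml.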
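[Editor's note] For the vhost-vpp role, setvpp.sh lands in /var/lib/cni/vhostuser on each node, which the test pods mount at /vhost-user-net-plugin; the script starts VPP, configures the vhostuser interface, and records the pod's address under /vhost-user-net-plugin/<pod hostname>. A rough connectivity check under those assumptions (the placeholder address must be the one printed by the `cat`):

    kubectl apply -f /root/vpp-testpod.yml
    kubectl exec vpp-test1 -- /vhost-user-net-plugin/setvpp.sh
    kubectl exec vpp-test2 -- /vhost-user-net-plugin/setvpp.sh
    # read the peer address written by setvpp.sh, then ping it from inside VPP
    kubectl exec vpp-test1 -- cat /vhost-user-net-plugin/vpp-test2
    kubectl exec vpp-test1 -- vppctl ping <address-printed-above>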
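[Editor's note] On the kubeadm_onap side, the Vagrantfile now brings up one master and four workers, and master_setup.sh chains straight into /vagrant/onap_setup.sh. A quick post-`vagrant up` sanity check might look like this (Helm 2.8 syntax, matching the version onap_setup.sh installs; `dev` is the release name it passes to `helm install -n dev`):

    vagrant ssh master -c "kubectl get nodes"             # master plus worker1..worker4, all Ready
    vagrant ssh worker1 -c "mount | grep dockerdata-nfs"  # NFS share exported by the master
    vagrant ssh master -c "helm ls"                       # expect the 'dev' ONAP release listed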