Diffstat (limited to 'tools/k8s/cluster-deployment')
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/.ansible-lint | 3
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/README.md | 60
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/ansible.cfg | 9
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/hosts | 5
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml | 4
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml | 28
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml | 20
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml | 606
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml | 251
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml | 101
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml | 47
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml | 127
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml | 46
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml | 22
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml | 11
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml | 30
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml | 17
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml | 14
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml | 11
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml | 12
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml | 10
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml | 26
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml | 13
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml | 10
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml | 83
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml | 15
31 files changed, 1621 insertions, 0 deletions
diff --git a/tools/k8s/cluster-deployment/k8scluster/.ansible-lint b/tools/k8s/cluster-deployment/k8scluster/.ansible-lint
new file mode 100644
index 00000000..036ecf52
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/.ansible-lint
@@ -0,0 +1,3 @@
+skip_list:
+ - '306'
+ - '301' \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/README.md b/tools/k8s/cluster-deployment/k8scluster/README.md
new file mode 100644
index 00000000..78fdbd03
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/README.md
@@ -0,0 +1,60 @@
+# OPNFV - k8s cluster setup
+
+This project sets up and programmatically deploys a Kubernetes cluster on CentOS 7 machines using kubeadm. It is driven by Ansible and requires very little manual intervention.
+
+## Getting Started
+The following steps describe the minimum required to successfully run these playbooks.
+
+
+### Prerequisites
+
+Kubernetes (kubeadm and kubectl) and Ansible must be installed on the master node, and the docker and kubelet services must be running on both the master and worker nodes.
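+
+As a quick sanity check (a minimal sketch; exact versions and package names depend on how the hosts were prepared), the following commands can be used to confirm the prerequisites:
+
+```
+# on the master node
+ansible --version
+kubeadm version
+kubectl version --client
+
+# on master and worker nodes
+systemctl is-active docker kubelet
+```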
+
+
+### Setup
+In order to configure the cluster, an inventory file must be provided. The inventory file (e.g., `hosts`) has the following structure:
+
+```
+[master]
+master ansible_host={enter-master-ip} ansible_connection=ssh ansible_ssh_user={insert-user} ansible_ssh_pass={insert-password} ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+[workers]
+worker ansible_host={enter-worker-ip} ansible_connection=ssh ansible_ssh_user={insert-user} ansible_ssh_pass={insert-password} ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+```
+Fill in the connection details for each node in this file. If more nodes are needed in the cluster, add a line per node to the `[workers]` group of the `hosts` file, as shown in the example below.
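+
+For example, a two-worker inventory might look like the following (the host names, IP addresses, and credentials below are placeholders and must be replaced with your own):
+
+```
+[master]
+master ansible_host=192.0.2.10 ansible_connection=ssh ansible_ssh_user=opnfv ansible_ssh_pass=example ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+[workers]
+worker1 ansible_host=192.0.2.11 ansible_connection=ssh ansible_ssh_user=opnfv ansible_ssh_pass=example ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+worker2 ansible_host=192.0.2.12 ansible_connection=ssh ansible_ssh_user=opnfv ansible_ssh_pass=example ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+```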
+
+
+### Usage
+In order to use the script, download or clone [this repository](https://gerrit.opnfv.org/gerrit/vswitchperf) onto the node that will act as the master.
+
+Navigate to its contents and execute the following command as a regular (non-root) user on whichever machine you wish to use as the master node (this host will be the one running kubectl); running as a regular user prevents errors during configuration and deployment:
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "deploy"
+
+```
+You can verify the installation by running:
+```
+kubectl get nodes
+```
+and checking that the nodes report a `Ready` status. More information can be obtained with `kubectl describe nodes` if needed.
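+
+For example (assuming the CNI plugins were deployed as well), the node and kube-system pod status can be inspected with:
+
+```
+kubectl get nodes -o wide
+kubectl get pods -n kube-system -o wide
+```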
+
+
+To clear the cluster, execute the following command:
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "clear"
+```
+
+To deploy only the CNI plugins:
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "cni"
+```
+
+
+
+### Debugging
+
+If a step fails during the installation, Ansible displays an error message. In addition, log files are written on the nodes for the Kubernetes-related steps: on the master node, `log_init.txt` stores the output of `kubeadm init`; on worker nodes, the relevant file is `node_joined.txt`.
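+
+For example, the stored logs can be inspected on the respective nodes (assuming the playbook was run as the same user, so the files are in that user's home directory):
+
+```
+# on the master node
+cat $HOME/log_init.txt
+
+# on a worker node
+cat $HOME/node_joined.txt
+```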
diff --git a/tools/k8s/cluster-deployment/k8scluster/ansible.cfg b/tools/k8s/cluster-deployment/k8scluster/ansible.cfg
new file mode 100644
index 00000000..0cbe08f3
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/ansible.cfg
@@ -0,0 +1,9 @@
+[defaults]
+interpreter_python=/usr/bin/python3
+
+# enable logging
+log_path = ./cluster-deployment.log
+
+[ssh_connection]
+pipelining = True
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/hosts b/tools/k8s/cluster-deployment/k8scluster/hosts
new file mode 100644
index 00000000..dd928a8e
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/hosts
@@ -0,0 +1,5 @@
+[master]
+master ansible_host=10.10.120.22 ansible_connection=ssh ansible_ssh_user=ENTER_USER ansible_ssh_pass=ENTER_PASS ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+[workers]
+worker ansible_host=10.10.120.21 ansible_connection=ssh ansible_ssh_user=ENTER_USER ansible_ssh_pass=ENTER_PASS ansible_ssh_common_args='-o StrictHostKeyChecking=no'
diff --git a/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml b/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml
new file mode 100644
index 00000000..5430bed5
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml
@@ -0,0 +1,4 @@
+---
+- hosts: all
+ roles:
+ - clustermanager \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml
new file mode 100644
index 00000000..15f1f186
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml
@@ -0,0 +1,28 @@
+# Edit only these values, to match your environment.
+# Enter your master node advertise IP address and the CIDR range for the pods.
+kube_ad_addr: "{{ ansible_host }}"
+kube_cidr_v: 10.244.0.0/16
+
+###################################################################################
+# Don't edit the values below; they are mandatory for configuring the Kubernetes cluster
+#packages:
+#- docker
+#- kubeadm
+#- kubectl
+
+#services:
+#- docker
+#- kubelet
+#- firewalld
+
+#ports:
+#- "6443/tcp"
+#- "10250/tcp"
+
+token_file: $HOME/log_init.txt
+###################################################################################
+# Don't edit the values above; they are mandatory for configuring the Kubernetes cluster
+
+
+
+PIP_executable_version: pip3.6 \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml
new file mode 100644
index 00000000..4efeac61
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sriovdp-config
+ namespace: kube-system
+data:
+ config.json: |
+ {
+ "resourceList": [{
+ "resourceName": "intel_sriov_dpdk_a",
+ "selectors": {
+ "vendors": ["8086"],
+ "devices": ["10ed"],
+ "drivers": ["ixgbevf"],
+ "pfNames": ["eno3"]
+ }
+ }
+ ]
+ }
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml
new file mode 100644
index 00000000..00110ad6
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml
@@ -0,0 +1,606 @@
+#
+# cloned from https://github.com/coreos/flannel/blob/v0.12.0/Documentation/kube-flannel.yml
+#
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: psp.flannel.unprivileged
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
+ apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
+spec:
+ privileged: false
+ volumes:
+ - configMap
+ - secret
+ - emptyDir
+ - hostPath
+ allowedHostPaths:
+ - pathPrefix: "/etc/cni/net.d"
+ - pathPrefix: "/etc/kube-flannel"
+ - pathPrefix: "/run/flannel"
+ readOnlyRootFilesystem: false
+ # Users and groups
+ runAsUser:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ fsGroup:
+ rule: RunAsAny
+ # Privilege Escalation
+ allowPrivilegeEscalation: false
+ defaultAllowPrivilegeEscalation: false
+ # Capabilities
+ allowedCapabilities: ['NET_ADMIN']
+ defaultAddCapabilities: []
+ requiredDropCapabilities: []
+ # Host namespaces
+ hostPID: false
+ hostIPC: false
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ # SELinux
+ seLinux:
+ # SELinux is unused in CaaSP
+ rule: 'RunAsAny'
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: flannel
+rules:
+ - apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames: ['psp.flannel.unprivileged']
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: flannel
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: flannel
+subjects:
+- kind: ServiceAccount
+ name: flannel
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: flannel
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: kube-flannel-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+data:
+ cni-conf.json: |
+ {
+ "name": "cbr0",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "flannel",
+ "delegate": {
+ "hairpinMode": true,
+ "isDefaultGateway": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ net-conf.json: |
+ {
+ "Network": "10.244.0.0/16",
+ "Backend": {
+ "Type": "vxlan"
+ }
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-amd64
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-amd64
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-arm64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - arm64
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-arm64
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-arm64
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-arm
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - arm
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-arm
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-arm
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - ppc64le
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-ppc64le
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-ppc64le
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-s390x
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - s390x
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-s390x
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-s390x
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml
new file mode 100644
index 00000000..97990192
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml
@@ -0,0 +1,251 @@
+#
+# https://github.com/intel/multus-cni/blob/v3.4.1/images/multus-daemonset.yml
+#
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: network-attachment-definitions.k8s.cni.cncf.io
+spec:
+ group: k8s.cni.cncf.io
+ scope: Namespaced
+ names:
+ plural: network-attachment-definitions
+ singular: network-attachment-definition
+ kind: NetworkAttachmentDefinition
+ shortNames:
+ - net-attach-def
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ config:
+ type: string
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+rules:
+ - apiGroups: ["k8s.cni.cncf.io"]
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/status
+ verbs:
+ - get
+ - update
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: multus
+subjects:
+- kind: ServiceAccount
+ name: multus
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: multus
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: multus-cni-config
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+data:
+ # NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
+ # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod
+ # change the "args" line below from
+ # - "--multus-conf-file=auto"
+ # to:
+ # "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
+ # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
+ # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
+ cni-conf.json: |
+ {
+ "name": "multus-cni-network",
+ "type": "multus",
+ "capabilities": {
+ "portMappings": true
+ },
+ "delegates": [
+ {
+ "cniVersion": "0.3.1",
+ "name": "default-cni-network",
+ "plugins": [
+ {
+ "type": "flannel",
+ "name": "flannel.1",
+ "delegate": {
+ "isDefaultGateway": true,
+ "hairpinMode": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ ],
+ "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-multus-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+ name: multus
+spec:
+ selector:
+ matchLabels:
+ name: multus
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: multus
+ name: multus
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ kubernetes.io/arch: amd64
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: multus
+ containers:
+ - name: kube-multus
+ image: nfvpe/multus:v3.4
+ command: ["/entrypoint.sh"]
+ args:
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: multus-cni-config
+ items:
+ - key: cni-conf.json
+ path: 70-multus.conf
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-multus-ds-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+ name: multus
+spec:
+ selector:
+ matchLabels:
+ name: multus
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: multus
+ name: multus
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ kubernetes.io/arch: ppc64le
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: multus
+ containers:
+ - name: kube-multus
+ # ppc64le support requires multus:latest for now. support 3.3 or later.
+ image: nfvpe/multus:latest-ppc64le
+ command: ["/entrypoint.sh"]
+ args:
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "90Mi"
+ limits:
+ cpu: "100m"
+ memory: "90Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: multus-cni-config
+ items:
+ - key: cni-conf.json
+ path: 70-multus.conf
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml
new file mode 100644
index 00000000..8a854c06
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml
@@ -0,0 +1,101 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: ovs-cni-marker-cr
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ - nodes/status
+ verbs:
+ - get
+ - update
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: ovs-cni-marker-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ovs-cni-marker-cr
+subjects:
+- kind: ServiceAccount
+ name: ovs-cni-marker
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ovs-cni-marker
+ namespace: kube-system
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: ovs-cni-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: ovs-cni
+spec:
+ selector:
+ matchLabels:
+ app: ovs-cni
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: ovs-cni
+ spec:
+ serviceAccountName: ovs-cni-marker
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: ovs-cni-plugin
+ image: quay.io/kubevirt/ovs-cni-plugin:latest
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: ovs-cni-marker
+ image: quay.io/kubevirt/ovs-cni-marker:latest
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ args:
+ - -node-name
+ - $(NODE_NAME)
+ - -ovs-socket
+ - /host/var/run/openvswitch/db.sock
+ volumeMounts:
+ - name: ovs-var-run
+ mountPath: /host/var/run/openvswitch
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: ovs-var-run
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml
new file mode 100644
index 00000000..6a28c146
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-cni-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriov-cni
+spec:
+ selector:
+ matchLabels:
+ name: sriov-cni
+ template:
+ metadata:
+ labels:
+ name: sriov-cni
+ tier: node
+ app: sriov-cni
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: kube-sriov-cni
+ image: nfvpe/sriov-cni
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml
new file mode 100644
index 00000000..9168b98c
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml
@@ -0,0 +1,127 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: sriov-device-plugin
+ namespace: kube-system
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-device-plugin-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriovdp
+spec:
+ selector:
+ matchLabels:
+ name: sriov-device-plugin
+ template:
+ metadata:
+ labels:
+ name: sriov-device-plugin
+ tier: node
+ app: sriovdp
+ spec:
+ hostNetwork: true
+ hostPID: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: sriov-device-plugin
+ containers:
+ - name: kube-sriovdp
+ image: nfvpe/sriov-device-plugin
+ imagePullPolicy: IfNotPresent
+ args:
+ - --log-dir=sriovdp
+ - --log-level=10
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: devicesock
+ mountPath: /var/lib/kubelet/
+ readOnly: false
+ - name: log
+ mountPath: /var/log
+ - name: config-volume
+ mountPath: /etc/pcidp
+ volumes:
+ - name: devicesock
+ hostPath:
+ path: /var/lib/kubelet/
+ - name: log
+ hostPath:
+ path: /var/log
+ - name: config-volume
+ configMap:
+ name: sriovdp-config
+ items:
+ - key: config.json
+ path: config.json
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-device-plugin-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriovdp
+spec:
+ selector:
+ matchLabels:
+ name: sriov-device-plugin
+ template:
+ metadata:
+ labels:
+ name: sriov-device-plugin
+ tier: node
+ app: sriovdp
+ spec:
+ hostNetwork: true
+ hostPID: true
+ nodeSelector:
+ beta.kubernetes.io/arch: ppc64le
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: sriov-device-plugin
+ containers:
+ - name: kube-sriovdp
+ image: nfvpe/sriov-device-plugin:ppc64le
+ imagePullPolicy: IfNotPresent
+ args:
+ - --log-dir=sriovdp
+ - --log-level=10
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: devicesock
+ mountPath: /var/lib/kubelet/
+ readOnly: false
+ - name: log
+ mountPath: /var/log
+ - name: config-volume
+ mountPath: /etc/pcidp
+ volumes:
+ - name: devicesock
+ hostPath:
+ path: /var/lib/kubelet/
+ - name: log
+ hostPath:
+ path: /var/log
+ - name: config-volume
+ configMap:
+ name: sriovdp-config
+ items:
+ - key: config.json
+ path: config.json
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml
new file mode 100644
index 00000000..74bb520c
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: userspace-cni-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: userspace-cni
+spec:
+ selector:
+ matchLabels:
+ app: userspace-cni
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: userspace-cni
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: userspace-cni-plugin
+ image: parthyadav/userspace-cni:latest
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml
new file mode 100644
index 00000000..9d0ffda4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete Kube-flannel
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'kube-flannel-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml
new file mode 100644
index 00000000..f797ddb6
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml
@@ -0,0 +1,22 @@
+---
+- name: Drain master node
+ command: kubectl drain {{ ansible_hostname }} --delete-local-data --force --ignore-daemonsets
+
+- name: Delete master node
+ command: kubectl delete node {{ ansible_hostname }}
+
+- name: Kubeadm reset (master)
+ shell: yes y | sudo kubeadm reset
+
+- name: Delete /etc/cni/net.d/ (master)
+ command: sudo rm -rf /etc/cni/net.d/
+
+- name: Delete $HOME/.kube/
+ file:
+ path: $HOME/.kube/
+ state: absent
+
+- name: Delete init log file
+ file:
+ path: "{{ token_file }}"
+ state: absent \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml
new file mode 100644
index 00000000..46ae50ec
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml
@@ -0,0 +1,8 @@
+---
+- name: Drain worker node
+ delegate_to: "{{ groups['master'][0] }}"
+ command: kubectl drain {{ ansible_hostname }} --delete-local-data --force --ignore-daemonsets
+
+- name: Delete worker node
+ delegate_to: "{{ groups['master'][0] }}"
+ command: kubectl delete node {{ ansible_hostname }}
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml
new file mode 100644
index 00000000..62a8c01f
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml
@@ -0,0 +1,11 @@
+---
+- name: Kubeadm reset (worker)
+ shell: yes y | sudo kubeadm reset
+
+- name: Delete /etc/cni/net.d/ (worker)
+ command: sudo rm -rf /etc/cni/net.d/
+
+- name: Remove node_joined.txt
+ file:
+ path: $HOME/node_joined.txt
+ state: absent \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml
new file mode 100644
index 00000000..30740a44
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete ovs-cni-plugin
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'ovs-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml
new file mode 100644
index 00000000..44eabbd1
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete Multus
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'multus-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml
new file mode 100644
index 00000000..6d725ce8
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml
@@ -0,0 +1,30 @@
+---
+
+- name: Delete SRIOV CNI Daemonset
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-cni-daemonset.yaml') }}"
+
+- name: Delete SRIOV Device Plugin
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-device-plugin-daemonset.yaml') }}"
+
+- name: Delete SRIOV Device Plugin Config
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'configMap-sriov-device-plugin.yaml') }}"
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml
new file mode 100644
index 00000000..72b3d869
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete userspace-cni plugin
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'userspace-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml
new file mode 100644
index 00000000..b2f280ef
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml
@@ -0,0 +1,17 @@
+---
+- name: Install openshift python package
+ pip:
+ name: openshift
+ executable: "{{ PIP_executable_version }}"
+ when: inventory_hostname in groups['master']
+ become: yes
+
+- name: Check whether /etc/cni/net.d/ exists
+ stat:
+ path: /etc/cni/net.d
+ register: files_to_delete
+
+- name: Delete /etc/cni/net.d/
+ become: yes
+ command: sudo rm -r /etc/cni/net.d/
+ when: files_to_delete.stat.exists and files_to_delete.stat.isdir \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml
new file mode 100644
index 00000000..4980e17e
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml
@@ -0,0 +1,14 @@
+---
+- name: Pulling images required for setting up a Kubernetes cluster
+ become: yes
+ command: kubeadm config images pull
+
+- name: Initializing Kubernetes cluster
+ become: yes
+ command: kubeadm init --apiserver-advertise-address={{ kube_ad_addr }} --pod-network-cidr={{ kube_cidr_v }}
+ register: output
+
+- name: Storing logs and the generated join token for future use
+ copy:
+ content: "{{ output.stdout }}"
+ dest: "{{ token_file }}" \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml
new file mode 100644
index 00000000..367d682f
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml
@@ -0,0 +1,11 @@
+---
+
+- name: Clean flannel
+ import_tasks: clear-flannel.yml
+
+- name: Deploy Kube-flannel
+ k8s:
+ state: present
+ definition: "{{ lookup('file', 'kube-flannel-daemonset.yml') }}"
+ wait: yes
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml
new file mode 100644
index 00000000..9913cae4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml
@@ -0,0 +1,12 @@
+---
+
+- name: Clean kubevirt-ovs
+ include: clear-kubevirt-ovs.yml
+
+- name: Deploy ovs-cni-plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'ovs-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml
new file mode 100644
index 00000000..6fb77e42
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml
@@ -0,0 +1,10 @@
+---
+
+- name: Clear Multus
+ include: clear-multus.yml
+
+- name: Deploy Multus
+ k8s:
+ state: present
+ definition: "{{ lookup('file', 'multus-daemonset.yml') }}"
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml
new file mode 100644
index 00000000..aaff5cf0
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml
@@ -0,0 +1,26 @@
+---
+
+- name: Clean SRIOV
+ include: clear-sriov.yml
+
+- name: Deploy SRIOV Device Plugin Config
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'configMap-sriov-device-plugin.yaml') }}"
+ wait: yes
+
+- name: Deploy SRIOV Device Plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-device-plugin-daemonset.yaml') }}"
+
+- name: Deploy SRIOV CNI
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-cni-daemonset.yaml') }}"
+
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml
new file mode 100644
index 00000000..32e3b9b1
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Clean userspace-cni
+ include: clear-userspace.yml
+
+- name: Deploy userspace-cni plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'userspace-daemonset.yml') }}"
+
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml
new file mode 100644
index 00000000..1a8c1879
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml
@@ -0,0 +1,10 @@
+---
+- name: Create .kube directory in $HOME
+ file:
+ path: $HOME/.kube
+ state: directory
+
+- name: Copy admin.conf to the user's kubeconfig
+ shell: |
+ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml
new file mode 100644
index 00000000..28c3f501
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml
@@ -0,0 +1,83 @@
+- name: include master tasks
+ import_tasks: configure_master_node.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy
+
+- name: include folder settings for kube config
+ import_tasks: foldersettings.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy
+
+- name: include join worker tasks
+ import_tasks: workers.yml
+ when: inventory_hostname in groups['workers']
+ tags: deploy, join
+
+- name: cni pre-deploy
+ import_tasks: cni-pre-deploy.yml
+ tags: deploy, cni
+
+- name: deploy flannel
+ import_tasks: deploy-flannel.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear flannel
+ import_tasks: clear-flannel.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy multus
+ import_tasks: deploy-multus.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear multus
+ import_tasks: clear-multus.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy kubevirt-ovs
+ import_tasks: deploy-kubevirt-ovs.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear kubevirt-ovs
+ import_tasks: clear-kubevirt-ovs.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy sriov
+ import_tasks: deploy-sriov.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear sriov
+ import_tasks: clear-sriov.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy userspace
+ import_tasks: deploy-userspace.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear userspace
+ import_tasks: clear-userspace.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: drain and delete workers from master
+ import_tasks: clear-k8s-workers-drain.yml
+ when: inventory_hostname in groups['workers']
+ tags: clear
+
+- name: reset workers
+ import_tasks: clear-k8s-workers-reset.yml
+ when: inventory_hostname in groups['workers']
+ tags: clear
+
+- name: clear master
+ import_tasks: clear-k8s-master.yml
+ when: inventory_hostname in groups['master']
+ tags: clear \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml
new file mode 100644
index 00000000..a0a815c4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml
@@ -0,0 +1,15 @@
+---
+- name: check node is already in cluster
+ delegate_to: "{{ groups.master[0] }}"
+ command: "kubectl get nodes -n kube-system -o name"
+ register: get_node_register
+ changed_when: false
+
+- name: get join command
+ delegate_to: "{{ groups.master[0] }}"
+ command: kubeadm token create --print-join-command
+ register: join_command_raw
+
+- name: join cluster
+ shell: "sudo {{ join_command_raw.stdout_lines[0] }} --ignore-preflight-errors=all > $HOME/node_joined.txt"
+ when: ( 'node/' + ansible_hostname ) not in get_node_register.stdout_lines