61 files changed, 1816 insertions, 488 deletions
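The first hunk below (ci/deploy_ci.sh) drops the k8-nosdn-nofeature-ha case branch in favour of a substring match, so every k8-* scenario now pins the same Kubernetes version (the old branch's COMPASS_OS_VERSION=centos7 export is no longer set here). A minimal standalone sketch of the resulting logic, for illustration only; the default scenario value is an assumption, not taken from the diff:

#!/bin/bash
# Sketch of the new version-selection logic in ci/deploy_ci.sh:
# any scenario name containing "k8-" gets KUBERNETES_VERSION pinned,
# replacing the old per-scenario case branch.
DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-k8-nosdn-stor4nfv-ha}  # illustrative default

if [[ "$DEPLOY_SCENARIO" =~ "k8-" ]]; then
    export KUBERNETES_VERSION="v1.7.3"
fi

echo "scenario=$DEPLOY_SCENARIO kubernetes=${KUBERNETES_VERSION:-unset}"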
diff --git a/INFO.yaml b/INFO.yaml new file mode 100644 index 00000000..3712ad36 --- /dev/null +++ b/INFO.yaml @@ -0,0 +1,56 @@ +--- +project: 'Compass4nfv' +project_creation_date: '' +project_category: 'Integration and testing' +lifecycle_state: 'Proposal approved' +project_lead: &opnfv_compass4nfv_ptl + name: 'Justin chi' + email: 'chigang@huawei.com' + id: 'chigang' + company: 'huawei.com' + timezone: 'Unknown' +primary_contact: *opnfv_compass4nfv_ptl +issue_tracking: + type: 'jira' + url: 'https://jira.opnfv.org/projects/COMPASS' + key: 'COMPASS' +mailing_list: + type: 'mailman2' + url: 'opnfv-tech-discuss@lists.opnfv.org' + tag: '[compass4nfv]' +realtime_discussion: + type: irc + server: 'freenode.net' + channel: '#opnfv-compass4nfv' +meetings: + - type: 'gotomeeting+irc' + agenda: # eg: 'https://wiki.opnfv.org/display/' + url: # eg: 'https://global.gotomeeting.com/join/819733085' + server: 'freenode.net' + channel: '#opnfv-meeting' + repeats: 'weekly' + time: # eg: '16:00 UTC' +repositories: + - 'compass4nfv' +committers: + - <<: *opnfv_compass4nfv_ptl + - name: 'Prakash Ramchandran' + email: 'prakash.ramchandran@huawei.com' + company: 'huawei.com' + id: 'rprakash' + - name: 'shuai chen' + email: 'chenshuai@huawei.com' + company: 'huawei.com' + id: 'chenshuai' + - name: 'Yifei Xue' + email: 'xueyifei@huawei.com' + company: 'huawei.com' + id: 'xueyifei1988' + - name: 'HU Xinhui' + email: 'xinhui_hu@foxmail.com' + company: 'foxmail.com' + id: 'huxinhui' +tsc: + # yamllint disable rule:line-length + approval: 'http//ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-07-21-14.02.html' + # yamllint enable rule:line-length diff --git a/ci/deploy_ci.sh b/ci/deploy_ci.sh index bd5adcc7..dd8786af 100755 --- a/ci/deploy_ci.sh +++ b/ci/deploy_ci.sh @@ -18,12 +18,13 @@ case $DEPLOY_SCENARIO in echo "os-onos-sfc-ha scenario supports mitaka only" exit 1 ;; - k8-nosdn-nofeature-ha) - export COMPASS_OS_VERSION=centos7 - export KUBERNETES_VERSION="v1.7.3" - ;; esac +if [[ "$DEPLOY_SCENARIO" =~ "k8-" ]]; then + export KUBERNETES_VERSION="v1.7.3" +fi + + if [[ "$NODE_NAME" =~ "intel-pod17" ]]; then export USER_NAMESERVER=8.8.8.8 fi diff --git a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml index bfdc8958..68dec5c2 100755 --- a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml +++ b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml @@ -42,3 +42,16 @@ max_fail_percentage: 0 roles: - post-k8s + +- hosts: kube_node + remote_user: root + max_fail_percentage: 0 + roles: + - role: storage + when: stor4nfv is defined and stor4nfv == "Enable" + +- hosts: storage_master + remote_user: root + max_fail_percentage: 0 + roles: + - stor4nfv diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml index e683a3fe..4f8ca005 100644 --- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml @@ -24,5 +24,6 @@ apt: pkg: "{{ item }}" state: "present" + update_cache: 'yes' with_items: "{{ packages }}" when: ansible_os_family == 'Debian' diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml index 8ced18b4..0453dc44 100644 --- 
a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml +++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml @@ -9,6 +9,7 @@ packages: - python-iniparse - python-lxml - python-crypto + - git pip_packages: - crudini diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml index b7e1d3dc..1adf1dfc 100644 --- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml +++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml @@ -11,7 +11,7 @@ packages: - lsof - strace - net-tools - + - git pip_packages: - crudini diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml index 187e2a52..b9d9c234 100644 --- a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml @@ -199,6 +199,69 @@ regexp: '"calico", "weave", "canal", "flannel"', replace: '"calico", "weave", "canal", "flannel", "2flannel"'} +# yamllint disable rule:line-length +- name: enable CSI plugin feature + lineinfile: + dest: "/opt/kargo_k8s/roles/kubespray-defaults/defaults/main.yaml" + regexp: "^kube_feature_gates:" + line: "{% raw %}kube_feature_gates: ['Initializers={{ istio_enabled|string }}', 'PersistentLocalVolumes={{ local_volumes_enabled|string }}', 'CSIPersistentVolume=True', 'MountPropagation=True']{% endraw %}" + when: + - stor4nfv is defined and stor4nfv == "Enable" +# yamllint enable rule:line-length + +- name: enable CSI plugin runtime_config + lineinfile: + dest: /opt/kargo_k8s/roles/kubernetes/master/defaults/main.yml + insertafter: '^ - admissionregistration.k8s.io/v1alpha1' + line: ' - storage.k8s.io/v1alpha1' + when: + - stor4nfv is defined and stor4nfv == "Enable" + +- name: copy sriov playbook to kargo + copy: + src: "{{ run_dir }}/roles/sriov" + dest: /opt/kargo_k8s/roles/network_plugin + +- name: copy sriov-apps playbook to kargo + copy: + src: "{{ run_dir }}/roles/sriov-apps/" + dest: /opt/kargo_k8s/roles/kubernetes-apps/network_plugin/sriov + +- name: append sriov to network plugin + blockinfile: + path: /opt/kargo_k8s/roles/network_plugin/meta/main.yml + block: " - role: network_plugin/sriov\n when: kube_network_plugin == 'sriov'\n \ + tags: sriov\n" + +- name: append sriov apps to network plugin + blockinfile: + path: /opt/kargo_k8s/roles/kubernetes-apps/network_plugin/meta/main.yml + block: " - role: kubernetes-apps/network_plugin/sriov\n \ + when: kube_network_plugin == 'sriov'\n tags: sriov\n" + +- name: append sriov to valid kube_network_plugin list + replace: + path: "{{ item.path }}" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - {path: "/opt/kargo_k8s/roles/kubernetes/master/templates/manifests/\ +kube-controller-manager.manifest.j2", + regexp: '"cloud", "flannel"', + replace: '"cloud", "flannel", "sriov"'} + - {path: '/opt/kargo_k8s/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2', + regexp: '"calico", "canal", "flannel", "weave"', + replace: '"calico", "canal", "flannel", "weave", "sriov"'} + - {path: '/opt/kargo_k8s/roles/kubernetes/node/templates/kubelet.standard.env.j2', + regexp: '"calico", "canal", "flannel", "weave"', + replace: '"calico", "canal", "flannel", "weave", "sriov"'} + - {path: '/opt/kargo_k8s/roles/kubernetes/node/templates/kubelet.rkt.service.j2', + regexp: 
'"calico", "weave", "canal", "flannel"', + replace: '"calico", "weave", "canal", "flannel", "sriov"'} + - {path: '/opt/kargo_k8s/roles/kubernetes/preinstall/tasks/main.yml', + regexp: '"calico", "weave", "canal", "flannel"', + replace: '"calico", "weave", "canal", "flannel", "sriov"'} + - name: run kargo playbook shell: | cd /opt/kargo_k8s diff --git a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/Ubuntu.yml b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/Ubuntu.yml index 8054267d..44e3b1f6 100644 --- a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/Ubuntu.yml +++ b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/Ubuntu.yml @@ -67,9 +67,6 @@ src: sources.list.official dest: /etc/apt/sources.list -- name: update apt cache - apt: update_cache=yes - - name: restart ntp service shell: "service ntp restart" diff --git a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/vars/main.yml index b196bd25..6d6ecf44 100644 --- a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/vars/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/vars/main.yml @@ -1,21 +1,21 @@ --- aptpackages: -- bridge-utils -- debootstrap -- ifenslave -- ifenslave-2.6 -- lsof -- lvm2 -- ntp -- ntpdate -- sudo -- vlan -- tcpdump + - bridge-utils + - debootstrap + - ifenslave + - ifenslave-2.6 + - lsof + - lvm2 + - ntp + - ntpdate + - sudo + - vlan + - tcpdump yumpackages: -- bridge-utils -- iputils -- lvm2 -- ntp -- tcpdump -- vim + - bridge-utils + - iputils + - lvm2 + - ntp + - tcpdump + - vim diff --git a/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/tasks/RedHat.yml b/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/tasks/RedHat.yml index c59fdfc5..5b434dbe 100644 --- a/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/tasks/RedHat.yml +++ b/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/tasks/RedHat.yml @@ -36,6 +36,17 @@ src: ifcfg-eth.j2 dest: /etc/sysconfig/network-scripts/ifcfg-{{sys_intf_mappings["external"]["interface"]}} +- name: generate ifcfg-sriov + template: + src: ifcfg-sriov.j2 + dest: /etc/sysconfig/network-scripts/ifcfg-{{ intf_sriov }} + when: intf_sriov|length > 0 + +- name: remove ifcfg-br-sriov script + file: + path: /etc/sysconfig/network-scripts/ifcfg-br-sriov + state: absent + - name: remove defualt gw lineinfile: dest: /etc/sysconfig/network diff --git a/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/templates/ifcfg-sriov.j2 b/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/templates/ifcfg-sriov.j2 new file mode 100644 index 00000000..c4005854 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/templates/ifcfg-sriov.j2 @@ -0,0 +1,12 @@ +DEVICE={{ intf_sriov }} +BOOTPROTO=none +ONBOOT=yes +IPADDR={{ ip_settings[inventory_hostname]["tenant"]["ip"] }} +NETMASK=255.255.255.0 +DEFROUTE="no" +{% if sys_intf_mappings["tenant"]["vlan_tag"] | int %} +{% set intf_vlan = "yes" %} +{% else %} +{% set intf_vlan = "no" %} +{% endif %} +VLAN={{ intf_vlan }} diff --git a/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/vars/main.yml new file mode 100644 index 00000000..e525bf5c --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/setup-k8s-network/vars/main.yml @@ -0,0 +1,7 @@ +--- +intf_sriov: |- + {%- set intf_sriov = sys_intf_mappings["tenant"]["interface"] %} + {%- if sys_intf_mappings["tenant"]["vlan_tag"] | int %} + {%- set 
intf_sriov = intf_sriov + '.' + sys_intf_mappings["tenant"]["vlan_tag"]|string %} + {%- endif %} + {{- intf_sriov }} diff --git a/deploy/adapters/ansible/kubernetes/roles/sriov-apps/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/sriov-apps/tasks/main.yml new file mode 100644 index 00000000..662fa7bf --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/sriov-apps/tasks/main.yml @@ -0,0 +1,20 @@ +# Copyright (C) 2018, ARM Limited and contributors. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +--- +- name: "Sriov | Create ServiceAccount ClusterRole and ClusterRoleBinding" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-sriov-rbac.yml" + run_once: true + when: rbac_enabled and sriov_rbac_manifest.changed + +- name: Sriov | Create Network Resources + kube: + name: "kube-sriov" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/cni-sriov.yml" + namespace: "{{system_namespace}}" + state: "{{ item | ternary('latest','present') }}" + with_items: "{{ sriov_manifest.changed }}" + when: inventory_hostname == groups['kube-master'][0] diff --git a/deploy/adapters/ansible/kubernetes/roles/sriov/defaults/main.yml b/deploy/adapters/ansible/kubernetes/roles/sriov/defaults/main.yml new file mode 100644 index 00000000..44263956 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/sriov/defaults/main.yml @@ -0,0 +1,7 @@ +# Copyright (C) 2018, ARM Limited and contributors. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +--- +# Limits for apps diff --git a/deploy/adapters/ansible/kubernetes/roles/sriov/handlers/main.yml b/deploy/adapters/ansible/kubernetes/roles/sriov/handlers/main.yml new file mode 100644 index 00000000..221279b1 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/sriov/handlers/main.yml @@ -0,0 +1,62 @@ +# Copyright (C) 2018, ARM Limited and contributors. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +--- +- name: Sriov | delete default docker bridge + command: ip link delete docker0 + failed_when: false + notify: Sriov | restart docker + +# special cases for atomic because it defaults to live-restore: true +# So we disable live-restore to pickup the new flannel IP. 
After +# we enable it, we have to restart docker again to pickup the new +# setting and restore the original behavior +- name: Sriov | restart docker + command: /bin/true + notify: + - Sriov | reload systemd + - Sriov | reload docker.socket + - Sriov | configure docker live-restore true (atomic) + - Sriov | reload docker + - Sriov | pause while Docker restarts + - Sriov | wait for docker + +- name: Sriov | reload systemd + shell: systemctl daemon-reload + +- name: Sriov | reload docker.socket + service: + name: docker.socket + state: restarted + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] + +- name: Sriov | configure docker live-restore true (atomic) + replace: + name: /etc/docker/daemon.json + regexp: '"live-restore":.*true' + replace: '"live-restore": false' + when: is_atomic + +- name: Sriov | reload docker + service: + name: docker + state: restarted + +- name: Sriov | pause while Docker restarts + pause: + seconds: 10 + prompt: "Waiting for docker restart" + +- name: Sriov | wait for docker + command: "{{ docker_bin_dir }}/docker images" + register: docker_ready + retries: 10 + delay: 5 + until: docker_ready.rc == 0 + +- name: Sriov | reload kubelet + service: + name: kubelet + state: restarted diff --git a/deploy/adapters/ansible/kubernetes/roles/sriov/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/sriov/tasks/main.yml new file mode 100644 index 00000000..0e3e2f6d --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/sriov/tasks/main.yml @@ -0,0 +1,106 @@ +# Copyright (C) 2018, ARM Limited and contributors. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +--- +- name: Sriov | Verify if br_netfilter module exists + shell: "modinfo br_netfilter" + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false + +- name: Sriov | Enable br_netfilter module + modprobe: + name: br_netfilter + state: present + when: modinfo_br_netfilter.rc == 0 + +# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled +# when found if br_netfilter is not a module +- name: Sriov | Check if bridge-nf-call-iptables key exists + command: "sysctl net.bridge.bridge-nf-call-iptables" + failed_when: false + changed_when: false + register: sysctl_bridge_nf_call_iptables + +- name: Sriov | Enable bridge-nf-call tables + sysctl: + name: "{{ item }}" + state: present + value: 1 + reload: "yes" + when: modinfo_br_netfilter.rc == 1 and sysctl_bridge_nf_call_iptables.rc == 0 + with_items: + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + +- name: Sriov | Install Multus CNI + shell: |- + /usr/bin/docker run --rm --network=host -v /opt/cni/bin/:/opt/cni/bin/ golang:1.9 \ + bash -c "git clone https://github.com/Intel-Corp/multus-cni && cd multus-cni \ + && ./build && cp bin/multus /opt/cni/bin" + +- name: Sriov | Install Sriov CNI + shell: |- + /usr/bin/docker run --rm --network=host -v /opt/cni/bin/:/opt/cni/bin/ golang:1.9 \ + bash -c "git clone https://github.com/hustcat/sriov-cni && cd sriov-cni \ + && ./build && cp bin/sriov /opt/cni/bin" + +- name: Sriov | Install Flannel CNI + shell: |- + /usr/bin/docker run --rm --network=host -v /opt/cni/bin/:/host/opt/cni/bin/ \ + {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }} \ + sh -c "cp /opt/cni/bin/* 
/host/opt/cni/bin/" + +- name: Sriov | Remove all file in /etc/cni/net.d + shell: |- + rm -rf /etc/cni/net.d/ + mkdir -p /etc/cni/net.d/ + +- name: Sriov | Generate Sriov CNI Conf + copy: + content: | + { + "name": "minion-cni-network", + "type": "multus", + "kubeconfig": "/etc/kubernetes/node-kubeconfig.yaml", + "delegates": [ + { + "type": "flannel", + "masterplugin": true, + "delegate": { + "isDefaultGateway": true + } + } + ] + } + dest: "/etc/cni/net.d/multus-cni.conf" + owner: root + group: root + mode: 0644 + +- name: Sriov | Enable DHCP CNI + shell: /opt/cni/bin/dhcp daemon & + + +- name: Sriov | Create cni-sriov-rbac manifest + template: + src: cni-sriov-rbac.yml.j2 + dest: "{{ kube_config_dir }}/cni-sriov-rbac.yml" + register: sriov_rbac_manifest + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled + +- name: Sriov | Create cni-sriov manifest + template: + src: cni-sriov.yml.j2 + dest: "{{ kube_config_dir }}/cni-sriov.yml" + register: sriov_manifest + when: inventory_hostname == groups['kube-master'][0] + +- name: Sriov | Sriov tests manifest + template: + src: sriov-test-pod.yml + dest: "{{ kube_config_dir }}/sriov-test-pod.yml" + when: inventory_hostname == groups['kube-master'][0] diff --git a/deploy/adapters/ansible/kubernetes/roles/sriov/templates/cni-sriov-rbac.yml.j2 b/deploy/adapters/ansible/kubernetes/roles/sriov/templates/cni-sriov-rbac.yml.j2 new file mode 100644 index 00000000..1298aeaa --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/sriov/templates/cni-sriov-rbac.yml.j2 @@ -0,0 +1,49 @@ +# Copyright (C) 2018, ARM Limited and contributors. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sriov + namespace: "{{system_namespace}}" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: sriov +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: sriov +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: sriov +subjects: +- kind: ServiceAccount + name: sriov + namespace: "{{system_namespace}}" diff --git a/deploy/adapters/ansible/kubernetes/roles/sriov/templates/cni-sriov.yml.j2 b/deploy/adapters/ansible/kubernetes/roles/sriov/templates/cni-sriov.yml.j2 new file mode 100644 index 00000000..90c7f28c --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/sriov/templates/cni-sriov.yml.j2 @@ -0,0 +1,159 @@ +# Copyright (C) 2018, ARM Limited and contributors. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +--- +apiVersion: extensions/v1beta1 +kind: ThirdPartyResource +metadata: + name: network.kubernetes.com +description: "A specification of a Network obj in the kubernetes" +versions: +- name: v1 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: {{system_namespace}} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: {{system_namespace}} + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "type": "flannel", + "delegate": { + "isDefaultGateway": true + } + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "udp" + } + } +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: {{system_namespace}} + labels: + tier: node + app: flannel +spec: + template: + metadata: + labels: + tier: node + app: flannel + spec: + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: flannel + containers: + - name: kube-flannel + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run + - name: flannel-cfg + configMap: + name: kube-flannel-cfg +--- +apiVersion: "kubernetes.com/v1" +kind: Network +metadata: + name: flannel-conf + namespace: default +plugin: flannel +args: '[ + { + "masterplugin": true, + "delegate": { + "isDefaultGateway": true + } + } +]' +--- +apiVersion: "kubernetes.com/v1" +kind: Network +metadata: + name: sriov-conf1 + namespace: default +plugin: sriov +args: '[ + { + "master": "eth1.101", + "pfOnly": true, + "ipam": { + "type": "host-local", + "subnet": "192.168.123.0/24", + "rangeStart": "192.168.123.11", + "rangeEnd": "192.168.123.21", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "gateway": "192.168.123.1" + } + } +]' +--- +apiVersion: "kubernetes.com/v1" +kind: Network +metadata: + name: sriov-conf2 + namespace: default +plugin: sriov +args: '[ + { + "master": "eth1.101", + "pfOnly": true, + "ipam": { + "type": "host-local", + "subnet": "192.168.123.0/24", + "rangeStart": "192.168.123.31", + "rangeEnd": "192.168.123.41", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "gateway": "192.168.123.1" + } + } +]' diff --git a/deploy/adapters/ansible/kubernetes/roles/sriov/templates/sriov-test-pod.yml b/deploy/adapters/ansible/kubernetes/roles/sriov/templates/sriov-test-pod.yml new file mode 100644 index 00000000..849aca85 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/sriov/templates/sriov-test-pod.yml @@ -0,0 +1,51 @@ +# Copyright (C) 2018, ARM Limited and contributors. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: multus-test1 + annotations: + networks: '[ + { "name": "flannel-conf" }, + { "name": "sriov-conf1" } + ]' +spec: + containers: + - name: multus-test + image: "busybox" + command: ["top"] + stdin: true + tty: true + nodeSelector: + kubernetes.io/hostname: "host1" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" +--- +apiVersion: v1 +kind: Pod +metadata: + name: multus-test2 + annotations: + networks: '[ + { "name": "flannel-conf" }, + { "name": "sriov-conf2" } + ]' +spec: + containers: + - name: multus-test + image: "busybox" + command: ["top"] + stdin: true + tty: true + nodeSelector: + kubernetes.io/hostname: "host2" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" diff --git a/deploy/compass_conf/flavor/kubernetes.conf b/deploy/compass_conf/flavor/kubernetes.conf index 71acadff..e7e8ffc8 100755 --- a/deploy/compass_conf/flavor/kubernetes.conf +++ b/deploy/compass_conf/flavor/kubernetes.conf @@ -4,7 +4,7 @@ FLAVORS = [{ 'display_name': 'ansible-kubernetes', 'template': 'ansible-kubernetes.tmpl', 'roles': [ - 'kube_master', 'etcd', 'kube_node', 'ha' + 'kube_master', 'etcd', 'kube_node', 'ha', 'storage_master', 'storage_node' ], }] diff --git a/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-ocata.conf b/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-ocata.conf index 5771a645..cb467eee 100755 --- a/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-ocata.conf +++ b/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-ocata.conf @@ -13,51 +13,51 @@ CONFIG_MAPPING = { "config": { "rabbitmq": { "username": "guest", - "password": "guest" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "alarming": { "username": "aodh", - "password": "aodh" + "password": "" }, "network": { "username": "neutron", - "password": "neutron" + "password": "" }, "mysql": { "username": "root", - "password": "root" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" }, "heat": { "username": "heat", - "password": "heat" + "password": "" }, "policy": { "username": "congress", - "password": "congress" + "password": "" } } } @@ -72,51 +72,51 @@ CONFIG_MAPPING = { "config":{ "admin": { "username": "admin", - "password": "admin" + "password": "" }, "demo": { "username": "demo", - "password": "demo" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "alarming": { "username": "aodh", - "password": "aodh" + "password": "" }, "network": { "username": 
"quantum", - "password": "quantum" + "password": "" }, "object-store": { "username": "swift", - "password": "swift" + "password": "" }, "heat": { "username": "heat", - "password": "heat" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } diff --git a/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-pike.conf b/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-pike.conf index d108c9b5..a205a5ed 100755 --- a/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-pike.conf +++ b/deploy/compass_conf/flavor_mapping/HA-ansible-multinodes-pike.conf @@ -13,51 +13,51 @@ CONFIG_MAPPING = { "config": { "rabbitmq": { "username": "guest", - "password": "guest" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "alarming": { "username": "aodh", - "password": "aodh" + "password": "" }, "network": { "username": "neutron", - "password": "neutron" + "password": "" }, "mysql": { "username": "root", - "password": "root" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" }, "heat": { "username": "heat", - "password": "heat" + "password": "" }, "policy": { "username": "congress", - "password": "congress" + "password": "" } } } @@ -72,51 +72,51 @@ CONFIG_MAPPING = { "config":{ "admin": { "username": "admin", - "password": "admin" + "password": "" }, "demo": { "username": "demo", - "password": "demo" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "alarming": { "username": "aodh", - "password": "aodh" + "password": "" }, "network": { "username": "quantum", - "password": "quantum" + "password": "" }, "object-store": { "username": "swift", - "password": "swift" + "password": "" }, "heat": { "username": "heat", - "password": "heat" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } diff --git a/deploy/compass_conf/flavor_mapping/allinone.conf b/deploy/compass_conf/flavor_mapping/allinone.conf index 4752a805..11fadeaf 100755 --- a/deploy/compass_conf/flavor_mapping/allinone.conf +++ b/deploy/compass_conf/flavor_mapping/allinone.conf @@ -13,35 +13,35 @@ CONFIG_MAPPING = { "config": { "rabbitmq": { "username": "guest", - "password": "guest" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "mysql": { "username": "root", - "password": "root" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } @@ -56,35 +56,35 
@@ CONFIG_MAPPING = { "config":{ "admin": { "username": "admin", - "password": "admin" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "network": { "username": "quantum", - "password": "quantum" + "password": "" }, "object-store": { "username": "swift", - "password": "swift" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } diff --git a/deploy/compass_conf/flavor_mapping/ha-multinodes.conf b/deploy/compass_conf/flavor_mapping/ha-multinodes.conf index 34d76be1..1a3dc29f 100755 --- a/deploy/compass_conf/flavor_mapping/ha-multinodes.conf +++ b/deploy/compass_conf/flavor_mapping/ha-multinodes.conf @@ -13,35 +13,35 @@ CONFIG_MAPPING = { "config": { "rabbitmq": { "username": "guest", - "password": "guest" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "mysql": { "username": "root", - "password": "root" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } @@ -56,35 +56,35 @@ CONFIG_MAPPING = { "config":{ "admin": { "username": "admin", - "password": "admin" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "network": { "username": "quantum", - "password": "quantum" + "password": "" }, "object-store": { "username": "swift", - "password": "swift" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } diff --git a/deploy/compass_conf/flavor_mapping/multinodes.conf b/deploy/compass_conf/flavor_mapping/multinodes.conf index bed52f06..003d7989 100755 --- a/deploy/compass_conf/flavor_mapping/multinodes.conf +++ b/deploy/compass_conf/flavor_mapping/multinodes.conf @@ -13,35 +13,35 @@ CONFIG_MAPPING = { "config": { "rabbitmq": { "username": "guest", - "password": "guest" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "mysql": { "username": "root", - "password": "root" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } @@ -56,35 +56,35 @@ CONFIG_MAPPING = { "config":{ "admin": { "username": "admin", - "password": "admin" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, 
"image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "network": { "username": "quantum", - "password": "quantum" + "password": "" }, "object-store": { "username": "swift", - "password": "swift" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } diff --git a/deploy/compass_conf/flavor_mapping/single-contoller-multi-compute.conf b/deploy/compass_conf/flavor_mapping/single-contoller-multi-compute.conf index c7bbff73..26cc8610 100755 --- a/deploy/compass_conf/flavor_mapping/single-contoller-multi-compute.conf +++ b/deploy/compass_conf/flavor_mapping/single-contoller-multi-compute.conf @@ -13,35 +13,35 @@ CONFIG_MAPPING = { "config": { "rabbitmq": { "username": "guest", - "password": "guest" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "identity": { "username": "keystone", - "password": "keystone" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "mysql": { "username": "root", - "password": "root" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } @@ -56,35 +56,35 @@ CONFIG_MAPPING = { "config":{ "admin": { "username": "admin", - "password": "admin" + "password": "" }, "compute": { "username": "nova", - "password": "nova" + "password": "" }, "dashboard": { "username": "dashboard", - "password": "dashboard" + "password": "" }, "image": { "username": "glance", - "password": "glance" + "password": "" }, "metering": { "username": "ceilometer", - "password": "ceilometer" + "password": "" }, "network": { "username": "quantum", - "password": "quantum" + "password": "" }, "object-store": { "username": "swift", - "password": "swift" + "password": "" }, "volume": { "username": "cinder", - "password": "cinder" + "password": "" } } } diff --git a/deploy/compass_conf/package_installer/ansible-kubernetes.conf b/deploy/compass_conf/package_installer/ansible-kubernetes.conf index 820691b7..044af9a9 100755 --- a/deploy/compass_conf/package_installer/ansible-kubernetes.conf +++ b/deploy/compass_conf/package_installer/ansible-kubernetes.conf @@ -7,7 +7,7 @@ SETTINGS = { 'playbook_file': 'site.yml', 'inventory_file': 'inventory.py', 'inventory_json_file': 'inventory.json', - 'inventory_group': ['kube_master', 'etcd', 'kube_node', 'ha'], + 'inventory_group': ['kube_master', 'etcd', 'kube_node', 'ha', 'ceph_adm', 'ceph_mon', 'ceph_osd', 'storage_master', 'storage_node'], 'group_variable': 'all', 'etc_hosts_path': 'roles/pre-k8s/templates/hosts', 'runner_dirs': ['roles','kubernetes/roles'] diff --git a/deploy/compass_conf/role/kubernetes_ansible.conf b/deploy/compass_conf/role/kubernetes_ansible.conf index c27779ad..3e79cbb9 100755 --- a/deploy/compass_conf/role/kubernetes_ansible.conf +++ b/deploy/compass_conf/role/kubernetes_ansible.conf @@ -15,6 +15,15 @@ ROLES = [{ 'role': 'ha', 'display_name': 'ha', 'description': 'ha' +}, { + 'role': 'storage_master', + 'display_name': 'storage master', + 'description': 'storage master', + 'optional': True +}, { + 'role': 'storage_node', + 'display_name': 'storage node', + 'description': 'storage node', + 'optional': True } - ] diff --git 
a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl index 59130fec..0d529561 100644 --- a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl @@ -40,6 +40,7 @@ deploy_type: $getVar('deploy_type', 'virtual') public_cidr: $kube_nodes[0]['install']['subnet'] storage_cidr: "{{ ip_settings[inventory_hostname]['storage']['cidr'] }}" mgmt_cidr: "{{ ip_settings[inventory_hostname]['mgmt']['cidr'] }}" +external_cidr: "{{ ip_settings[inventory_hostname]['external']['cidr'] }}" public_net_info: "{{ network_cfg.public_net_info }}" host_ip_settings: "{{ ip_settings[inventory_hostname] }}" @@ -93,31 +94,6 @@ host_index: #end for ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set keystone_pass = $console_credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set heat_dbpass = $credentials.heat.password -#set heat_pass = $console_credentials.heat.password -#set neutron_dbpass = $credentials.network.password -#set neutron_pass = $console_credentials.network.password -#set ceilometer_dbpass = $credentials.metering.password -#set ceilometer_pass = $console_credentials.metering.password -#set aodh_dbpass = $credentials.alarming.password -#set aodh_pass = $console_credentials.alarming.password -#set congress_dbpass = $credentials.policy.password -#set congress_pass = $console_credentials.policy.password -#set admin_pass = $console_credentials.admin.password -#set demo_pass = $console_credentials.demo.password cluster_name: $cluster_name @@ -135,30 +111,6 @@ ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 erlang.cookie: DJJVECFMCJPVYQTJTDWG -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -KEYSTONE_PASS: $keystone_pass -CEILOMETER_DBPASS: $ceilometer_dbpass -CEILOMETER_PASS: $ceilometer_pass -AODH_DBPASS: $aodh_dbpass -AODH_PASS: $aodh_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_dbpass -NEUTRON_PASS: $neutron_pass -HEAT_DBPASS: $heat_dbpass -HEAT_PASS: $heat_pass -CONGRESS_DBPASS: $congress_dbpass -CONGRESS_PASS: $congress_pass -DEMO_PASS: $demo_pass -ADMIN_PASS: $admin_pass - #set plugins = $getVar('plugins', []) #for item in plugins #set keys = $item.keys() @@ -194,10 +146,6 @@ build_in_image_name: cirros-0.3.3-x86_64-disk.img physical_device: /dev/sdb -odl_username: admin -odl_password: admin -odl_api_port: 8080 - odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz odl_pkg_name: karaf.tar.gz 
odl_home: "/opt/opendaylight-0.2.2/" diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/HA-ansible-multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/HA-ansible-multinodes.tmpl index 527f480a..d7a0e1b4 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/HA-ansible-multinodes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/HA-ansible-multinodes.tmpl @@ -142,31 +142,6 @@ host_index: #end for ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set keystone_pass = $console_credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set heat_dbpass = $credentials.heat.password -#set heat_pass = $console_credentials.heat.password -#set neutron_dbpass = $credentials.network.password -#set neutron_pass = $console_credentials.network.password -#set ceilometer_dbpass = $credentials.metering.password -#set ceilometer_pass = $console_credentials.metering.password -#set aodh_dbpass = $credentials.alarming.password -#set aodh_pass = $console_credentials.alarming.password -#set congress_dbpass = $credentials.policy.password -#set congress_pass = $console_credentials.policy.password -#set admin_pass = $console_credentials.admin.password -#set demo_pass = $console_credentials.demo.password cluster_name: $cluster_name @@ -184,30 +159,6 @@ ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 erlang.cookie: DJJVECFMCJPVYQTJTDWG -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -KEYSTONE_PASS: $keystone_pass -CEILOMETER_DBPASS: $ceilometer_dbpass -CEILOMETER_PASS: $ceilometer_pass -AODH_DBPASS: $aodh_dbpass -AODH_PASS: $aodh_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_dbpass -NEUTRON_PASS: $neutron_pass -HEAT_DBPASS: $heat_dbpass -HEAT_PASS: $heat_pass -CONGRESS_DBPASS: $congress_dbpass -CONGRESS_PASS: $congress_pass -DEMO_PASS: $demo_pass -ADMIN_PASS: $admin_pass - #set plugins = $getVar('plugins', []) #for item in plugins #set keys = $item.keys() @@ -255,9 +206,6 @@ api_workers: 1 physical_device: /dev/sdb -odl_username: admin -odl_password: admin -odl_api_port: 8080 odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz odl_pkg_name: karaf.tar.gz diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/allinone.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/allinone.tmpl index 2fc97c41..aa4d7e67 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/allinone.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/allinone.tmpl @@ 
-17,20 +17,6 @@ INTERFACE_NAME: $network_external_nic INTERNAL_INTERFACE: $network_internal_nic #end for -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set admin_pass = $console_credentials.admin.password -#set neutron_pass = $console_credentials.network.password compute_controller_host: "{{ controller_host }}" db_host: "{{ controller_host }}" @@ -55,20 +41,6 @@ ocata_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -DEMO_PASS: demo_secret -ADMIN_PASS: $admin_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_pass -NEUTRON_PASS: $neutron_pass NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] #NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/multinodes.tmpl index 9fa649bc..11f6fa79 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/multinodes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/multinodes.tmpl @@ -86,21 +86,6 @@ INTERFACE_NAME: $network_external_nic INTERNAL_INTERFACE: $network_internal_nic #end for -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set admin_pass = $console_credentials.admin.password -#set neutron_pass = $console_credentials.network.password - cluster_name: $cluster_name odl_controller: 10.1.0.15 @@ -116,20 +101,6 @@ ocata_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -DEMO_PASS: demo_secret -ADMIN_PASS: $admin_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_pass -NEUTRON_PASS: $neutron_pass 
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] #NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] @@ -151,9 +122,6 @@ physical_device: /dev/sdb internal_interface: "ansible_{{ INTERNAL_INTERFACE }}" internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}" HA_VIP: "{{ internal_ip }}" -odl_username: admin -odl_password: admin -odl_api_port: 8080 odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz odl_pkg_name: karaf.tar.gz diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/single-controller.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/single-controller.tmpl index 57c23366..e7544170 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/single-controller.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_ocata/vars/single-controller.tmpl @@ -29,21 +29,6 @@ INTERFACE_NAME: $network_external_nic INTERNAL_INTERFACE: $network_internal_nic #end for -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set admin_pass = $console_credentials.admin.password -#set neutron_pass = $console_credentials.network.password - cluster_name: $cluster_name deploy_type: $getVar('deploy_type', 'virtual') compute_controller_host: "{{ controller_host }}" @@ -67,20 +52,6 @@ ocata_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -DEMO_PASS: demo_secret -ADMIN_PASS: $admin_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_pass -NEUTRON_PASS: $neutron_pass NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] #NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] @@ -103,6 +74,3 @@ physical_device: /dev/sdb internal_interface: "ansible_{{ INTERNAL_INTERFACE }}" internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}" HA_VIP: "{{ internal_ip }}" -odl_username: admin -odl_password: admin -odl_api_port: 8080 diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/HA-ansible-multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/HA-ansible-multinodes.tmpl index 6b226e6f..25deaa55 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/HA-ansible-multinodes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/HA-ansible-multinodes.tmpl @@ -142,31 +142,6 @@ host_index: #end for ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD -#set credentials = 
$getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set keystone_pass = $console_credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set heat_dbpass = $credentials.heat.password -#set heat_pass = $console_credentials.heat.password -#set neutron_dbpass = $credentials.network.password -#set neutron_pass = $console_credentials.network.password -#set ceilometer_dbpass = $credentials.metering.password -#set ceilometer_pass = $console_credentials.metering.password -#set aodh_dbpass = $credentials.alarming.password -#set aodh_pass = $console_credentials.alarming.password -#set congress_dbpass = $credentials.policy.password -#set congress_pass = $console_credentials.policy.password -#set admin_pass = $console_credentials.admin.password -#set demo_pass = $console_credentials.demo.password cluster_name: $cluster_name @@ -184,30 +159,6 @@ ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 erlang.cookie: DJJVECFMCJPVYQTJTDWG -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -KEYSTONE_PASS: $keystone_pass -CEILOMETER_DBPASS: $ceilometer_dbpass -CEILOMETER_PASS: $ceilometer_pass -AODH_DBPASS: $aodh_dbpass -AODH_PASS: $aodh_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_dbpass -NEUTRON_PASS: $neutron_pass -HEAT_DBPASS: $heat_dbpass -HEAT_PASS: $heat_pass -CONGRESS_DBPASS: $congress_dbpass -CONGRESS_PASS: $congress_pass -DEMO_PASS: $demo_pass -ADMIN_PASS: $admin_pass - #set plugins = $getVar('plugins', []) #for item in plugins #set keys = $item.keys() @@ -255,8 +206,6 @@ api_workers: 1 physical_device: /dev/sdb -odl_username: admin -odl_password: admin odl_api_port: 8080 odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/allinone.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/allinone.tmpl index 0fb6581f..31ea7be8 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/allinone.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/allinone.tmpl @@ -17,20 +17,6 @@ INTERFACE_NAME: $network_external_nic INTERNAL_INTERFACE: $network_internal_nic #end for -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set 
dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set admin_pass = $console_credentials.admin.password -#set neutron_pass = $console_credentials.network.password compute_controller_host: "{{ controller_host }}" db_host: "{{ controller_host }}" @@ -55,20 +41,6 @@ pike_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial- ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -DEMO_PASS: demo_secret -ADMIN_PASS: $admin_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_pass -NEUTRON_PASS: $neutron_pass NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] #NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] @@ -91,6 +63,3 @@ internal_interface: "ansible_{{ INTERNAL_INTERFACE }}" internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}" HA_VIP: "{{ internal_ip }}" -odl_username: admin -odl_password: admin -odl_api_port: 8080 diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/multinodes.tmpl index 10056042..51f8295b 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/multinodes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/multinodes.tmpl @@ -86,21 +86,6 @@ INTERFACE_NAME: $network_external_nic INTERNAL_INTERFACE: $network_internal_nic #end for -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set admin_pass = $console_credentials.admin.password -#set neutron_pass = $console_credentials.network.password - cluster_name: $cluster_name odl_controller: 10.1.0.15 @@ -116,20 +101,6 @@ pike_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial- ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -DEMO_PASS: demo_secret -ADMIN_PASS: $admin_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_pass -NEUTRON_PASS: $neutron_pass NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] #NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/single-controller.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/single-controller.tmpl index 08423749..3ffd9b2f 100755 --- 
a/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/single-controller.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_pike/vars/single-controller.tmpl @@ -29,20 +29,6 @@ INTERFACE_NAME: $network_external_nic INTERNAL_INTERFACE: $network_internal_nic #end for -#set credentials = $getVar('service_credentials', {}) -#set console_credentials = $getVar('console_credentials', {}) -#set rabbit_username = $credentials.rabbitmq.username -#set rabbit_password = $credentials.rabbitmq.password -#set keystone_dbpass = $credentials.identity.password -#set glance_dbpass = $credentials.image.password -#set glance_pass = $console_credentials.image.password -#set nova_dbpass = $credentials.compute.password -#set nova_pass = $console_credentials.compute.password -#set dash_dbpass = $credentials.dashboard.password -#set cinder_dbpass = $credentials.volume.password -#set cinder_pass = $console_credentials.volume.password -#set admin_pass = $console_credentials.admin.password -#set neutron_pass = $console_credentials.network.password cluster_name: $cluster_name deploy_type: $getVar('deploy_type', 'virtual') @@ -67,20 +53,6 @@ pike_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial- ADMIN_TOKEN: admin CEILOMETER_TOKEN: c095d479023a0fd58a54 -RABBIT_USER: $rabbit_username -RABBIT_PASS: $rabbit_password -KEYSTONE_DBPASS: $keystone_dbpass -DEMO_PASS: demo_secret -ADMIN_PASS: $admin_pass -GLANCE_DBPASS: $glance_dbpass -GLANCE_PASS: $glance_pass -NOVA_DBPASS: $nova_dbpass -NOVA_PASS: $nova_pass -DASH_DBPASS: $dash_dbpass -CINDER_DBPASS: $cinder_dbpass -CINDER_PASS: $cinder_pass -NEUTRON_DBPASS: $neutron_pass -NEUTRON_PASS: $neutron_pass NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] #NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] @@ -103,6 +75,3 @@ physical_device: /dev/sdb internal_interface: "ansible_{{ INTERNAL_INTERFACE }}" internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}" HA_VIP: "{{ internal_ip }}" -odl_username: admin -odl_password: admin -odl_api_port: 8080 diff --git a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml new file mode 100644 index 00000000..6cf62db7 --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml @@ -0,0 +1,74 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiUser: root +ipmiVer: '2.0' + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + mac: 'F8:4A:BF:55:A2:8D' + interfaces: + - eth1: 'F8:4A:BF:55:A2:8E' + ipmiIp: 172.16.130.26 + ipmiPass: Opnfv@pod1 + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: 'D8:49:0B:DA:5A:B7' + interfaces: + - eth1: 'D8:49:0B:DA:5A:B8' + ipmiIp: 172.16.130.27 + ipmiPass: Opnfv@pod1 + roles: + - kube_master + - etcd + - ha + + - name: host3 + mac: '78:D7:52:A0:B1:99' + interfaces: + - eth1: '78:D7:52:A0:B1:9A' + ipmiIp: 172.16.130.29 + ipmiPass: Opnfv@pod1 + roles: + - kube_master + - etcd + - ha + - storage_master + + - name: host4 + mac: 'D8:49:0B:DA:5B:5D' + interfaces: + - eth1: 'D8:49:0B:DA:5B:5E' + ipmiIp: 172.16.130.30 + ipmiPass: Opnfv@pod1 + roles: + - kube_node + - storage_node + + - name: host5 + mac: 'D8:49:0B:DA:56:85' + interfaces: + - eth1: 'D8:49:0B:DA:56:86' + ipmiIp: 172.16.130.31 + ipmiPass: Opnfv@pod1 + roles: + - kube_node + - storage_node diff --git a/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-nofeature-ha.yml new file mode 100644 index 00000000..a96a5259 --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-nofeature-ha.yml @@ -0,0 +1,73 @@ +############################################################################## +# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- + +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiVer: '2.0' + +hosts: + - name: host1 + mac: 'EC:38:8F:79:0C:2C' + ipmiUser: root + ipmiPass: Opnfv@pod2 + ipmiIp: 172.16.130.20 + interfaces: + - eth1: 'EC:38:8F:79:0C:2D' + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: 'EC:38:8F:79:0C:48' + ipmiIp: 172.16.130.19 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:49' + roles: + - kube_master + - etcd + - ha + + - name: host3 + mac: 'EC:38:8F:79:10:CC' + ipmiIp: 172.16.130.18 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:10:CD' + roles: + - kube_master + - etcd + - ha + + - name: host4 + mac: 'EC:38:8F:79:0C:6C' + ipmiIp: 172.16.130.17 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:6D' + roles: + - kube_node + + - name: host5 + mac: 'EC:38:8F:7A:E6:ED' + ipmiIp: 172.16.130.16 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:7A:E6:EE' + roles: + - kube_node diff --git a/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-stor4nfv-ha.yml b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-stor4nfv-ha.yml new file mode 100644 index 00000000..4fbea1f0 --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-stor4nfv-ha.yml @@ -0,0 +1,79 @@ +############################################################################## +# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- + +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiVer: '2.0' + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + mac: 'EC:38:8F:79:0C:2C' + ipmiUser: root + ipmiPass: Opnfv@pod2 + ipmiIp: 172.16.130.20 + interfaces: + - eth1: 'EC:38:8F:79:0C:2D' + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: 'EC:38:8F:79:0C:48' + ipmiIp: 172.16.130.19 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:49' + roles: + - kube_master + - etcd + - ha + + - name: host3 + mac: 'EC:38:8F:79:10:CC' + ipmiIp: 172.16.130.18 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:10:CD' + roles: + - kube_master + - etcd + - ha + - storage_master + + - name: host4 + mac: 'EC:38:8F:79:0C:6C' + ipmiIp: 172.16.130.17 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:6D' + roles: + - kube_node + - storage_node + + - name: host5 + mac: 'EC:38:8F:7A:E6:ED' + ipmiIp: 172.16.130.16 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:7A:E6:EE' + roles: + - kube_node + - storage_node diff --git a/deploy/conf/hardware_environment/huawei-pod8/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod8/k8-nosdn-nofeature-ha.yml new file mode 100644 index 00000000..60b7466b --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod8/k8-nosdn-nofeature-ha.yml @@ -0,0 +1,73 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- + +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiVer: '2.0' + +hosts: + - name: host1 + mac: '14:30:04:F2:24:CB' + ipmiUser: root + ipmiPass: Opnfv@podarm + ipmiIp: 172.16.130.81 + interfaces: + - eth1: '14:30:04:F2:24:CC' + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: '14:30:04:F2:24:3B' + ipmiIp: 172.16.130.82 + ipmiUser: root + ipmiPass: Opnfv@podarm + interfaces: + - eth1: '14:30:04:F2:24:3C' + roles: + - kube_master + - etcd + - ha + + - name: host3 + mac: '14:30:04:F2:24:FB' + ipmiIp: 172.16.130.83 + ipmiUser: root + ipmiPass: Opnfv@podarm + interfaces: + - eth1: '14:30:04:F2:24:FC' + roles: + - kube_master + - etcd + - ha + + - name: host4 + mac: '14:30:04:F2:25:EB' + ipmiIp: 172.16.130.84 + ipmiUser: root + ipmiPass: Opnfv@podarm + interfaces: + - eth1: '14:30:04:F2:25:EC' + roles: + - kube_node + + - name: host5 + mac: '14:30:04:F2:25:8B' + ipmiIp: 172.16.130.85 + ipmiUser: root + ipmiPass: Opnfv@podarm + interfaces: + - eth1: '14:30:04:F2:25:8C' + roles: + - kube_node diff --git a/deploy/conf/hardware_environment/huawei-pod8/k8-nosdn-nofeature-noha.yml b/deploy/conf/hardware_environment/huawei-pod8/k8-nosdn-nofeature-noha.yml new file mode 100644 index 00000000..7a1a92c1 --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod8/k8-nosdn-nofeature-noha.yml @@ -0,0 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- + +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiVer: '2.0' + +hosts: + - name: host1 + mac: '14:30:04:F2:24:CB' + ipmiUser: root + ipmiPass: Opnfv@podarm + ipmiIp: 172.16.130.81 + interfaces: + - eth1: '14:30:04:F2:24:CC' + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: '14:30:04:F2:24:3B' + ipmiIp: 172.16.130.82 + ipmiUser: root + ipmiPass: Opnfv@podarm + interfaces: + - eth1: '14:30:04:F2:24:3C' + roles: + - kube_node diff --git a/deploy/conf/hardware_environment/huawei-pod8/network.yml b/deploy/conf/hardware_environment/huawei-pod8/network.yml new file mode 100644 index 00000000..6a75f34d --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod8/network.yml @@ -0,0 +1,126 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +nic_mappings: [] +bond_mappings: [] + +provider_net_mappings: + - name: br-provider + network: physnet + interface: eth10 + type: ovs + role: + - controller + +sys_intf_mappings: + - name: mgmt + interface: eth0 + type: normal + vlan_tag: None + role: + - controller + - compute + + - name: tenant + interface: eth1 + type: normal + vlan_tag: 1401 + role: + - controller + - compute + + - name: storage + interface: eth1 + type: normal + vlan_tag: 1402 + role: + - controller + - compute + + - name: external + interface: eth1 + type: normal + vlan_tag: None + role: + - controller + - compute + +ip_settings: + - name: mgmt + ip_ranges: + - - "10.1.0.50" + - "10.1.0.100" + dhcp_ranges: + - - "10.1.0.2" + - "10.1.0.49" + cidr: "10.1.0.0/24" + gw: "10.1.0.1" + role: + - controller + - compute + + - name: tenant + ip_ranges: + - - "172.16.1.1" + - "172.16.1.50" + cidr: "172.16.1.0/24" + role: + - controller + - compute + + - name: storage + ip_ranges: + - - "172.16.2.1" + - "172.16.2.50" + cidr: "172.16.2.0/24" + role: + - controller + - compute + + - name: external + ip_ranges: + - - "172.30.14.10" + - "172.30.14.50" + cidr: "172.30.14.0/24" + gw: "172.30.14.1" + role: + - controller + - compute + +internal_vip: + ip: 10.1.0.222 + netmask: "24" + interface: mgmt + +public_vip: + ip: 172.30.14.222 + netmask: "24" + interface: external + +onos_nic: eth2 +tenant_net_info: + type: vxlan + range: "1:1000" + provider_network: None + +public_net_info: + enable: "True" + network: ext-net + type: flat + segment_id: 10 + subnet: ext-subnet + provider_network: physnet + router: router-ext + enable_dhcp: "False" + no_gateway: "False" + external_gw: "172.30.14.1" + floating_ip_cidr: "172.30.14.0/24" + floating_ip_start: "172.30.14.100" + floating_ip_end: "172.30.14.200" diff --git a/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml index 7cc2c215..660f5e2e 100644 --- a/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml +++ b/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml @@ -27,6 +27,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host2 mac: 'A4:BF:01:14:01:13' @@ -39,6 +40,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host3 mac: 'A4:BF:01:14:71:1E' @@ -51,6 +53,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host4 mac: 'A4:BF:01:16:2F:17' diff --git a/deploy/conf/network_cfg_sriov.yaml b/deploy/conf/network_cfg_sriov.yaml new file mode 100644 index 00000000..fcde4c95 --- /dev/null +++ b/deploy/conf/network_cfg_sriov.yaml @@ -0,0 +1,109 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +nic_mappings: [] +bond_mappings: [] + +provider_net_mappings: + - name: br-provider + network: physnet + interface: eth10 + type: ovs + role: + - controller + +sys_intf_mappings: + - name: mgmt + interface: eth0 + type: normal + vlan_tag: None + role: + - controller + - compute + + - name: tenant + interface: eth1 + type: normal + vlan_tag: 101 + role: + - controller + - compute + + - name: external + interface: eth1 + type: normal + vlan_tag: None + role: + - controller + - compute + +ip_settings: + - name: mgmt + ip_ranges: + - - "10.1.0.50" + - "10.1.0.100" + dhcp_ranges: + - - "10.1.0.2" + - "10.1.0.49" + cidr: "10.1.0.0/24" + gw: "10.1.0.1" + role: + - controller + - compute + + - name: tenant + ip_ranges: + - - "172.16.1.2" + - "172.16.1.250" + cidr: "172.16.1.0/24" + role: + - controller + - compute + + - name: external + ip_ranges: + - - "192.16.1.210" + - "192.16.1.220" + cidr: "192.16.1.0/24" + gw: "192.16.1.1" + role: + - controller + - compute + +internal_vip: + ip: 10.1.0.222 + netmask: "24" + interface: mgmt + +public_vip: + ip: 192.16.1.222 + netmask: "24" + interface: external + +onos_nic: eth2 +tenant_net_info: + type: vxlan + range: "1:1000" + provider_network: None + +public_net_info: + enable: "True" + network: ext-net + type: flat + segment_id: 1000 + subnet: ext-subnet + provider_network: physnet + router: router-ext + enable_dhcp: "False" + no_gateway: "False" + external_gw: "192.16.1.1" + floating_ip_cidr: "192.16.1.0/24" + floating_ip_start: "192.16.1.101" + floating_ip_end: "192.16.1.199" diff --git a/deploy/conf/vm_environment/k8-nosdn-stor4nfv-ha.yml b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-ha.yml new file mode 100644 index 00000000..e5e458f7 --- /dev/null +++ b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-ha.yml @@ -0,0 +1,45 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +TYPE: virtual +FLAVOR: cluster + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + roles: + - kube_master + - etcd + - ha + + - name: host2 + roles: + - kube_master + - etcd + - ha + + - name: host3 + roles: + - kube_master + - etcd + - ha + - storage_master + + - name: host4 + roles: + - kube_node + - storage_node + + - name: host5 + roles: + - kube_node + - storage_node diff --git a/deploy/conf/vm_environment/k8-nosdn-stor4nfv-noha.yml b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-noha.yml new file mode 100644 index 00000000..f8c29b3e --- /dev/null +++ b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-noha.yml @@ -0,0 +1,28 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +TYPE: virtual +FLAVOR: cluster + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + roles: + - kube_master + - etcd + - storage_master + - ha + + - name: host2 + roles: + - kube_node + - storage_node diff --git a/docs/release/installation/k8s-deploy-arm.rst b/docs/release/installation/k8s-deploy-arm.rst new file mode 100644 index 00000000..b5b54c20 --- /dev/null +++ b/docs/release/installation/k8s-deploy-arm.rst @@ -0,0 +1,106 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International Licence. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) by Yibo Cai (Arm) + +Validated platform +================== + +================ ========= ================ ======== + Jump server Node +--------------------------- -------------------------- +distro libvirt distro k8s +================ ========= ================ ======== +ubuntu 16.04.3 1.3.1 centos7 1708 1.7.5 +================ ========= ================ ======== + +Prepare jump server +=================== +This document assumes you are using a baremetal Arm server as the Compass4NFV jump server. It is possible to deploy the jump server inside a virtual machine, but that case is not covered here. + +#. Install Ubuntu 16.04.3 aarch64 on the jump server. + +#. Install required packages. + + .. code-block:: bash + + $ sudo apt install docker.io libvirt-bin virt-manager qemu qemu-efi + +#. Disable DHCP on the default libvirt network. + + Libvirt creates a default network at installation, which enables DHCP and occupies port 67. This conflicts with the compass-cobbler container. + + .. code-block:: bash + + $ sudo virsh net-edit default + + .. code-block:: xml + + <!-- remove the lines below and save/quit --> + <dhcp> + <range start='192.168.122.2' end='192.168.122.254'/> + </dhcp> + + .. code-block:: bash + + $ sudo virsh net-destroy default + $ sudo virsh net-start default + +#. Make sure ports 67, 69, 80, 443 are free. + + Compass-cobbler requires ports 67 and 69 to provide DHCP and TFTP services. Compass-deck provides HTTP and HTTPS through ports 80 and 443. All these ports should be free before deployment. + +#. Tear down the apparmor service. + + .. code-block:: bash + + $ sudo service apparmor teardown + +#. Enable password-less sudo for the current user (optional). + + +Build Arm tarball +================= + +Clone the Compass4NFV code, then run the command below to build the deployment tarball for Arm. + +.. code-block:: bash + + $ COMPASS_ISO_REPO='http://people.linaro.org/~yibo.cai/compass' ./build.sh + +This downloads and archives the Ubuntu/CentOS installation ISOs and the Compass core Docker images for later deployment. + + +Deploy K8s in VM +================ +This section introduces the steps to deploy a K8s cluster in virtual machines running on the jump server. Two VM nodes will be created, one master and one minion, with flannel networking. + +Clear old Compass core +---------------------- + +Compass core consists of five containers that are responsible for deploying K8s clusters. + +- *compass-deck*: provides the API service and web UI +- *compass-tasks*: deploys K8s to nodes +- *compass-cobbler*: deploys the OS to nodes +- *compass-db*: MySQL service +- *compass-mq*: RabbitMQ service + +Run the command below to remove any running Compass containers for a clean deployment. + +.. 
code-block:: bash + + $ docker rm -f `docker ps | grep compass | cut -f1 -d' '` + +Deploy OS and K8s +----------------- +To deploy OS and K8s on two virtual nodes, run: + +.. code-block:: bash + + $ ADAPTER_OS_PATTERN='(?i)CentOS-7.*arm.*' \ + OS_VERSION=centos7 \ + KUBERNETES_VERSION=v1.7.5 \ + DHA=${PWD}/deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml \ + NETWORK=${PWD}/deploy/conf/vm_environment/network.yml \ + VIRT_NUMBER=2 VIRT_CPUS=4 VIRT_MEM=8192 VIRT_DISK=50G \ + ./deploy.sh diff --git a/plugins/barometer/roles/collectd/tasks/collectd.yml b/plugins/barometer/roles/collectd/tasks/collectd.yml index 4167e71b..0f6a6266 100644 --- a/plugins/barometer/roles/collectd/tasks/collectd.yml +++ b/plugins/barometer/roles/collectd/tasks/collectd.yml @@ -143,6 +143,17 @@ dest: /root/collectd_sample_configs/virt.conf when: libvirt_result|succeeded +- name: configure virt conf extra stats when rdt is present + remote_user: root + shell: | + sed -i '/ExtraStats/s/^#//g' /root/collectd_sample_configs/virt.conf + when: rdt_result|failed and libvirt_result|succeeded + +- name: configure rrdtool conf + template: + src: rrdtool.conf.j2 + dest: /root/collectd_sample_configs/rrdtool.conf + - name: configure intel_pmu conf remote_user: root template: diff --git a/plugins/barometer/roles/collectd/templates/default_plugins.conf.j2 b/plugins/barometer/roles/collectd/templates/default_plugins.conf.j2 index bd1850e6..a4d632a6 100644 --- a/plugins/barometer/roles/collectd/templates/default_plugins.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/default_plugins.conf.j2 @@ -11,21 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -#Hostname "" +Hostname "{{ inventory_hostname }}" LoadPlugin cpufreq LoadPlugin disk -#LoadPlugin ethstat -#LoadPlugin ipc -#LoadPlugin ipmi +LoadPlugin ethstat +LoadPlugin ipc +LoadPlugin ipmi LoadPlugin load LoadPlugin memory LoadPlugin numa LoadPlugin processes -#LoadPlugin df -#LoadPlugin turbostat -#LoadPlugin uptime -#LoadPlugin contextswitch +LoadPlugin df +LoadPlugin turbostat +LoadPlugin uptime +LoadPlugin contextswitch LoadPlugin irq LoadPlugin swap diff --git a/plugins/barometer/roles/collectd/templates/rrdtool.conf.j2 b/plugins/barometer/roles/collectd/templates/rrdtool.conf.j2 new file mode 100644 index 00000000..50bd6603 --- /dev/null +++ b/plugins/barometer/roles/collectd/templates/rrdtool.conf.j2 @@ -0,0 +1,21 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+Loadplugin rrdtool + +<Plugin rrdtool> + DataDir "/tmp/collectd/rrd" + CacheFlush 120 + WritesPerSecond 50 +</Plugin> + diff --git a/plugins/barometer/roles/collectd/templates/virt.conf.j2 b/plugins/barometer/roles/collectd/templates/virt.conf.j2 index 8048bc13..c6161237 100644 --- a/plugins/barometer/roles/collectd/templates/virt.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/virt.conf.j2 @@ -27,6 +27,6 @@ LoadPlugin virt # InterfaceFormat name # PluginInstanceFormat name # Instances 1 - ExtraStats "cpu_util disk disk_err domain_state fs_info job_stats_background pcpu perf vcpupin" +# ExtraStats "cpu_util disk disk_err domain_state fs_info job_stats_background pcpu perf vcpupin" </Plugin> diff --git a/plugins/stor4nfv/plugin.desc b/plugins/stor4nfv/plugin.desc new file mode 100644 index 00000000..ae589a60 --- /dev/null +++ b/plugins/stor4nfv/plugin.desc @@ -0,0 +1,45 @@ +# Plugin for Stor4nfv service for Compass4nfv. +# Stor4NFV provides a storage solution based on Ceph and +# OpenSDS, and focuses on the optimization for storage +# intensive use cases of NFV, like I/O performance improvements. +# +# More details can be found in the development document. +# ############################################################## +--- +plugin: + # plugin name; it also acts as the switch to enable/disable the plugin in scenario + # files + name: stor4nfv + + description: provide a storage solution based on Ceph and OpenSDS + + maintainers: + + # host os type: ubuntu/centos + os_version: ubuntu + + # true: this plugin is deployed separately on a new node + # false: this plugin is deployed on controller or compute node + independent_hosts: false + + # artifact: package download url for this plugin + artifacts: + url: + + # orchestration + # A plugin can have multiple components; each component may need to be + # installed on a different inventory or have its own configuration. + # Because Compass4nfv currently only supports ansible, each component's + # installation and configuration scripts need to use ansible. + # cm: configuration management tool; only ansible is supported + # role: each component corresponds to an ansible role that lives in the same + # directory as plugin.desc. + # phrase: + # inventory: + orchestration: + cm: ansible + roles: + - role: stor4nfv + phrase: post_k8s + inventory: + - localhost diff --git a/plugins/stor4nfv/roles/stor4nfv/files/configure_vars.sh b/plugins/stor4nfv/roles/stor4nfv/files/configure_vars.sh new file mode 100644 index 00000000..878f1542 --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/files/configure_vars.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# + +cd $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/ansible + +sed -i 's/^workplace.*/workplace: \/root/g' group_vars/common.yml + +sed -i 's/^enabled_backend.*/enabled_backend: ceph/g' group_vars/osdsdock.yml + +sed -i 's/^ceph_pool_name.*/ceph_pool_name: "rbd"/g' group_vars/osdsdock.yml + +sed -i 's/^ceph_origin.*/ceph_origin: repository/g' group_vars/ceph/all.yml + +sed -i 's/^ceph_repository.*/ceph_repository: community/g' group_vars/ceph/all.yml + +sed -i 's/^ceph_stable_release.*/ceph_stable_release: luminous/g' group_vars/ceph/all.yml + +sed -i 's|^public_network.*|public_network: '"$1"'|g' group_vars/ceph/all.yml + +sed -i 's|^cluster_network.*|cluster_network: '"$2"'|g' group_vars/ceph/all.yml + +sed -i 's/^monitor_interface.*/monitor_interface: eth0/g' group_vars/ceph/all.yml + +sed -i 's/^devices:.*/devices: [\/dev\/loop0, \/dev\/loop1, \/dev\/loop2]/g' group_vars/ceph/osds.yml + +sed -i 's/^osd_scenario.*/osd_scenario: collocated/g' group_vars/ceph/osds.yml + +sed -i 's/^db_endpoint.*/db_endpoint: localhost:62379,localhost:62380/g' group_vars/osdsdb.yml + +sed -i 's/^etcd_port.*/etcd_port: 62379/g' group_vars/osdsdb.yml + +sed -i 's/^etcd_peer_port.*/etcd_peer_port: 62380/g' group_vars/osdsdb.yml diff --git a/plugins/stor4nfv/roles/stor4nfv/files/install_ansible.sh b/plugins/stor4nfv/roles/stor4nfv/files/install_ansible.sh new file mode 100644 index 00000000..b4a3a30a --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/files/install_ansible.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# + +add-apt-repository ppa:ansible/ansible + +apt-get update +apt-get install -y ansible +sleep 5 + +ansible --version + diff --git a/plugins/stor4nfv/roles/stor4nfv/tasks/main.yml b/plugins/stor4nfv/roles/stor4nfv/tasks/main.yml new file mode 100644 index 00000000..89d13f41 --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/tasks/main.yml @@ -0,0 +1,14 @@ +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# +--- +- include: opensds.yml +- include: nbp.yml + when: + - ansible_distribution == 'Ubuntu' + - stor4nfv is defined and stor4nfv == "Enable" diff --git a/plugins/stor4nfv/roles/stor4nfv/tasks/nbp.yml b/plugins/stor4nfv/roles/stor4nfv/tasks/nbp.yml new file mode 100644 index 00000000..5361f5cd --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/tasks/nbp.yml @@ -0,0 +1,27 @@ +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# +--- + +- name: render nbp inventory + remote_user: root + template: + src: nbp.hosts.j2 + dest: $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/nbp-ansible/nbp.hosts + +- name: set nbp to csi + remote_user: root + shell: | + cd $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/nbp-ansible; + sed -i 's/^nbp_plugin_type.*/nbp_plugin_type: csi/g' group_vars/common.yml + +- name: run playbook + remote_user: root + shell: | + cd $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/nbp-ansible; + ansible-playbook site.yml -i nbp.hosts | tee /var/log/stor4nfv-nbp.log diff --git a/plugins/stor4nfv/roles/stor4nfv/tasks/opensds.yml b/plugins/stor4nfv/roles/stor4nfv/tasks/opensds.yml new file mode 100644 index 00000000..afd65bc7 --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/tasks/opensds.yml @@ -0,0 +1,84 @@ +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# +--- + +- name: install prerequisites package + apt: + name: "{{ item }}" + state: present + with_items: + - make + - gcc + +- name: copy install_ansible script + remote_user: root + copy: + src: install_ansible.sh + dest: /opt/install_ansible.sh + mode: 0777 + +- name: install ansible + command: su -s /bin/sh -c "/opt/install_ansible.sh" + +- name: install ceph-ansible code + remote_user: root + shell: | + cd /opt; + git clone https://github.com/ceph/ceph-ansible.git + +- name: fetch stor4nfv source code + remote_user: root + shell: | + mkdir -p $HOME/gopath/src/github.com/stor4nfv && cd $HOME/gopath/src/github.com/stor4nfv; + git clone https://gerrit.opnfv.org/gerrit/stor4nfv.git + +- name: copy configure_vars script + remote_user: root + copy: + src: configure_vars.sh + dest: /opt/configure_vars.sh + mode: 0777 + +- name: configure variables for ceph, opensds + remote_user: root + shell: | + cd /opt; + ./configure_vars.sh {{ mgmt_cidr }} {{ external_cidr }} + +- name: render ceph inventory + remote_user: root + template: + src: ceph.hosts.j2 + dest: $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/ansible/group_vars/ceph/ceph.hosts + +- name: render opensds inventory + remote_user: root + template: + src: opensds.hosts.j2 + dest: $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/ansible/local.hosts + +- name: force to run the second etcd cluster + remote_user: root + lineinfile: + dest: $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/ansible/roles/osdsdb/scenarios/etcd.yml + state: absent + regexp: 'when: service_etcd_status.rc != 0' + +- name: run playbook + remote_user: root + shell: | + cd $HOME/gopath/src/github.com/stor4nfv/stor4nfv/ci/ansible; + ansible-playbook site.yml -i local.hosts | tee /var/log/stor4nfv.log + register: stor4nfv_result + +- name: export opensds endpoint + remote_user: root + shell: | + export OPENSDS_ENDPOINT=http://127.0.0.1:50040 + when: stor4nfv_result|succeeded diff --git a/plugins/stor4nfv/roles/stor4nfv/templates/ceph.hosts.j2 b/plugins/stor4nfv/roles/stor4nfv/templates/ceph.hosts.j2 new file mode 100644 index 00000000..fda313d4 --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/templates/ceph.hosts.j2 @@ -0,0 +1,14 @@ +[mons] +{% for controller in hostvars[inventory_hostname]['groups']['kube_master'] %} +{{ controller }} ansible_ssh_host={{ hostvars[controller]['ansible_ssh_host'] }} +{% endfor %} + +[osds] +{% for controller in hostvars[inventory_hostname]['groups']['storage_node'] %} +{{ controller }} ansible_ssh_host={{ hostvars[controller]['ansible_ssh_host'] }} +{% endfor %} + +[mgrs] +{% for controller in hostvars[inventory_hostname]['groups']['kube_master'] %} +{{ controller }} ansible_ssh_host={{ hostvars[controller]['ansible_ssh_host'] }} +{% endfor %} diff --git a/plugins/stor4nfv/roles/stor4nfv/templates/nbp.hosts.j2 b/plugins/stor4nfv/roles/stor4nfv/templates/nbp.hosts.j2 new file mode 100644 index 00000000..0e8fa4c9 --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/templates/nbp.hosts.j2 @@ -0,0 +1,4 @@ +[worker-nodes] +{% for worker in hostvars[inventory_hostname]['groups']['storage_master'] %} +{{ worker }} ansible_ssh_host={{ hostvars[worker]['ansible_ssh_host'] }} +{% endfor %} diff --git 
a/plugins/stor4nfv/roles/stor4nfv/templates/opensds.hosts.j2 b/plugins/stor4nfv/roles/stor4nfv/templates/opensds.hosts.j2 new file mode 100644 index 00000000..97068b90 --- /dev/null +++ b/plugins/stor4nfv/roles/stor4nfv/templates/opensds.hosts.j2 @@ -0,0 +1,9 @@ +[controllers] +{% for controller in hostvars[inventory_hostname]['groups']['storage_master'] %} +{{ controller }} ansible_ssh_host={{ hostvars[controller]['ansible_ssh_host'] }} +{% endfor %} + +[docks] +{% for dock in hostvars[inventory_hostname]['groups']['storage_master'] %} +{{ dock }} ansible_ssh_host={{ hostvars[dock]['ansible_ssh_host'] }} +{% endfor %}
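
For reference, a minimal sketch of how these inventory templates resolve for the k8-nosdn-stor4nfv-ha virtual scenario added above: host3 is the only storage_master, host4/host5 are storage_node, and host1-host3 are kube_master, so opensds.hosts and nbp.hosts list host3 under [controllers]/[docks]/[worker-nodes], while ceph.hosts puts host1-host3 under [mons]/[mgrs] and host4/host5 under [osds]. The host addresses below are illustrative values from the mgmt range, not taken from a real deployment:

    # illustrative rendering of opensds.hosts.j2 and ceph.hosts.j2 (addresses are examples only)
    [controllers]
    host3 ansible_ssh_host=10.1.0.52

    [docks]
    host3 ansible_ssh_host=10.1.0.52

    [mons]
    host1 ansible_ssh_host=10.1.0.50
    host2 ansible_ssh_host=10.1.0.51
    host3 ansible_ssh_host=10.1.0.52

    [osds]
    host4 ansible_ssh_host=10.1.0.53
    host5 ansible_ssh_host=10.1.0.54

    [mgrs]
    host1 ansible_ssh_host=10.1.0.50
    host2 ansible_ssh_host=10.1.0.51
    host3 ansible_ssh_host=10.1.0.52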