Diffstat (limited to 'deploy'): 44 files changed, 1034 insertions, 49 deletions
diff --git a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml index bfdc8958..68dec5c2 100755 --- a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml +++ b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml @@ -42,3 +42,16 @@ max_fail_percentage: 0 roles: - post-k8s + +- hosts: kube_node + remote_user: root + max_fail_percentage: 0 + roles: + - role: storage + when: stor4nfv is defined and stor4nfv == "Enable" + +- hosts: storage_master + remote_user: root + max_fail_percentage: 0 + roles: + - stor4nfv diff --git a/deploy/adapters/ansible/kubernetes/roles/2flannel-apps/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/2flannel-apps/tasks/main.yml new file mode 100644 index 00000000..a7ff10e2 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/2flannel-apps/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: "2Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-2flannel-rbac.yml" + run_once: true + when: rbac_enabled and two_flannel_rbac_manifest.changed + +- name: 2Flannel | Start Resources + kube: + name: "kube-2flannel" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/cni-2flannel.yml" + resource: "ds" + namespace: "{{system_namespace}}" + state: "{{ item | ternary('latest','present') }}" + with_items: "{{ two_flannel_manifest.changed }}" + when: inventory_hostname == groups['kube-master'][0] + +- name: 2Flannel | Wait for flannel subnet.env file presence + wait_for: + path: /run/2flannel/networks/subnet1.env + delay: 5 + timeout: 600 diff --git a/deploy/adapters/ansible/kubernetes/roles/2flannel/defaults/main.yml b/deploy/adapters/ansible/kubernetes/roles/2flannel/defaults/main.yml new file mode 100644 index 00000000..f9e61eda --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/2flannel/defaults/main.yml @@ -0,0 +1,21 @@ +--- +# Flannel public IP +# The address that flannel should advertise as how to access the system +# Disabled until https://github.com/coreos/flannel/issues/712 is fixed +# flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}" + +## interface that should be used for flannel operations +## This is actually an inventory node-level item +# flannel_interface: + +# You can choose what type of flannel backend to use +# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md + +# Limits for apps +flannel_memory_limit: 500M +flannel_cpu_limit: 300m +flannel_memory_requests: 64M +flannel_cpu_requests: 150m + +two_flannel_network1: "{{ kube_pods_subnet }}" +two_flannel_network2: "10.235.64.0/18" diff --git a/deploy/adapters/ansible/kubernetes/roles/2flannel/handlers/main.yml b/deploy/adapters/ansible/kubernetes/roles/2flannel/handlers/main.yml new file mode 100644 index 00000000..44ead00e --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/2flannel/handlers/main.yml @@ -0,0 +1,61 @@ +--- +- name: 2Flannel | delete default docker bridge + command: ip link delete docker0 + failed_when: false + notify: 2Flannel | restart docker + +- name: 2Flannel | delete 2flannel interface + command: ip link delete flannel0 && ip link delete flannel1 + failed_when: false + +# special cases for atomic because it defaults to live-restore: true +# So we disable live-restore to pickup the new flannel IP. 
After +# we enable it, we have to restart docker again to pickup the new +# setting and restore the original behavior +- name: 2Flannel | restart docker + command: /bin/true + notify: + - 2Flannel | reload systemd + - 2Flannel | reload docker.socket + - 2Flannel | configure docker live-restore true (atomic) + - 2Flannel | reload docker + - 2Flannel | pause while Docker restarts + - 2Flannel | wait for docker + +- name: 2Flannel | reload systemd + shell: systemctl daemon-reload + +- name: 2Flannel | reload docker.socket + service: + name: docker.socket + state: restarted + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] + +- name: 2Flannel | configure docker live-restore true (atomic) + replace: + name: /etc/docker/daemon.json + regexp: '"live-restore":.*true' + replace: '"live-restore": false' + when: is_atomic + +- name: 2Flannel | reload docker + service: + name: docker + state: restarted + +- name: 2Flannel | pause while Docker restarts + pause: + seconds: 10 + prompt: "Waiting for docker restart" + +- name: 2Flannel | wait for docker + command: "{{ docker_bin_dir }}/docker images" + register: docker_ready + retries: 10 + delay: 5 + until: docker_ready.rc == 0 + +- name: 2Flannel | reload kubelet + service: + name: kubelet + state: restarted diff --git a/deploy/adapters/ansible/kubernetes/roles/2flannel/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/2flannel/tasks/main.yml new file mode 100644 index 00000000..e7adeefe --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/2flannel/tasks/main.yml @@ -0,0 +1,73 @@ +--- +- include: pre-upgrade.yml + +- name: 2Flannel | Verify if br_netfilter module exists + shell: "modinfo br_netfilter" + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false + +- name: 2Flannel | Enable br_netfilter module + modprobe: + name: br_netfilter + state: present + when: modinfo_br_netfilter.rc == 0 + +# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found +# if br_netfilter is not a module +- name: 2Flannel | Check if bridge-nf-call-iptables key exists + command: "sysctl net.bridge.bridge-nf-call-iptables" + failed_when: false + changed_when: false + register: sysctl_bridge_nf_call_iptables + +- name: 2Flannel | Enable bridge-nf-call tables + sysctl: + name: "{{ item }}" + state: present + value: 1 + reload: "yes" + when: modinfo_br_netfilter.rc == 1 and sysctl_bridge_nf_call_iptables.rc == 0 + with_items: + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + +- name: 2Flannel | Install Multus CNI + shell: |- + /usr/bin/docker run --rm --network=host -v /opt/cni/bin/:/opt/cni/bin/ golang:1.9 \ + bash -c \ + "git clone https://github.com/Intel-Corp/multus-cni && \ + cd multus-cni && ./build && cp bin/multus /opt/cni/bin" + +- name: 2Flannel | Create cni-flannel-rbac manifest + template: + src: cni-2flannel-rbac.yml.j2 + dest: "{{ kube_config_dir }}/cni-2flannel-rbac.yml" + register: two_flannel_rbac_manifest + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled + +- name: 2Flannel | Create cni-flannel manifest + template: + src: cni-2flannel.yml.j2 + dest: "{{ kube_config_dir }}/cni-2flannel.yml" + register: two_flannel_manifest + when: inventory_hostname == groups['kube-master'][0] + +- name: 2Flannel | Set 2Flannel etcd configuration + shell: |- + ETCDCTL_CA_FILE=/etc/ssl/etcd/ssl/ca.pem \ + ETCDCTL_CERT_FILE=/etc/ssl/etcd/ssl/node-{{ ansible_hostname }}.pem \ + 
ETCDCTL_KEY_FILE=/etc/ssl/etcd/ssl/node-{{ ansible_hostname }}-key.pem \ + {{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} \ + set /{{ cluster_name }}/{{ item.config }}/network/config \ + '{ "Network": "{{ item.network }}", "Backend": {"Type":"udp", "Port":{{ item.port}} }}' + with_items: + - {config: "2flannel.1", + network: "{{ two_flannel_network1 }}", + port: 8285} + - {config: "2flannel.2", + network: "{{ two_flannel_network2 }}", + port: 8286} + delegate_to: "{{groups['etcd'][0]}}" + run_once: true diff --git a/deploy/adapters/ansible/kubernetes/roles/2flannel/tasks/pre-upgrade.yml b/deploy/adapters/ansible/kubernetes/roles/2flannel/tasks/pre-upgrade.yml new file mode 100644 index 00000000..a49002fe --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/2flannel/tasks/pre-upgrade.yml @@ -0,0 +1,14 @@ +--- +- name: 2Flannel pre-upgrade | Purge legacy flannel systemd unit file + file: + path: "/etc/systemd/system/docker.service.d/flannel-options.conf" + state: absent + notify: + - 2Flannel | delete default docker bridge + +- name: 2Flannel pre-upgrade | Purge legacy Flannel static pod manifest + file: + path: "{{ kube_manifest_dir }}/flannel-pod.manifest" + state: absent + notify: + - 2Flannel | delete flannel interface diff --git a/deploy/adapters/ansible/kubernetes/roles/2flannel/templates/cni-2flannel-rbac.yml.j2 b/deploy/adapters/ansible/kubernetes/roles/2flannel/templates/cni-2flannel-rbac.yml.j2 new file mode 100644 index 00000000..b4d1be11 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/2flannel/templates/cni-2flannel-rbac.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 2flannel + namespace: "{{system_namespace}}" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: 2flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: 2flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 2flannel +subjects: +- kind: ServiceAccount + name: 2flannel + namespace: "{{system_namespace}}" diff --git a/deploy/adapters/ansible/kubernetes/roles/2flannel/templates/cni-2flannel.yml.j2 b/deploy/adapters/ansible/kubernetes/roles/2flannel/templates/cni-2flannel.yml.j2 new file mode 100644 index 00000000..9c159d37 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/2flannel/templates/cni-2flannel.yml.j2 @@ -0,0 +1,211 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-2flannel-cfg1 + namespace: "{{system_namespace}}" + labels: + tier: node + app: 2flannel +data: + cni-conf.json: | + { + "name": "2flannel-networks", + "type": "multus", + "delegates": [ + { + "type": "flannel", + "name": "flannel1", + "subnetFile": "/run/2flannel/networks/subnet2.env", + "dataDir": "/var/lib/cni/flannel/2", + "delegate": { + "bridge": "kbr1", + "isDefaultGateway": false + } + }, + { + "type": "flannel", + "name": "flannel0", + "subnetFile": "/run/2flannel/networks/subnet1.env", + "dataDir": "/var/lib/cni/flannel/1", + "masterplugin": true, + "delegate": { + "bridge": "kbr0", + "isDefaultGateway": true + } + } + ] + } + net-conf.json: | + { + "Network": {{ two_flannel_network1 }}, + "Backend": { + "Type": "udp", + "Port": 8285 + } + } +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-2flannel-cfg2 + 
namespace: "{{system_namespace}}" + labels: + tier: node + app: 2flannel +data: + net-conf.json: | + { + "Network": {{ two_flannel_network2 }}, + "Backend": { + "Type": "udp", + "Port": 8286 + } + } +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kube-2flannel + namespace: "{{system_namespace}}" + labels: + tier: node + k8s-app: 2flannel +spec: + template: + metadata: + labels: + tier: node + k8s-app: 2flannel + spec: +{% if rbac_enabled %} + serviceAccountName: 2flannel +{% endif %} + containers: + - name: kube-2flannel-1 + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: [ "/opt/bin/flanneld", "--ip-masq", + "-etcd-endpoints={{ etcd_access_addresses }}", + "-etcd-prefix=/{{ cluster_name }}/2flannel.1/network", + "-etcd-cafile=/etc/ssl/etcd/ssl/ca.pem", + "-etcd-certfile=/etc/ssl/etcd/ssl/node-$(NODE_NAME).pem", + "-etcd-keyfile=/etc/ssl/etcd/ssl/node-$(NODE_NAME)-key.pem", + "-subnet-file=/run/2flannel/networks/subnet1.env" ] + securityContext: + privileged: true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: cni + mountPath: /etc/cni/net.d + - name: ssl + mountPath: /etc/ssl/etcd/ssl/ + - name: 2flannel-cfg1 + mountPath: /etc/kube-flannel/ + - name: kube-2flannel-2 + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: [ "/opt/bin/flanneld", "--ip-masq", + "-etcd-endpoints={{ etcd_access_addresses }}", + "-etcd-prefix=/{{ cluster_name }}/2flannel.2/network", + "-etcd-cafile=/etc/ssl/etcd/ssl/ca.pem", + "-etcd-certfile=/etc/ssl/etcd/ssl/node-$(NODE_NAME).pem", + "-etcd-keyfile=/etc/ssl/etcd/ssl/node-$(NODE_NAME)-key.pem", + "-subnet-file=/run/2flannel/networks/subnet2.env" ] + securityContext: + privileged: true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: cni + mountPath: /etc/cni/net.d + - name: ssl + mountPath: /etc/ssl/etcd/ssl/ + - name: 2flannel-cfg2 + mountPath: /etc/kube-flannel/ + - name: install-cni + image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }} + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. 
+ - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: kube-2flannel-cfg1 + key: cni-conf.json + - name: CNI_CONF_NAME + value: "10-multus-2flannel.conf" + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: host-cni-bin + mountPath: /host/opt/cni/bin/ + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + volumes: + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: ssl + hostPath: + path: /etc/ssl/etcd/ssl/ + - name: 2flannel-cfg1 + configMap: + name: kube-2flannel-cfg1 + - name: 2flannel-cfg2 + configMap: + name: kube-2flannel-cfg2 + - name: host-cni-bin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: {{ serial | default('20%') }} + type: RollingUpdate diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml index af52ad04..6d947623 100644 --- a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml @@ -57,7 +57,6 @@ - netaddr - jinja2 - - name: copy inventories generate script copy: src: generate_inventories.py @@ -126,6 +125,13 @@ regexp: '^#loadbalancer_apiserver_localhost: true' line: 'loadbalancer_apiserver_localhost: true' +- name: use the user name and password login the dashboard + lineinfile: + dest: /opt/kargo_k8s/inventory/group_vars/k8s-cluster.yml + regexp: '^#kube_basic_auth: false' + line: 'kube_basic_auth: true' + + - name: add vip to ssl keys lineinfile: dest: /opt/kargo_k8s/inventory/group_vars/k8s-cluster.yml @@ -149,6 +155,68 @@ - extra-vars-{{ ansible_architecture }}.yml - extra-vars.yml +- name: copy 2flannel playbook to kargo + copy: + src: "{{ run_dir }}/roles/2flannel" + dest: /opt/kargo_k8s/roles/network_plugin + +- name: copy 2flannel-apps playbook to kargo + copy: + src: "{{ run_dir }}/roles/2flannel-apps/" + dest: /opt/kargo_k8s/roles/kubernetes-apps/network_plugin/2flannel + +- name: append 2flannel to network plugin + blockinfile: + path: /opt/kargo_k8s/roles/network_plugin/meta/main.yml + block: " - role: network_plugin/2flannel\n \ + when: kube_network_plugin == '2flannel'\n tags: 2flannel\n" + +- name: append 2flannel apps to network plugin + blockinfile: + path: /opt/kargo_k8s/roles/kubernetes-apps/network_plugin/meta/main.yml + block: " - role: kubernetes-apps/network_plugin/2flannel\n \ + when: kube_network_plugin == '2flannel'\n tags: 2flannel\n" + +- name: append 2flannel to valid kube_network_plugin list + replace: + path: "/opt/kargo_k8s/roles/kubernetes/{{ item.path }}" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - {path: 'master/templates/manifests/kube-controller-manager.manifest.j2', + regexp: '"cloud", "flannel"', + replace: '"cloud", "flannel", "2flannel"'} + - {path: 'node/templates/kubelet.kubeadm.env.j2', + regexp: '"calico", "canal", "flannel", "weave"', + replace: '"calico", "canal", "flannel", "weave", "2flannel"'} + - {path: 'node/templates/kubelet.standard.env.j2', + regexp: '"calico", "canal", "flannel", "weave"', + replace: '"calico", "canal", "flannel", "weave", "2flannel"'} + - {path: 'node/templates/kubelet.rkt.service.j2', + regexp: '"calico", "weave", "canal", "flannel"', + replace: '"calico", "weave", "canal", "flannel", "2flannel"'} + - {path: 'preinstall/tasks/main.yml', + regexp: '"calico", "weave", "canal", "flannel"', + replace: '"calico", "weave", "canal", 
"flannel", "2flannel"'} + +# yamllint disable rule:line-length +- name: enable CSI plugin feature + lineinfile: + dest: "/opt/kargo_k8s/roles/kubespray-defaults/defaults/main.yaml" + regexp: "^kube_feature_gates:" + line: "{% raw %}kube_feature_gates: ['Initializers={{ istio_enabled|string }}', 'PersistentLocalVolumes={{ local_volumes_enabled|string }}', 'CSIPersistentVolume=True', 'MountPropagation=True']{% endraw %}" + when: + - stor4nfv is defined and stor4nfv == "Enable" +# yamllint enable rule:line-length + +- name: enable CSI plugin runtime_config + lineinfile: + dest: /opt/kargo_k8s/roles/kubernetes/master/defaults/main.yml + insertafter: '^ - admissionregistration.k8s.io/v1alpha1' + line: ' - storage.k8s.io/v1alpha1' + when: + - stor4nfv is defined and stor4nfv == "Enable" + - name: run kargo playbook shell: | cd /opt/kargo_k8s diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml index b73056e5..21287b02 100644 --- a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml @@ -3,3 +3,4 @@ helm_flag: true apiserver_loadbalancer_domain_name: "{{ public_vip.ip }}" vipaddress: "{{ public_vip.ip }}" exlb_port: 8383 +kubelet_fail_swap_on: false diff --git a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml index 76203440..c915ec09 100644 --- a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml @@ -12,3 +12,7 @@ - include: "{{ ansible_os_family }}.yml" when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7' + +- name: close the swap partition + shell: | + swapoff -a diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml index 9a442da3..6ea57c04 100644 --- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml +++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml @@ -58,6 +58,13 @@ - collectd - hosts: + - controller + remote_user: root + roles: + - influxdb + - grafana + +- hosts: - neutron_openvswitch_agent - compute remote_user: root diff --git a/deploy/adapters/ansible/roles/config-osa/files/redhat-7.yml b/deploy/adapters/ansible/roles/config-osa/files/redhat-7.yml new file mode 100644 index 00000000..ccfe7da2 --- /dev/null +++ b/deploy/adapters/ansible/roles/config-osa/files/redhat-7.yml @@ -0,0 +1,5 @@ +--- + +tacker_distro_packages: + - rsync + - git-core diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml index 8bb56656..5f4f2fb2 100755 --- a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml +++ b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml @@ -361,3 +361,10 @@ dest: /opt/openstack-ansible/playbooks/setup-openstack.yml insertafter: "^- include: os-trove" line: "- include: os-tacker-install.yml" + +- name: add variables file of tacker for centos + copy: + src: redhat-7.yml + dest: /etc/ansible/roles/os_tacker/vars/redhat-7.yml + +- include: set_openstack_release.yml diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/set_openstack_release.yml b/deploy/adapters/ansible/roles/config-osa/tasks/set_openstack_release.yml new file mode 100644 index 00000000..c886eabf --- /dev/null +++ 
b/deploy/adapters/ansible/roles/config-osa/tasks/set_openstack_release.yml @@ -0,0 +1,15 @@ +############################################################################ +# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: set openstack release + lineinfile: + dest: "{{ run_dir }}/group_vars/all" + line: "openstack_release: {{ openstack_release }}" + +- meta: refresh_inventory diff --git a/deploy/adapters/ansible/roles/config-osa/vars/main.yml b/deploy/adapters/ansible/roles/config-osa/vars/main.yml index 0b3b0c1e..65f67c18 100644 --- a/deploy/adapters/ansible/roles/config-osa/vars/main.yml +++ b/deploy/adapters/ansible/roles/config-osa/vars/main.yml @@ -11,3 +11,6 @@ LOCAL_REPOSITORY_IP: "192.168.137.222" ceph_host: "{{ hostvars[inventory_hostname]['groups']['ceph_osd'][0] }}" repo_dest_path: "/var/www/repo/os-releases/15.1.4/ubuntu-16.04-x86_64/" networking_sfc_version: 4.0.0 +# yamllint disable rule:line-length +openstack_release: "{{ lookup('yamlfile', '/opt/openstack-ansible/group_vars/all/all.yml key=openstack_release') }}" +# yamllint enable rule:line-length diff --git a/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml index fb0dc67d..d5582391 100644 --- a/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml +++ b/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml @@ -69,3 +69,20 @@ flat: "yes" when: - inventory_hostname == groups['network_hosts'][0] + +# yamllint disable rule:line-length +- name: fix horizon upload image issue + lineinfile: + dest: "/openstack/venvs/horizon-{{ openstack_release }}/lib/python2.7/site-packages/openstack_dashboard/local/local_settings.py" + regexp: "^HORIZON_IMAGES_UPLOAD_MODE" + line: "HORIZON_IMAGES_UPLOAD_MODE = 'legacy'" + when: + - inventory_hostname in groups['dashboard_containers'] +# yamllint enable rule:line-length + +- name: restart apache2 + service: + name: apache2 + state: restarted + when: + - inventory_hostname in groups['dashboard_containers'] diff --git a/deploy/playbook_done.py b/deploy/ansible_plugins/callback/playbook_done.py index 6b1043d4..4784ff63 100644 --- a/deploy/playbook_done.py +++ b/deploy/ansible_plugins/callback/playbook_done.py @@ -17,6 +17,8 @@ """Ansible playbook callback after a playbook run has completed.""" import sys +from distutils.version import LooseVersion +from ansible import __version__ as __ansible_version__ from ansible.plugins.callback import CallbackBase compass_bin = "/opt/compass/bin" @@ -85,7 +87,10 @@ class CallbackModule(CallbackBase): return def v2_playbook_on_stats(self, stats): - all_vars = self.play.get_variable_manager().get_vars(self.loader) + if LooseVersion(__ansible_version__) < LooseVersion("2.4"): + all_vars = self.play.get_variable_manager().get_vars(self.loader) + else: + all_vars = self.play.get_variable_manager().get_vars() host_vars = all_vars["hostvars"] hosts = sorted(stats.processed.keys()) cluster_name = host_vars[hosts[0]]['cluster_name'] diff --git a/deploy/status_callback.py b/deploy/ansible_plugins/callback/status_callback.py index 6169b87f..b87d2094 100644 --- a/deploy/status_callback.py +++ 
b/deploy/ansible_plugins/callback/status_callback.py @@ -11,6 +11,8 @@ import httplib import simplejson as json import sys # noqa:F401 +from distutils.version import LooseVersion +from ansible import __version__ as __ansible_version__ from ansible.plugins.callback import CallbackBase COMPASS_HOST = "compass-deck" @@ -101,7 +103,10 @@ class CallbackModule(CallbackBase): def v2_playbook_on_stats(self, stats): self._display.display("playbook_on_stats enter") - all_vars = self.play.get_variable_manager().get_vars(self.loader) + if LooseVersion(__ansible_version__) < LooseVersion("2.4"): + all_vars = self.play.get_variable_manager().get_vars(self.loader) + else: + all_vars = self.play.get_variable_manager().get_vars() host_vars = all_vars["hostvars"] hosts = sorted(stats.processed.keys()) cluster_name = host_vars[hosts[0]]['cluster_name'] diff --git a/deploy/ansible_plugins/lookup/yamlfile.py b/deploy/ansible_plugins/lookup/yamlfile.py new file mode 100644 index 00000000..c915adc7 --- /dev/null +++ b/deploy/ansible_plugins/lookup/yamlfile.py @@ -0,0 +1,55 @@ +#!/bin/venv python + +import yaml +import sys + +compass_bin = "/opt/compass/bin" +sys.path.append(compass_bin) +import switch_virtualenv # noqa: F401 + +from ansible.errors import AnsibleError # noqa: E402 +from ansible.plugins.lookup import LookupBase # noqa: E402 + + +class LookupModule(LookupBase): + + def read_yaml(self, yaml_path, key, default=None): + if not key: + return None + + with open(yaml_path) as fd: + yaml_data = yaml.safe_load(fd) + + if key in yaml_data: + return yaml_data[key] + else: + return default + + def run(self, terms, variables=None, **kwargs): + res = [] + if not isinstance(terms, list): + terms = [terms] + + for term in terms: + params = term.split() + yaml_path = params[0] + + param_dict = { + 'key': None, + 'default': None + } + + try: + for param in params[1:]: + key, value = param.split('=') + assert(key in param_dict) + param_dict[key] = value + except (AttributeError, AssertionError), e: + raise AnsibleError(e) + + data = self.read_yaml(yaml_path, + param_dict['key'], + param_dict['default']) + res.append(data) + + return res diff --git a/deploy/compass_conf/flavor/kubernetes.conf b/deploy/compass_conf/flavor/kubernetes.conf index 71acadff..e7e8ffc8 100755 --- a/deploy/compass_conf/flavor/kubernetes.conf +++ b/deploy/compass_conf/flavor/kubernetes.conf @@ -4,7 +4,7 @@ FLAVORS = [{ 'display_name': 'ansible-kubernetes', 'template': 'ansible-kubernetes.tmpl', 'roles': [ - 'kube_master', 'etcd', 'kube_node', 'ha' + 'kube_master', 'etcd', 'kube_node', 'ha', 'storage_master', 'storage_node' ], }] diff --git a/deploy/compass_conf/package_installer/ansible-kubernetes.conf b/deploy/compass_conf/package_installer/ansible-kubernetes.conf index 820691b7..044af9a9 100755 --- a/deploy/compass_conf/package_installer/ansible-kubernetes.conf +++ b/deploy/compass_conf/package_installer/ansible-kubernetes.conf @@ -7,7 +7,7 @@ SETTINGS = { 'playbook_file': 'site.yml', 'inventory_file': 'inventory.py', 'inventory_json_file': 'inventory.json', - 'inventory_group': ['kube_master', 'etcd', 'kube_node', 'ha'], + 'inventory_group': ['kube_master', 'etcd', 'kube_node', 'ha', 'ceph_adm', 'ceph_mon', 'ceph_osd', 'storage_master', 'storage_node'], 'group_variable': 'all', 'etc_hosts_path': 'roles/pre-k8s/templates/hosts', 'runner_dirs': ['roles','kubernetes/roles'] diff --git a/deploy/compass_conf/role/kubernetes_ansible.conf b/deploy/compass_conf/role/kubernetes_ansible.conf index c27779ad..3e79cbb9 100755 --- 
a/deploy/compass_conf/role/kubernetes_ansible.conf +++ b/deploy/compass_conf/role/kubernetes_ansible.conf @@ -15,6 +15,15 @@ ROLES = [{ 'role': 'ha', 'display_name': 'ha', 'description': 'ha' +}, { + 'role': 'storage_master', + 'display_name': 'storage master', + 'description': 'storage master', + 'optional': True +}, { + 'role': 'storage_node', + 'display_name': 'storage node', + 'description': 'storage node', + 'optional': True } - ] diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl index f09fa9c8..da4554e3 100644 --- a/deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl @@ -3,7 +3,7 @@ log_path = /var/ansible/run/kubernetes-$cluster_name/ansible.log host_key_checking = False callback_whitelist = playbook_done, status_callback -callback_plugins = /opt/ansible_callbacks +callback_plugins = /opt/ansible_plugins/callback forks=100 [ssh_connection] diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl index f132365a..f6d96c78 100644 --- a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl @@ -40,6 +40,7 @@ deploy_type: $getVar('deploy_type', 'virtual') public_cidr: $kube_nodes[0]['install']['subnet'] storage_cidr: "{{ ip_settings[inventory_hostname]['storage']['cidr'] }}" mgmt_cidr: "{{ ip_settings[inventory_hostname]['mgmt']['cidr'] }}" +external_cidr: "{{ ip_settings[inventory_hostname]['external']['cidr'] }}" public_net_info: "{{ network_cfg.public_net_info }}" host_ip_settings: "{{ ip_settings[inventory_hostname] }}" @@ -159,6 +160,13 @@ CONGRESS_PASS: $congress_pass DEMO_PASS: $demo_pass ADMIN_PASS: $admin_pass +#set plugins = $getVar('plugins', []) +#for item in plugins +#set keys = $item.keys() +#set values = $item.values() +$keys[0]: $values[0] +#end for + #set neutron_service_plugins=['router'] #if $getVar('enable_fwaas', True) diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl index cd8c8d30..6fdb6b16 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl @@ -3,7 +3,8 @@ log_path = /var/ansible/run/openstack_pike-$cluster_name/ansible.log host_key_checking = False callback_whitelist = playbook_done, status_callback -callback_plugins = /opt/ansible_callbacks +callback_plugins = /opt/ansible_plugins/callback +lookup_plugins = /opt/ansible_plugins/lookup forks=100 [ssh_connection] diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh index cf215f3b..17171578 100755 --- a/deploy/compass_vm.sh +++ b/deploy/compass_vm.sh @@ -23,7 +23,7 @@ function check_container_alive() { docker exec -it compass-mq bash -c "exit" 1>/dev/null 2>&1 local mq_state=$? 
- if [ $((deck_state||tasks_state||cobbler_state||db_state||mq-state)) == 0 ]; then + if [ $((deck_state||tasks_state||cobbler_state||db_state||mq_state)) == 0 ]; then echo "true" else echo "false" diff --git a/deploy/conf/base.conf b/deploy/conf/base.conf index 5395405d..274847ae 100644 --- a/deploy/conf/base.conf +++ b/deploy/conf/base.conf @@ -2,17 +2,10 @@ export DHA=${DHA:-$COMPASS_DIR/deploy/conf/vm_environment/os-nosdn-nofeature-ha. export NEUTRON=${NEUTRON:-$COMPASS_DIR/deploy/conf/neutron_cfg.yaml} export NETWORK=${NETWORK:-$COMPASS_DIR/deploy/conf/network_cfg.yaml} export TAR_URL=${TAR_URL:-file://`pwd`/work/building/compass.tar.gz} -export INSTALL_IP=${INSTALL_IP:-10.1.0.1} -export EXT_NAT_MASK=${EXT_NAT_MASK:-255.255.252.0} -export EXT_NAT_GW=${EXT_NAT_GW:-192.16.1.1} -export EXT_NAT_IP_START=${EXT_NAT_IP_START:-192.16.1.3} -export EXT_NAT_IP_END=${EXT_NAT_IP_END:-192.16.1.254} -export EXTERNAL_NIC=${EXTERNAL_NIC:-eth0} +export PXE_NIC=${PXE_NIC:-eth0} export DOMAIN="ods.com" export PARTITIONS="/=30%,/home=5%,/tmp=5%,/var=60%" -export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24" -export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'10.1.0.50'} export MANAGEMENT_INTERFACE=${MANAGEMENT_INTERFACE:-eth0} export DASHBOARD_URL="" export ENABLE_SECGROUP=${ENABLE_SECGROUP:-"true"} @@ -27,7 +20,6 @@ export NETWORK_MAPPING=${NETWORK_MAPPING:-"install=${MANAGEMENT_INTERFACE}"} export PROXY="" export IGNORE_PROXY="" export SEARCH_PATH="ods.com" -export GATEWAY="10.1.0.1" export SERVER_CREDENTIAL="root=root" export LOCAL_REPO_URL="" export OS_CONFIG_FILENAME="" diff --git a/deploy/conf/cluster.conf b/deploy/conf/cluster.conf index 750ff113..0d947100 100644 --- a/deploy/conf/cluster.conf +++ b/deploy/conf/cluster.conf @@ -24,10 +24,10 @@ export ADAPTER_NAME="openstack_$OPENSTACK_VERSION" export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes-$OPENSTACK_VERSION" if [[ "x"$KUBERNETES_VERSION != "x" ]]; then + unset OPENSTACK_VERSION export ADAPTER_NAME=kubernetes export ADAPTER_FLAVOR_PATTERN=ansible-kubernetes export ADAPTER_TARGET_SYSTEM_PATTERN='^kubernetes$' fi export DEFAULT_ROLES="" -export VIP="10.1.0.222" diff --git a/deploy/conf/compass.conf b/deploy/conf/compass.conf index 9d9145f1..be5d17c3 100644 --- a/deploy/conf/compass.conf +++ b/deploy/conf/compass.conf @@ -1,20 +1,25 @@ -export COMPASS_VIRT_CPUS=4 -export COMPASS_VIRT_MEM=4096 +export CLUSTER_NAME=${CLUSTER_NAME:-opnfv} +export INSTALL_IP=${INSTALL_IP:-$INSTALL_GW} export COMPASS_SERVER=$INSTALL_IP export COMPASS_DECK_PORT="5050" export COMPASS_SERVER_URL="http://$INSTALL_IP:$COMPASS_DECK_PORT/api" export HTTP_SERVER_URL="http://$INSTALL_IP:$COMPASS_DECK_PORT/api" export COMPASS_USER_EMAIL="admin@huawei.com" export COMPASS_USER_PASSWORD="admin" + export COMPASS_DNS1=${COMPASS_DNS1:-'8.8.8.8'} export COMPASS_DNS2=${COMPASS_DNS2:-} -export COMPASS_EXTERNAL_IP=${COMPASS_EXTERNAL_IP:-} -export COMPASS_EXTERNAL_MASK=${COMPASS_EXTERNAL_MASK:-} -export COMPASS_EXTERNAL_GW=${COMPASS_EXTERNAL_GW:-} export LANGUAGE="EN" export TIMEZONE="America/Los_Angeles" export NTP_SERVER="$COMPASS_SERVER" export NAMESERVERS=${USER_NAMESERVER:-"$COMPASS_SERVER"} export COMPASS_REPO_PORT="5151" export OFFLINE_DEPLOY=${OFFLINE_DEPLOY:-'Disable'} -export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks,compass-cobbler]" + +if [[ "x"$COMPOSE_IMAGES == "x" && "x"$OPENSTACK_VERSION != "x" ]]; then + export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks-osa,compass-cobbler]" +fi + +if [[ "x"$COMPOSE_IMAGES 
== "x" && "x"$KUBERNETES_VERSION != "x" ]]; then + export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks-k8s,compass-cobbler]" +fi diff --git a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml new file mode 100644 index 00000000..6cf62db7 --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml @@ -0,0 +1,74 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiUser: root +ipmiVer: '2.0' + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + mac: 'F8:4A:BF:55:A2:8D' + interfaces: + - eth1: 'F8:4A:BF:55:A2:8E' + ipmiIp: 172.16.130.26 + ipmiPass: Opnfv@pod1 + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: 'D8:49:0B:DA:5A:B7' + interfaces: + - eth1: 'D8:49:0B:DA:5A:B8' + ipmiIp: 172.16.130.27 + ipmiPass: Opnfv@pod1 + roles: + - kube_master + - etcd + - ha + + - name: host3 + mac: '78:D7:52:A0:B1:99' + interfaces: + - eth1: '78:D7:52:A0:B1:9A' + ipmiIp: 172.16.130.29 + ipmiPass: Opnfv@pod1 + roles: + - kube_master + - etcd + - ha + - storage_master + + - name: host4 + mac: 'D8:49:0B:DA:5B:5D' + interfaces: + - eth1: 'D8:49:0B:DA:5B:5E' + ipmiIp: 172.16.130.30 + ipmiPass: Opnfv@pod1 + roles: + - kube_node + - storage_node + + - name: host5 + mac: 'D8:49:0B:DA:56:85' + interfaces: + - eth1: 'D8:49:0B:DA:56:86' + ipmiIp: 172.16.130.31 + ipmiPass: Opnfv@pod1 + roles: + - kube_node + - storage_node diff --git a/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-nofeature-ha.yml new file mode 100644 index 00000000..a96a5259 --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-nofeature-ha.yml @@ -0,0 +1,73 @@ +############################################################################## +# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- + +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiVer: '2.0' + +hosts: + - name: host1 + mac: 'EC:38:8F:79:0C:2C' + ipmiUser: root + ipmiPass: Opnfv@pod2 + ipmiIp: 172.16.130.20 + interfaces: + - eth1: 'EC:38:8F:79:0C:2D' + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: 'EC:38:8F:79:0C:48' + ipmiIp: 172.16.130.19 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:49' + roles: + - kube_master + - etcd + - ha + + - name: host3 + mac: 'EC:38:8F:79:10:CC' + ipmiIp: 172.16.130.18 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:10:CD' + roles: + - kube_master + - etcd + - ha + + - name: host4 + mac: 'EC:38:8F:79:0C:6C' + ipmiIp: 172.16.130.17 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:6D' + roles: + - kube_node + + - name: host5 + mac: 'EC:38:8F:7A:E6:ED' + ipmiIp: 172.16.130.16 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:7A:E6:EE' + roles: + - kube_node diff --git a/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-stor4nfv-ha.yml b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-stor4nfv-ha.yml new file mode 100644 index 00000000..4fbea1f0 --- /dev/null +++ b/deploy/conf/hardware_environment/huawei-pod2/k8-nosdn-stor4nfv-ha.yml @@ -0,0 +1,79 @@ +############################################################################## +# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- + +TYPE: baremetal +FLAVOR: cluster +POWER_TOOL: ipmitool + +ipmiVer: '2.0' + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + mac: 'EC:38:8F:79:0C:2C' + ipmiUser: root + ipmiPass: Opnfv@pod2 + ipmiIp: 172.16.130.20 + interfaces: + - eth1: 'EC:38:8F:79:0C:2D' + roles: + - kube_master + - etcd + - ha + + - name: host2 + mac: 'EC:38:8F:79:0C:48' + ipmiIp: 172.16.130.19 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:49' + roles: + - kube_master + - etcd + - ha + + - name: host3 + mac: 'EC:38:8F:79:10:CC' + ipmiIp: 172.16.130.18 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:10:CD' + roles: + - kube_master + - etcd + - ha + - storage_master + + - name: host4 + mac: 'EC:38:8F:79:0C:6C' + ipmiIp: 172.16.130.17 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:79:0C:6D' + roles: + - kube_node + - storage_node + + - name: host5 + mac: 'EC:38:8F:7A:E6:ED' + ipmiIp: 172.16.130.16 + ipmiUser: root + ipmiPass: Opnfv@pod2 + interfaces: + - eth1: 'EC:38:8F:7A:E6:EE' + roles: + - kube_node + - storage_node diff --git a/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml index 7cc2c215..660f5e2e 100644 --- a/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml +++ b/deploy/conf/hardware_environment/intel-pod17/k8-nosdn-nofeature-ha.yml @@ -27,6 +27,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host2 mac: 'A4:BF:01:14:01:13' @@ -39,6 +40,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host3 mac: 'A4:BF:01:14:71:1E' @@ -51,6 +53,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host4 mac: 'A4:BF:01:16:2F:17' diff --git a/deploy/conf/virtual.conf b/deploy/conf/virtual.conf index 1e9034f3..dcee7d30 100644 --- a/deploy/conf/virtual.conf +++ b/deploy/conf/virtual.conf @@ -3,7 +3,13 @@ export VIRT_CPUS=${VIRT_CPUS:-8} export VIRT_MEM=${VIRT_MEM:-16384} export VIRT_DISK=${VIRT_DISK:-200G} +export EXT_NAT_MASK=${EXT_NAT_MASK:-255.255.252.0} +export EXT_NAT_GW=${EXT_NAT_GW:-192.16.1.1} +export EXT_NAT_IP_START=${EXT_NAT_IP_START:-192.16.1.3} +export EXT_NAT_IP_END=${EXT_NAT_IP_END:-192.16.1.254} + export SWITCH_IPS="1.1.1.1" export SWITCH_CREDENTIAL="version=2c,community=public" export DEPLOYMENT_TIMEOUT="300" export POLL_SWITCHES_FLAG="nopoll_switches" +export NAT_EXTERNAL=${NAT_EXTERNAL:true} diff --git a/deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml b/deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml index 9912d59b..9c2c23fb 100644 --- a/deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml +++ b/deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml @@ -16,6 +16,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host2 roles: diff --git a/deploy/conf/vm_environment/k8-nosdn-stor4nfv-ha.yml b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-ha.yml new file mode 100644 index 00000000..e5e458f7 --- /dev/null +++ b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-ha.yml @@ -0,0 +1,45 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +TYPE: virtual +FLAVOR: cluster + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + roles: + - kube_master + - etcd + - ha + + - name: host2 + roles: + - kube_master + - etcd + - ha + + - name: host3 + roles: + - kube_master + - etcd + - ha + - storage_master + + - name: host4 + roles: + - kube_node + - storage_node + + - name: host5 + roles: + - kube_node + - storage_node diff --git a/deploy/conf/vm_environment/k8-nosdn-stor4nfv-noha.yml b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-noha.yml new file mode 100644 index 00000000..f8c29b3e --- /dev/null +++ b/deploy/conf/vm_environment/k8-nosdn-stor4nfv-noha.yml @@ -0,0 +1,28 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +--- +TYPE: virtual +FLAVOR: cluster + +plugins: + - stor4nfv: "Enable" + +hosts: + - name: host1 + roles: + - kube_master + - etcd + - storage_master + - ha + + - name: host2 + roles: + - kube_node + - storage_node diff --git a/deploy/config_parse.py b/deploy/config_parse.py index 1575ca37..ceaab0f5 100644 --- a/deploy/config_parse.py +++ b/deploy/config_parse.py @@ -13,8 +13,6 @@ import yaml import sys from Cheetah.Template import Template -PXE_INTF = "eth0" - def init(file): with open(file) as fd: @@ -64,9 +62,10 @@ def hostmacs(s, seq, host=None): def export_network_file(dha, network, ofile): - env = {} - - mgmt_net = [item for item in network['ip_settings'] + install_network_env = {} + host_network_env = {} + ip_settings = network['ip_settings'] + mgmt_net = [item for item in ip_settings if item['name'] == 'mgmt'][0] mgmt_gw = mgmt_net['gw'] mgmt_cidr = mgmt_net['cidr'] @@ -74,12 +73,15 @@ def export_network_file(dha, network, ofile): mgmt_netmask = '.'.join([str((0xffffffff << (32 - prefix) >> i) & 0xff) for i in [24, 16, 8, 0]]) dhcp_ip_range = ' '.join(mgmt_net['dhcp_ranges'][0]) - env.update({'INSTALL_GW': mgmt_gw}) - env.update({'INSTALL_CIDR': mgmt_cidr}) - env.update({'INSTALL_NETMASK': mgmt_netmask}) - env.update({'INSTALL_IP_RANGE': dhcp_ip_range}) - export_env_dict(env, ofile) - + internal_vip = network['internal_vip']['ip'] + install_network_env.update({'INSTALL_GW': mgmt_gw}) + install_network_env.update({'INSTALL_CIDR': mgmt_cidr}) + install_network_env.update({'INSTALL_NETMASK': mgmt_netmask}) + install_network_env.update({'INSTALL_IP_RANGE': dhcp_ip_range}) + install_network_env.update({'VIP': internal_vip}) + export_env_dict(install_network_env, ofile) + + pxe_nic = os.environ['PXE_NIC'] host_ip_range = mgmt_net['ip_ranges'][0] host_ips = netaddr.iter_iprange(host_ip_range[0], host_ip_range[1]) host_networks = [] @@ -87,8 +89,11 @@ def export_network_file(dha, network, ofile): host_name = host['name'] host_ip = str(host_ips.next()) host_networks.append( - "{0}:{1}={2}|is_mgmt".format(host_name, PXE_INTF, host_ip)) - host_network_env = {"HOST_NETWORKS": 
';'.join(host_networks)} + '{0}:{1}={2}|is_mgmt'.format(host_name, pxe_nic, host_ip)) + host_subnets = [item['cidr'] for item in ip_settings] + host_network_env.update({'NETWORK_MAPPING': "install=" + pxe_nic}) + host_network_env.update({'HOST_NETWORKS': ';'.join(host_networks)}) + host_network_env.update({'SUBNETS': ','.join(host_subnets)}) export_env_dict(host_network_env, ofile, True) diff --git a/deploy/deploy_host.sh b/deploy/deploy_host.sh index 7a91bd88..512386e5 100755 --- a/deploy/deploy_host.sh +++ b/deploy/deploy_host.sh @@ -37,7 +37,7 @@ function deploy_host(){ --adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" --repo_name="${REPO_NAME}" \ --http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" \ --ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" \ - --search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" \ + --search_path="${SEARCH_PATH}" --default_gateway="${INSTALL_GW}" \ --server_credential="${SERVER_CREDENTIAL}" --local_repo_url="${LOCAL_REPO_URL}" \ --os_config_json_file="${OS_CONFIG_FILENAME}" --service_credentials="${SERVICE_CREDENTIALS}" \ --console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \ diff --git a/deploy/host_virtual.sh b/deploy/host_virtual.sh index 03a1230f..d955b747 100755 --- a/deploy/host_virtual.sh +++ b/deploy/host_virtual.sh @@ -52,6 +52,11 @@ function launch_host_vms() { vm_template_file="$vm_template_dir/host.xml" vm_template_arch="$vm_template_dir/host-$COMPASS_ARCH.xml" [ -f $vm_template_arch ] && vm_template_file=$vm_template_arch + if [[ "$NAT_EXTERNAL" == "false" ]]; then + NET_IAAS="external" + else + NET_IAAS="external_nat" + fi log_info "bringing up pxe boot vms" i=0 @@ -67,7 +72,7 @@ function launch_host_vms() { -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \ -e "s/REPLACE_BOOT_MAC/${mac_array[i]}/g" \ -e "s/REPLACE_NET_INSTALL/install/g" \ - -e "s/REPLACE_NET_IAAS/external_nat/g" \ + -e "s/REPLACE_NET_IAAS/$NET_IAAS/g" \ "$vm_template_file" \ > $vm_dir/libvirt.xml diff --git a/deploy/launch.sh b/deploy/launch.sh index 98d9e4d6..2b32c337 100755 --- a/deploy/launch.sh +++ b/deploy/launch.sh @@ -15,6 +15,7 @@ mkdir -p $WORK_DIR/script export DEPLOY_FIRST_TIME=${DEPLOY_FIRST_TIME:-"true"} export DEPLOY_RECOVERY=${DEPLOY_RECOVERY:-"false"} +source ${COMPASS_DIR}/deploy/conf/base.conf source ${COMPASS_DIR}/deploy/prepare.sh prepare_python_env source ${COMPASS_DIR}/util/log.sh @@ -24,7 +25,6 @@ check_input_para source $(process_default_para $*) || exit 1 source ${COMPASS_DIR}/deploy/conf/${FLAVOR}.conf source ${COMPASS_DIR}/deploy/conf/${TYPE}.conf -source ${COMPASS_DIR}/deploy/conf/base.conf source ${COMPASS_DIR}/deploy/conf/compass.conf source ${COMPASS_DIR}/deploy/network.sh source ${COMPASS_DIR}/deploy/host_${TYPE}.sh diff --git a/deploy/network.sh b/deploy/network.sh index 698771b3..eea62277 100755 --- a/deploy/network.sh +++ b/deploy/network.sh @@ -76,9 +76,9 @@ function setup_bridge_external() sudo virsh net-destroy external sudo virsh net-undefine external - #save_network_info + save_network_info sed -e "s/REPLACE_NAME/external/g" \ - -e "s/REPLACE_OVS/br-external_nat/g" \ + -e "s/REPLACE_OVS/br-external/g" \ $COMPASS_DIR/deploy/template/network/bridge_ovs.xml \ > $WORK_DIR/network/external.xml @@ -86,14 +86,12 @@ function setup_bridge_external() sudo virsh net-start external sudo virsh net-autostart external - python $COMPASS_DIR/deploy/setup_vnic.py } function recover_bridge_external() { sudo virsh net-start external - python 
$COMPASS_DIR/deploy/setup_vnic.py
 }
 
 function setup_nat_net() {
@@ -128,7 +126,12 @@ function recover_nat_net() {
 
 function setup_virtual_net() {
     setup_nat_net install $INSTALL_GW $INSTALL_NETMASK
-    setup_nat_net external_nat $EXT_NAT_GW $EXT_NAT_MASK $EXT_NAT_IP_START $EXT_NAT_IP_END
+
+    if [[ "$NAT_EXTERNAL" == "false" ]]; then
+        setup_bridge_external
+    else
+        setup_nat_net external_nat $EXT_NAT_GW $EXT_NAT_MASK $EXT_NAT_IP_START $EXT_NAT_IP_END
+    fi
 }
 
 function recover_virtual_net() {
diff --git a/deploy/template/vm/host-aarch64.xml b/deploy/template/vm/host-aarch64.xml
index 7f32a198..69202fd2 100644
--- a/deploy/template/vm/host-aarch64.xml
+++ b/deploy/template/vm/host-aarch64.xml
@@ -9,9 +9,6 @@
     <boot dev='hd'/>
     <boot dev='network'/>
   </os>
-  <features>
-    <gic version='2'/>
-  </features>
   <cpu mode='host-passthrough'/>
   <clock offset='utc'/>
   <on_poweroff>destroy</on_poweroff>