-rwxr-xr-x  build.sh | 12
-rw-r--r--  build/build.yaml | 10
-rw-r--r--  build/parser.py | 43
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml | 0
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml | 7
-rwxr-xr-x  deploy/adapters/ansible/kubernetes/roles/kargo/files/generate_inventories.py | 95
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors.repo | 32
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors_aarch64.repo | 0
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2 | 34
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml | 178
-rwxr-xr-x  deploy/adapters/ansible/kubernetes/roles/kargo/templates/extra-vars.yml.j2 | 40
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/templates/inventory.j2 | 26
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml | 6
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml | 2
-rw-r--r--  deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml | 1
-rwxr-xr-x  deploy/adapters/ansible/roles/config-osa/tasks/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/roles/post-openstack/files/manager.py.patch | 12
-rw-r--r--  deploy/adapters/ansible/roles/post-openstack/tasks/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/post-openstack/tasks/nova_patch.yml | 23
-rw-r--r--  deploy/adapters/ansible/roles/setup-openvswitch/tasks/main.yml | 2
-rwxr-xr-x  deploy/adapters/ansible/roles/setup-openvswitch/templates/controller.j2 | 2
-rw-r--r--  deploy/client.py | 14
-rwxr-xr-x  deploy/compass_conf/templates/cobbler/ubuntu-16.04.3-server-x86_64/system.tmpl | 56
-rwxr-xr-x  deploy/compass_vm.sh | 14
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-nosdn-bar-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-nosdn-kvm-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-nosdn-openo-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-nosdn-stor4nfv-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-odl-sfc-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml | 8
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml | 8
-rw-r--r--  deploy/conf/vm_environment/k8-nosdn-onap-noha.yml | 46
-rw-r--r--  deploy/config_parse.py | 99
-rwxr-xr-x  deploy/deploy_host.sh | 2
-rwxr-xr-x  deploy/deploy_parameter.sh | 2
-rwxr-xr-x  deploy/host_baremetal.sh | 10
-rwxr-xr-x  deploy/host_virtual.sh | 59
-rwxr-xr-x  deploy/launch.sh | 1
-rwxr-xr-x  deploy/prepare.sh | 7
-rw-r--r--  docs/release/installation/vmdeploy.rst | 4
-rw-r--r--  plugins/onap/roles/tasks/Ubuntu.yml | 117
-rw-r--r--  plugins/onap/roles/tasks/main.yml | 11
-rw-r--r--  plugins/onap/roles/templates/exports.j2 | 1
-rw-r--r--  plugins/onap/roles/vars/main.yml | 13
-rw-r--r--  plugins/stor4nfv/roles/os-stor4nfv/tasks/post-install.yml | 11
-rw-r--r--  util/check_valid.py | 14
-rwxr-xr-x  util/docker-compose/roles/machines/tasks/main.yml | 14
54 files changed, 581 insertions(+), 567 deletions(-)
diff --git a/build.sh b/build.sh
index d7118432..db1ce9fb 100755
--- a/build.sh
+++ b/build.sh
@@ -99,13 +99,13 @@ function download_packages()
function build_tar()
{
cd $CACHE_DIR
- sudo rm -rf compass_dists
+ sudo rm -rf compass_dists $TAR_DIR/$TAR_NAME
mkdir -p compass_dists
- sudo cp -f *.tar *.iso *.tgz compass_dists
- sudo cp $COMPASS_PATH/build/build*.yaml compass_dists
- sudo cp -rf $COMPASS_PATH/util/docker-compose ./
- sudo tar -zcf compass.tar.gz docker-compose compass_dists
- sudo mv compass.tar.gz $TAR_DIR/$TAR_NAME
+ cp -f *.tar *.iso *.tgz compass_dists
+ cp $COMPASS_PATH/build/build*.yaml compass_dists
+ cp -rf $COMPASS_PATH/util/docker-compose ./
+ tar -zcf compass.tar.gz docker-compose compass_dists
+ mv compass.tar.gz $TAR_DIR/$TAR_NAME
cd -
}
diff --git a/build/build.yaml b/build/build.yaml
index ef42ba9c..75faa280 100644
--- a/build/build.yaml
+++ b/build/build.yaml
@@ -15,7 +15,7 @@ packages:
- http://artifacts.opnfv.org/compass4nfv/package/master/ubuntu-16.04.3-server-amd64.iso
- name: harbor-offline-installer-v1.5.0.tgz
- description: "The package of harbor v1.5.5"
+ description: "The package of harbor v1.5.0"
get_method: cached
url:
- http://192.168.137.222/download/harbor-offline-installer-v1.5.0.tgz
@@ -24,17 +24,17 @@ packages:
- name: compass-deck
description: "RESTful API and DB Handlers for Compass"
get_method: docker
- url: compass4nfv/compass-deck:7.0.0
+ url: opnfv/compass-deck:latest
- name: compass-tasks-osa
description: "compass task container integrated with openstack-ansible"
get_method: docker
- url: compass4nfv/compass-tasks-osa:7.0.0
+ url: opnfv/compass-tasks-osa:latest
- name: compass-tasks-k8s
description: "compass task container integrated with kubespray"
get_method: docker
- url: compass4nfv/compass-tasks-k8s:7.0.0
+ url: huxinhui/compass-tasks-k8s:7.0.1
- name: compass-cobbler
description: "cobbler container for compass"
@@ -54,7 +54,7 @@ packages:
- name: yardstick
description: "yardstick container for compass"
get_method: docker
- url: opnfv/yardstick:latest
+ url: opnfv/yardstick:stable
- name: compass-repo-osa-ubuntu
description: "compass repo container for deployment"
diff --git a/build/parser.py b/build/parser.py
index b0a87f93..971d2844 100644
--- a/build/parser.py
+++ b/build/parser.py
@@ -18,6 +18,14 @@ def load_env():
return cache_dir
+def exec_command(cmd, ignore_error=False):
+ rc = os.system(cmd)
+ if not ignore_error and rc != 0:
+ sys.exit(1)
+ else:
+ return rc
+
+
def get_from_cache(cache, package):
filename = package.get("name")
remotefile = list(package.get("url"))
@@ -25,13 +33,13 @@ def get_from_cache(cache, package):
localmd5file = localfile + ".md5"
print "removing local md5 file...."
cmd = "rm -f " + localmd5file
- os.system(cmd)
+ exec_command(cmd)
print "downloading remote md5 file to local...."
for file in remotefile:
remotemd5file = file + ".md5"
cmd = "curl --connect-timeout 10 -o {0} {1}".format(
localmd5file, remotemd5file)
- rc = os.system(cmd)
+ rc = exec_command(cmd, True)
if os.path.exists(localfile):
print "calculate md5sum of local file"
cmd = "md5sum " + localfile + "|cut -d ' ' -f 1"
@@ -45,43 +53,38 @@ def get_from_cache(cache, package):
if rc == 0:
break
print "downloading remote file to local...."
- cmd = "aria2c --max-connection-per-server=4 --allow-overwrite=true --dir={0} \
- --out={1} {2}".format(cache, filename, " ".join(remotefile))
+ cmd = "aria2c --max-tries 1 --max-connection-per-server=4 \
+ --allow-overwrite=true --dir={0} --out={1} {2}".format(
+ cache, filename, " ".join(remotefile))
print cmd
- rc = os.system(cmd)
- if rc != 0:
- sys.exit(1)
+ exec_command(cmd)
def get_from_git(cache, package):
localfile = cache + "/" + package.get("name")
cmd = "rm -rf " + localfile
print cmd
- os.system(cmd)
+ exec_command(cmd)
cmd = "git clone " + package.get("url") + " " + localfile
print cmd
- rc = os.system(cmd)
- if rc != 0:
- sys.exit(1)
+ exec_command(cmd)
def get_from_docker(cache, package):
+ package_ouput = cache+"/"+package.get("name")+".tar"
cmd = "sudo docker pull "+package.get("url")
- os.system(cmd)
- cmd = "sudo docker save "+package.get("url")+" -o "+cache+"/"
- cmd += package.get("name")+".tar"
- rc = os.system(cmd)
- if rc != 0:
- sys.exit(1)
+ exec_command(cmd)
+ cmd = "sudo docker save "+package.get("url")+" -o "+package_ouput
+ exec_command(cmd)
+ cmd = "user=$(whoami); sudo chown -R $user:$user "+package_ouput
+ exec_command(cmd)
def get_from_curl(cache, package):
cmd = "curl --connect-timeout 10 -o " + cache + "/"
cmd += package.get("name") + " " + package.get("url")
print cmd
- rc = os.system(cmd)
- if rc != 0:
- sys.exit(1)
+ exec_command(cmd)
def usage():
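
For reference, the error-handling wrapper that build/parser.py now routes every shell call through reduces to the following standalone sketch; the curl and aria2c commands at the bottom are placeholders, not the project's real package URLs:

    import os
    import sys

    def exec_command(cmd, ignore_error=False):
        # Run a shell command; abort the build on a non-zero exit status
        # unless the caller explicitly tolerates failure.
        rc = os.system(cmd)
        if not ignore_error and rc != 0:
            sys.exit(1)
        return rc

    # Hypothetical usage: the md5 fetch may fail (ignore_error=True),
    # but the package download itself must succeed.
    exec_command("curl --connect-timeout 10 -o pkg.md5 http://example.com/pkg.md5", True)
    exec_command("aria2c --max-tries 1 --allow-overwrite=true --out=pkg http://example.com/pkg")
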
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml
index ae3dce76..ae3dce76 100644..100755
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml
deleted file mode 100644
index e13e33ca..00000000
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# Override default kubespray variables
-
-# Just a placeholder to satisfy ansible
-dummy_var: 0
-
-# helm_enabled: true
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/generate_inventories.py b/deploy/adapters/ansible/kubernetes/roles/kargo/files/generate_inventories.py
deleted file mode 100755
index 8f836011..00000000
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/files/generate_inventories.py
+++ /dev/null
@@ -1,95 +0,0 @@
-##############################################################################
-# Copyright (c) 2016-2018 compass4nfv and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import sys
-import os
-from jinja2 import Environment
-try:
- import json
-except ImportError:
- import simplejson as json
-
-INVENTORY_TEMPLATE = """
-[all]
-{% for host, vales in hostvars.iteritems() %}
-{{ host }} ansible_ssh_host={{ vales['ansible_ssh_host'] }} \
-ansible_ssh_pass=root ansible_user=root
-{% endfor %}
-[kube-master]
-{% for host in kube_master %}
-{{ host }}
-{% endfor %}
-
-[etcd]
-{% for host in etcd %}
-{{ host }}
-{% endfor %}
-
-[kube-node]
-{% for host in kube_node %}
-{{ host }}
-{% endfor %}
-
-[k8s-cluster:children]
-kube-node
-kube-master
-
-[calico-rr]
-[vault]
-"""
-
-
-def _byteify(data, ignore_dicts=False):
-
- if isinstance(data, unicode):
- return data.encode('utf-8')
- if isinstance(data, list):
- return [_byteify(item, ignore_dicts=True) for item in data]
- if isinstance(data, dict) and not ignore_dicts:
- return {
- _byteify(key, ignore_dicts=True):
- _byteify(value, ignore_dicts=True)
- for key, value in data.iteritems()
- }
- return data
-
-
-def load_inventory(inventory):
- if not os.path.exists(inventory):
- raise RuntimeError('file: %s not exist' % inventory)
- with open(inventory, 'r') as fd:
- return json.load(fd, object_hook=_byteify)
-
-
-def create_inventory_file(inventories_path,
- hostvars, kube_master, etcd, kube_node):
- content = Environment().from_string(INVENTORY_TEMPLATE).render(
- hostvars=hostvars, kube_master=kube_master,
- etcd=etcd, kube_node=kube_node)
- with open(inventories_path, 'w+') as f:
- f.write(content)
-
-
-def main(inventories_path, local_inventory):
- inventory_data = load_inventory(local_inventory)
- hostvars = inventory_data['_meta']['hostvars']
- kube_node = inventory_data['kube_node']['hosts']
- kube_master = inventory_data['kube_master']['hosts']
- etcd = inventory_data['etcd']['hosts']
-
- create_inventory_file(inventories_path,
- hostvars, kube_master, etcd, kube_node)
-
-
-if __name__ == "__main__":
- path = yaml.load(sys.argv[1])
- local_inventory = yaml.load(sys.argv[2])
-
- main(path, local_inventory)
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors.repo b/deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors.repo
deleted file mode 100644
index 4900db69..00000000
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors.repo
+++ /dev/null
@@ -1,32 +0,0 @@
-[base]
-name=CentOS-$releasever - Base
-mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
-#released updates
-[updates]
-name=CentOS-$releasever - Updates
-mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
-#additional packages that may be useful
-[extras]
-name=CentOS-$releasever - Extras
-mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
-#additional packages that extend functionality of existing packages
-[centosplus]
-name=CentOS-$releasever - Plus
-mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/
-gpgcheck=1
-enabled=0
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors_aarch64.repo b/deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors_aarch64.repo
index 1d622d3c..1d622d3c 100644..100755
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors_aarch64.repo
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/files/mirrors_aarch64.repo
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2 b/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2
deleted file mode 100644
index d998d4cb..00000000
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-[req]
-req_extensions = v3_req
-distinguished_name = req_distinguished_name
-[req_distinguished_name]
-[ v3_req ]
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-subjectAltName = @alt_names
-[alt_names]
-DNS.1 = kubernetes
-DNS.2 = kubernetes.default
-DNS.3 = kubernetes.default.svc
-DNS.4 = kubernetes.default.svc.{{ dns_domain }}
-DNS.5 = localhost
-{% for host in groups['kube-master'] %}
-DNS.{{ 5 + loop.index }} = {{ host }}
-{% endfor %}
-{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
-{% set idx = groups['kube-master'] | length | int + 5 + 1 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
-{% endif %}
-{% for host in groups['kube-master'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-{% endfor %}
-{% set idx = groups['kube-master'] | length | int * 2 + 1 %}
-IP.{{ idx }} = {{ kube_apiserver_ip }}
-IP.{{ idx + 1 }} = 127.0.0.1
-{% if supplementary_addresses_in_ssl_keys is defined %}
-{% set is = idx + 1 %}
-{% for addr in supplementary_addresses_in_ssl_keys %}
-IP.{{ is + loop.index }} = {{ addr }}
-{% endfor %}
-{% endif %}
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
index 338f2574..512121e2 100644..100755
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
@@ -7,166 +7,39 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- name: clean local repo conf
- file:
- path: /etc/yum.repos.d
- state: absent
- run_once: "True"
- when: ansible_os_family == 'RedHat'
-
-- name: create local repo conf dir
- file:
- path: /etc/yum.repos.d
- state: directory
- run_once: "True"
- when: ansible_os_family == 'RedHat'
-
-- name: configure local mirror repo
- copy:
- src: "{{ item }}"
- dest: /etc/yum.repos.d/mirrors.repo
- with_first_found:
- - mirrors_{{ ansible_architecture }}.repo
- - mirrors.repo
- run_once: "True"
- when: ansible_os_family == 'RedHat'
-
-- name: clean local pip conf to use official pip repo
- file:
- path: /root/.pip/pip.conf
- state: absent
- run_once: "True"
-
-- name: install dependency for ansible update
- yum:
- name: "{{ item }}"
- state: latest
- with_items:
- - git
- - libffi-devel
- - openssl-devel
- - python-devel
- run_once: "True"
- when: ansible_os_family == 'RedHat'
-
-- name: update python packages
- pip:
- name: "{{ item }}"
- state: latest
- with_items:
- - netaddr
- - jinja2
-
-- name: copy inventories generate script
- copy:
- src: generate_inventories.py
- dest: /tmp/generate_inventories.py
+- name: check the kubespray sample path
+ stat: path=/opt/kargo_k8s/inventory/sample
+ register: sample_stat
+
+- name: Move kubespray group_vars folder
+ command: mv /opt/kargo_k8s/inventory/sample/group_vars /opt/kargo_k8s/inventory/
+ when: sample_stat.stat.exists
+
+- name: generate kubespray inventory configure file
+ template:
+ src: "inventory.j2"
+ dest: "/opt/kargo_k8s/inventory/inventory.cfg"
tags:
- ansible
-- name: copy inventoriy.json file
- copy:
- src: "{{ run_dir }}/inventories/inventory.json"
- dest: /tmp/inventory.json
- tags:
- - ansible
-
-- name: generate kargo inventories
- shell: >
- python /tmp/generate_inventories.py \
- "/opt/kargo_k8s/inventory/inventory.cfg" \
- "/tmp/inventory.json"
- tags:
- - ansible
-
-- name: configure target hosts
- shell: |
- cd /opt/kargo_k8s
- ansible -i inventory/inventory.cfg -m ping all
- ansible -i inventory/inventory.cfg all -m shell -a "rm /etc/yum.repos.d/*"
- ansible -i inventory/inventory.cfg all -m copy -a \
- "src=/etc/yum.repos.d/mirrors.repo dest=/etc/yum.repos.d"
- tags:
- - ansible
-
-- name: enable helm
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/k8s-cluster.yml
- regexp: '^helm_enabled:'
- line: 'helm_enabled: {{ helm_flag }}'
-
-- name: enable external lb | set lb domain_nam
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/all.yml
- regexp: '^## apiserver_loadbalancer_domain_name:'
- line: 'apiserver_loadbalancer_domain_name: {{ apiserver_loadbalancer_domain_name }}'
-
-- name: enable external lb |
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/all.yml
- regexp: '^#loadbalancer_apiserver:'
- line: 'loadbalancer_apiserver:'
-
-- name: enable external lb | set vip address
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/all.yml
- regexp: '^# address: 1.2.3.4'
- line: ' address: {{ vipaddress }}'
-
-- name: enable external lb | set vip port
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/all.yml
- regexp: '^# port: 1234'
- line: ' port: {{ exlb_port }}'
-
-- name: enable internal lb
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/all.yml
- regexp: '^#loadbalancer_apiserver_localhost: true'
- line: 'loadbalancer_apiserver_localhost: true'
-
-- name: enable http proxy
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/all.yml
- regexp: '^#http_proxy:'
- line: 'http_proxy: {{ http_proxy }}'
- when: http_proxy != ''
-
-- name: enable https proxy
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/all.yml
- regexp: '^#https_proxy:'
- line: 'https_proxy: {{ https_proxy }}'
- when: https_proxy !=''
-
-- name: use the user name and password login the dashboard
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/k8s-cluster.yml
- regexp: '^#kube_basic_auth: false'
- line: 'kube_basic_auth: true'
-
-- name: add vip to ssl keys
- lineinfile:
- dest: /opt/kargo_k8s/inventory/group_vars/k8s-cluster.yml
- line: 'supplementary_addresses_in_ssl_keys: [{{ vipaddress }}]'
-
-- name: rm openssl file
- file:
- path: /opt/kargo_k8s/roles/kubernetes/secrets/templates/openssl.conf.j2
- state: absent
-
-- name: copy openssl.conf.j2
- copy:
- src: openssl.conf.j2
- dest: /opt/kargo_k8s/roles/kubernetes/secrets/templates/openssl.conf.j2
-
-- name: copy overrided variables
+- name: copy overrided variables for arm architecture
copy:
src: "{{ item }}"
dest: /opt/kargo_k8s/extra-vars.yml
with_first_found:
- extra-vars-{{ ansible_architecture }}.yml
- extra-vars.yml
+ - skip: true
+
+- name: copy overrided variables for kubespray
+ template:
+ src: "{{ item }}"
+ dest: "/opt/kargo_k8s/extra-vars.yml"
+ with_first_found:
+ - extra-vars-{{ ansible_architecture }}.yml.j2
+ - extra-vars.yml.j2
+ tags:
+ - ansible
- name: copy 2flannel playbook to kargo
copy:
@@ -277,9 +150,10 @@ kube-controller-manager.manifest.j2",
- name: run kargo playbook
shell: |
- cd /opt/kargo_k8s
ansible-playbook -i inventory/inventory.cfg cluster.yml \
-e "@extra-vars.yml" -b -v 2>&1 | tee kargo.log
+ args:
+ chdir: "/opt/kargo_k8s"
tags:
- ansible
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/templates/extra-vars.yml.j2 b/deploy/adapters/ansible/kubernetes/roles/kargo/templates/extra-vars.yml.j2
new file mode 100755
index 00000000..1d7a2fa2
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/templates/extra-vars.yml.j2
@@ -0,0 +1,40 @@
+---
+# Override default kubespray variables
+
+#dashboard_port: "{{dashboard_port|default('31746')}}"
+
+# kubespray configure
+apiserver_loadbalancer_domain_name: "{{ public_vip.ip }}"
+loadbalancer_apiserver:
+ address: "{{ public_vip.ip }}"
+ port: {{ loadbalancer_apiserver_port|default(8383) }}
+loadbalancer_apiserver_localhost: {{ loadbalancer_apiserver_localhost|default(true) }}
+
+kube_basic_auth: {{ kube_basic_auth |default(true) }}
+kube_network_plugin: {{ kube_network_plugin|default('calico') }}
+# Monitoring apps for k8s
+efk_enabled: {{ efk_enabled |default(true)}}
+# Helm deployment
+helm_enabled: {{ helm_enabled |default(true)}}
+# Istio deployment
+istio_enabled: {{ istio_enabled |default(false)}}
+supplementary_addresses_in_ssl_keys: ["{{ public_vip.ip }}"]
+#storage
+local_volume_provisioner_enabled: {{local_volume_provisioner_enabled |default(false) }}
+# local_volume_provisioner_namespace: "system_namespace"
+# local_volume_provisioner_base_dir: /mnt/disks
+# local_volume_provisioner_mount_dir: /mnt/disks
+# local_volume_provisioner_storage_class: local-storage
+
+# CephFS provisioner deployment
+cephfs_provisioner_enabled: {{ cephfs_provisioner_enabled |default(false)}}
+# cephfs_provisioner_namespace: "cephfs-provisioner"
+# cephfs_provisioner_cluster: ceph
+# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
+# cephfs_provisioner_admin_id: admin
+# cephfs_provisioner_secret: secret
+# cephfs_provisioner_storage_class: cephfs
+# cephfs_provisioner_reclaim_policy: Delete
+# cephfs_provisioner_claim_root: /volumes
+# cephfs_provisioner_deterministic_names: true
+
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/templates/inventory.j2 b/deploy/adapters/ansible/kubernetes/roles/kargo/templates/inventory.j2
new file mode 100644
index 00000000..0120ae18
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/templates/inventory.j2
@@ -0,0 +1,26 @@
+[all]
+{% for host, vales in hostvars.iteritems() %}
+{{ host }} ansible_ssh_host={{ vales['ansible_ssh_host'] }} ansible_ssh_pass=root ansible_user=root
+{% endfor %}
+
+[kube-master]
+{% for host in hostvars[inventory_hostname]['groups']['kube_master'] %}
+{{ host }}
+{% endfor %}
+
+[etcd]
+{% for host in hostvars[inventory_hostname]['groups']['etcd'] %}
+{{ host }}
+{% endfor %}
+
+[kube-node]
+{% for host in hostvars[inventory_hostname]['groups']['kube_node'] %}
+{{ host }}
+{% endfor %}
+
+[k8s-cluster:children]
+kube-node
+kube-master
+
+[calico-rr]
+[vault]
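
A minimal sketch of how inventory.j2 is consumed: rendering it with Jinja2 against host data produces the INI-style inventory.cfg that the kargo playbook run expects. The template below is a simplified stand-in for the real one, and the host names and addresses are illustrative only:

    from jinja2 import Environment

    # Simplified stand-in for inventory.j2; in the role, hostvars and
    # groups are supplied by Ansible at template time.
    TEMPLATE = """[all]
    {% for host, hv in hostvars.items() %}
    {{ host }} ansible_ssh_host={{ hv['ansible_ssh_host'] }} ansible_ssh_pass=root ansible_user=root
    {% endfor %}
    [kube-master]
    {% for host in groups['kube_master'] %}
    {{ host }}
    {% endfor %}
    """

    hostvars = {"host1": {"ansible_ssh_host": "10.1.0.50"},   # example data only
                "host2": {"ansible_ssh_host": "10.1.0.51"}}
    groups = {"kube_master": ["host1"]}

    print(Environment().from_string(TEMPLATE).render(hostvars=hostvars, groups=groups))
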
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml
index 80490955..af9c9675 100644..100755
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml
@@ -1,9 +1,3 @@
---
-helm_flag: true
-apiserver_loadbalancer_domain_name: "{{ public_vip.ip }}"
-vipaddress: "{{ public_vip.ip }}"
-exlb_port: 8383
-kubelet_fail_swap_on: false
-
http_proxy: "{{ proxy }}"
https_proxy: "{{ proxy }}"
diff --git a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml
index c915ec09..844d76a3 100644
--- a/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/pre-k8s/tasks/main.yml
@@ -15,4 +15,6 @@
- name: close the swap partition
shell: |
+ systemctl disable swap.target
+ systemctl mask swap.target
swapoff -a
diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
index 874fcfb5..152a7dc0 100644
--- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
@@ -107,6 +107,7 @@
- utility_all[0]
- network_hosts[0]
- horizon
+ - compute
remote_user: root
roles:
- post-openstack
diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
index 6bb60f92..ab2714a9 100755
--- a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
@@ -363,6 +363,12 @@
insertafter: '^ cache_prep_commands:'
line: ' rm /etc/resolv.conf || true'
+- name: fix apt prompt issue
+ lineinfile:
+ dest: /etc/ansible/roles/lxc_hosts/vars/ubuntu-16.04.yml
+ state: absent
+ regexp: "apt-get upgrade -y"
+
- name: set pre-staged retry to 120
replace:
dest: /etc/ansible/roles/lxc_hosts/tasks/lxc_cache_preparation_systemd_new.yml
@@ -422,3 +428,9 @@
when:
- checkresult.rc == 0
- offline_deployment is defined and offline_deployment == "Disable"
+
+- name: fix keepalived
+ lineinfile:
+ dest: /opt/openstack-ansible/inventory/group_vars/haproxy/keepalived.yml
+ regexp: 'check_script: "/bin/kill -0 `cat /var/run/haproxy.pid`"'
+ line: ' check_script: "/bin/kill -0 `cat /var/run/haproxy.pid` || true"'
diff --git a/deploy/adapters/ansible/roles/post-openstack/files/manager.py.patch b/deploy/adapters/ansible/roles/post-openstack/files/manager.py.patch
new file mode 100644
index 00000000..198ff5be
--- /dev/null
+++ b/deploy/adapters/ansible/roles/post-openstack/files/manager.py.patch
@@ -0,0 +1,12 @@
+--- manager.py 2018-11-07 03:51:22.764685289 -0800
++++ manager.py.new 2018-11-07 03:58:21.014139558 -0800
+@@ -314,8 +314,7 @@
+ if self._events is None:
+ # NOTE(danms): We really should have a more specific error
+ # here, but this is what we use for our default error case
+- raise exception.NovaException('In shutdown, no new events '
+- 'can be scheduled')
++ self._events = {}
+
+ @utils.synchronized(self._lock_name(instance))
+ def _create_or_get_event():
diff --git a/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml
index 0bd9aeff..2a63acf0 100644
--- a/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml
@@ -86,3 +86,7 @@
state: restarted
when:
- inventory_hostname in groups['horizon']
+
+- include: nova_patch.yml
+ when:
+ - inventory_hostname in groups['compute']
diff --git a/deploy/adapters/ansible/roles/post-openstack/tasks/nova_patch.yml b/deploy/adapters/ansible/roles/post-openstack/tasks/nova_patch.yml
new file mode 100644
index 00000000..d9cfad9c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/post-openstack/tasks/nova_patch.yml
@@ -0,0 +1,23 @@
+##############################################################################
+# Copyright (c) 2016-2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: copy manager.py.patch
+ copy:
+ src: manager.py.patch
+ dest: /openstack/venvs/nova-{{ openstack_release }}/lib/python2.7/site-packages/nova/compute
+
+- name: patch manager.py.patch
+ shell:
+ patch -p0 < manager.py.patch
+ args:
+ chdir: /openstack/venvs/nova-{{ openstack_release }}/lib/python2.7/site-packages/nova/compute
+
+- name: restart nova-compute
+ shell:
+ systemctl restart nova-compute
diff --git a/deploy/adapters/ansible/roles/setup-openvswitch/tasks/main.yml b/deploy/adapters/ansible/roles/setup-openvswitch/tasks/main.yml
index 2deecb3b..0ad47d3c 100644
--- a/deploy/adapters/ansible/roles/setup-openvswitch/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-openvswitch/tasks/main.yml
@@ -19,8 +19,10 @@
- include_vars: "{{ ansible_os_family }}.yml"
when:
+ - odl_sfc is not defined or odl_sfc != "Enable"
- NEUTRON_MECHANISM_DRIVERS[0] == "opendaylight"
- include: odl.yml
when:
+ - odl_sfc is not defined or odl_sfc != "Enable"
- NEUTRON_MECHANISM_DRIVERS[0] == "opendaylight"
diff --git a/deploy/adapters/ansible/roles/setup-openvswitch/templates/controller.j2 b/deploy/adapters/ansible/roles/setup-openvswitch/templates/controller.j2
index 315e9fb9..bdc4d447 100755
--- a/deploy/adapters/ansible/roles/setup-openvswitch/templates/controller.j2
+++ b/deploy/adapters/ansible/roles/setup-openvswitch/templates/controller.j2
@@ -86,3 +86,5 @@ iface br-storage inet static
bridge_ports {{ intf_storage }}
address {{ ip_settings[inventory_hostname]["storage"]["ip"] }}
netmask 255.255.255.0
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/deploy/client.py b/deploy/client.py
index e612160b..3a9b0dec 100644
--- a/deploy/client.py
+++ b/deploy/client.py
@@ -79,8 +79,8 @@ opts = [
cfg.BoolOpt('poll_switches',
help='if the client polls switches',
default=True),
- cfg.StrOpt('machines',
- help='comma separated mac addresses of machines',
+ cfg.StrOpt('machine_file',
+ help='mac addresses and ipmi info of machines',
default=''),
cfg.StrOpt('subnets',
help='comma seperated subnets',
@@ -327,12 +327,12 @@ class CompassClient(object):
'get all machines status: %s, resp: %s', status, resp)
raise RuntimeError('failed to get machines')
- machines_to_add = list(set([
- machine for machine in CONF.machines.split(',')
- if machine
- ]))
+ with open(CONF.machine_file) as fd:
+ machines_to_add = [str(m["mac"]) for m in yaml.load(fd)]
+ resp = byteify(resp)
machines_db = [str(m["mac"]) for m in resp]
+
LOG.info(
'machines in db: %s\n to add: %s',
machines_db,
@@ -983,8 +983,6 @@ class CompassClient(object):
ansible_log = "%s/work/deploy/docker/ansible/run/%s-%s/ansible.log" \
% (compass_dir, CONF.adapter_name, CONF.cluster_name)
os.system("sudo touch %s" % ansible_log)
- os.system("sudo chmod +x -R %s/work/deploy/docker/ansible/run/"
- % compass_dir)
ansible_print = multiprocessing.Process(target=print_log,
args=(ansible_log,))
ansible_print.start()
diff --git a/deploy/compass_conf/templates/cobbler/ubuntu-16.04.3-server-x86_64/system.tmpl b/deploy/compass_conf/templates/cobbler/ubuntu-16.04.3-server-x86_64/system.tmpl
index cfcc883e..366cbc13 100755
--- a/deploy/compass_conf/templates/cobbler/ubuntu-16.04.3-server-x86_64/system.tmpl
+++ b/deploy/compass_conf/templates/cobbler/ubuntu-16.04.3-server-x86_64/system.tmpl
@@ -11,36 +11,38 @@
"proxy": "$getVar('http_proxy', '')",
"modify_interface":
#set networks = $networks
+ #set mac = $mac
#set rekeyed = {}
#set promic_nic = ""
- #for $nic, $val in $networks.iteritems():
- #set ip_key = '-'.join(('ipaddress', $nic))
- #set netmask_key = '-'.join(('netmask', $nic))
- #set mgmt_key = '-'.join(('management', $nic))
- #set static_key = '-'.join(('static', $nic))
- #set $rekeyed[$ip_key] = $val.ip
- #set $rekeyed[$netmask_key] = $val.netmask
- #set $rekeyed[$static_key] = True
+ #for $nic in $mac
+ #set mac_key = '-'.join(('macaddress', $nic))
+ #set $rekeyed[$mac_key] = $mac[$nic]
+ #if $nic in $networks
+ #set val = $networks[$nic]
+ #set ip_key = '-'.join(('ipaddress', $nic))
+ #set netmask_key = '-'.join(('netmask', $nic))
+ #set mgmt_key = '-'.join(('management', $nic))
+ #set static_key = '-'.join(('static', $nic))
+ #set $rekeyed[$ip_key] = $val.ip
+ #set $rekeyed[$netmask_key] = $val.netmask
+ #set $rekeyed[$static_key] = True
- #set dns_key = '-'.join(('dnsname', $nic))
- #if $val.is_mgmt
- #set $rekeyed[$dns_key] = $dns
- #else
- #if '.' in $dns
- #set $dns_name, $dns_domain = $dns.split('.', 1)
- #set $dns_nic = '%s-%s.%s' % ($dns_name, $nic, $dns_domain)
+ #set dns_key = '-'.join(('dnsname', $nic))
+ #if $val.is_mgmt
+ #set $rekeyed[$dns_key] = $dns
#else
- #set $dns_nic = '%s-%s' % ($dns, $nic)
+ #if '.' in $dns
+ #set $dns_name, $dns_domain = $dns.split('.', 1)
+ #set $dns_nic = '%s-%s.%s' % ($dns_name, $nic, $dns_domain)
+ #else
+ #set $dns_nic = '%s-%s' % ($dns, $nic)
+ #end if
+ #set $rekeyed[$dns_key] = $dns_nic
#end if
- #set $rekeyed[$dns_key] = $dns_nic
- #end if
- #if $val.is_promiscuous:
- #set promic_nic = $nic
- #end if
- #if $val.is_mgmt:
- #set mac_key = '-'.join(('macaddress', $nic))
- #set $rekeyed[$mac_key] = $mac
+ #if $val.is_promiscuous
+ #set promic_nic = $nic
+ #end if
#end if
#end for
#set nic_info = json.dumps($rekeyed, encoding='utf-8')
@@ -71,5 +73,9 @@
"ignore_proxy": "$no_proxy",
"local_repo": "$getVar('local_repo', '')",
"disk_num": "1"
- }
+ },
+ "power_address": "$power_manage.ip",
+ "power_user": "$power_manage.username",
+ "power_pass": "$power_manage.password",
+ "power_type": "$getVar('power_type', 'ipmilan')"
}
diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh
index f7bc378e..edce626b 100755
--- a/deploy/compass_vm.sh
+++ b/deploy/compass_vm.sh
@@ -12,15 +12,15 @@ rsa_file=$compass_vm_dir/boot.rsa
ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file"
function check_container_alive() {
- docker exec -it compass-deck bash -c "exit" 1>/dev/null 2>&1
+ sudo docker exec -it compass-deck bash -c "exit"
local deck_state=$?
- docker exec -it compass-tasks bash -c "exit" 1>/dev/null 2>&1
+ sudo docker exec -it compass-tasks bash -c "exit"
local tasks_state=$?
- docker exec -it compass-cobbler bash -c "exit" 1>/dev/null 2>&1
+ sudo docker exec -it compass-cobbler bash -c "exit"
local cobbler_state=$?
- docker exec -it compass-db bash -c "exit" 1>/dev/null 2>&1
+ sudo docker exec -it compass-db bash -c "exit"
local db_state=$?
- docker exec -it compass-mq bash -c "exit" 1>/dev/null 2>&1
+ sudo docker exec -it compass-mq bash -c "exit"
local mq_state=$?
if [ $((deck_state||tasks_state||cobbler_state||db_state||mq_state)) == 0 ]; then
@@ -47,9 +47,7 @@ function install_compass_core() {
}
function set_compass_machine() {
- local config_file=$WORK_DIR/installer/docker-compose/group_vars/all
- sed -i '/pxe_boot_macs/d' $config_file
- echo "pxe_boot_macs: [${machines}]" >> $config_file
+ cp $WORK_DIR/script/machine $WORK_DIR/docker/
ansible-playbook $WORK_DIR/installer/docker-compose/add_machine.yml
}
diff --git a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml
index 2cedcf4d..29ca9ac0 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml
@@ -31,7 +31,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- kube_master
@@ -39,10 +39,10 @@ hosts:
- ha
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- kube_master
diff --git a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml
index 6cf62db7..6efdadc3 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-stor4nfv-ha.yml
@@ -34,7 +34,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- kube_master
@@ -42,10 +42,10 @@ hosts:
- ha
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- kube_master
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-bar-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-bar-ha.yml
index 59e7180a..32f561d5 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-bar-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-bar-ha.yml
@@ -35,7 +35,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -43,10 +43,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-kvm-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-kvm-ha.yml
index 9c00916b..03267ffc 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-kvm-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-kvm-ha.yml
@@ -35,7 +35,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -43,10 +43,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml
index 6b329be6..7ca9e795 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-nofeature-ha.yml
@@ -32,7 +32,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -40,10 +40,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-openo-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-openo-ha.yml
index 6027bddb..b2440ee4 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-openo-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-openo-ha.yml
@@ -40,7 +40,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -48,10 +48,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-stor4nfv-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-stor4nfv-ha.yml
index 5045975f..5ce9b47a 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-stor4nfv-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-nosdn-stor4nfv-ha.yml
@@ -35,7 +35,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -43,10 +43,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml
index 287383b2..bfab0151 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-ocl-nofeature-ha.yml
@@ -33,17 +33,17 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- compute
- ceph-osd
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- compute
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-odl-sfc-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-odl-sfc-ha.yml
index 8095fe03..c5fd84ee 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-odl-sfc-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-odl-sfc-ha.yml
@@ -38,7 +38,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -47,10 +47,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml
index d2447c1d..71b114f2 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-moon-ha.yml
@@ -37,7 +37,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -46,10 +46,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml
index 71edf08c..2c2757a6 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l2-nofeature-ha.yml
@@ -36,7 +36,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -45,10 +45,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml
index e3e71d0b..cc621d76 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-odl_l3-nofeature-ha.yml
@@ -38,7 +38,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -47,10 +47,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml
index fb4b1788..57d0a2d5 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-onos-nofeature-ha.yml
@@ -33,7 +33,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -42,10 +42,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml
index 1f40241f..c8aee2dc 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/os-onos-sfc-ha.yml
@@ -35,7 +35,7 @@ hosts:
mac: 'D8:49:0B:DA:5A:B7'
interfaces:
- eth1: 'D8:49:0B:DA:5A:B8'
- ipmiIp: 172.16.130.27
+ ipmiIp: 172.16.130.29
ipmiPass: Opnfv@pod1
roles:
- controller
@@ -44,10 +44,10 @@ hosts:
- ceph-mon
- name: host3
- mac: '78:D7:52:A0:B1:99'
+ mac: '70:7B:E8:77:7B:60'
interfaces:
- - eth1: '78:D7:52:A0:B1:9A'
- ipmiIp: 172.16.130.29
+ - eth1: '70:7B:E8:77:7B:5F'
+ ipmiIp: 172.16.130.27
ipmiPass: Opnfv@pod1
roles:
- controller
diff --git a/deploy/conf/vm_environment/k8-nosdn-onap-noha.yml b/deploy/conf/vm_environment/k8-nosdn-onap-noha.yml
new file mode 100644
index 00000000..d7b85a85
--- /dev/null
+++ b/deploy/conf/vm_environment/k8-nosdn-onap-noha.yml
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+---
+TYPE: virtual
+FLAVOR: cluster
+
+plugins:
+ - onap: "Enable"
+
+hosts:
+ - name: host1
+ roles:
+ - kube_master
+ - etcd
+ - ha
+
+ - name: host2
+ roles:
+ - kube_node
+
+ - name: host3
+ roles:
+ - kube_node
+
+ - name: host4
+ roles:
+ - kube_node
+
+ - name: host5
+ roles:
+ - kube_node
+
+ - name: host6
+ roles:
+ - kube_node
+
+ - name: host7
+ roles:
+ - kube_node
diff --git a/deploy/config_parse.py b/deploy/config_parse.py
index 9cdf1acb..88a31e9a 100644
--- a/deploy/config_parse.py
+++ b/deploy/config_parse.py
@@ -11,27 +11,44 @@ import os
import netaddr
import yaml
import sys
+import random
from Cheetah.Template import Template
-def init(file):
+def load_yaml(file):
with open(file) as fd:
return yaml.safe_load(fd)
-def export_env_dict(env_dict, ofile, direct=False):
- if not os.path.exists(ofile):
- raise IOError("output file: %s not exist" % ofile)
+def dump_yaml(data, file):
+ with open(file, "w") as fd:
+ yaml.safe_dump(data, fd, default_flow_style=False)
+
+
+def mac_generator():
+ def random_hex():
+ return random.choice("0123456789ABCDEF")
+ mac = "00:00"
+ for i in xrange(4):
+ mac += ":{0}{1}".format(random_hex(), random_hex())
+ return mac
+
+
+def export_env_dict(env_dict, output_path, direct=False):
+ if not os.path.exists(output_path):
+ raise IOError("output file: %s not exist" % output_path)
if direct:
for k, v in env_dict.items():
- os.system("echo 'export %s=\"%s\"' >> %s" % (k, v, ofile))
+ os.system("echo 'export %s=\"%s\"' >> %s" %
+ (k, v, output_path))
else:
for k, v in env_dict.items():
- os.system("echo 'export %s=${%s:-%s}' >> %s" % (k, k, v, ofile))
+ os.system("echo 'export %s=${%s:-%s}' >> %s" %
+ (k, k, v, output_path))
def decorator(func):
- def wrapter(s, seq):
+ def wrapter(s, seq=None):
host_list = s.get('hosts', [])
result = []
for host in host_list:
@@ -41,8 +58,10 @@ def decorator(func):
result.append(s)
if len(result) == 0:
return ""
- else:
+ elif seq:
return "\"" + seq.join(result) + "\""
+ else:
+ return result
return wrapter
@@ -57,11 +76,15 @@ def hostroles(s, seq, host=None):
@decorator
-def hostmacs(s, seq, host=None):
- return host.get('mac', '')
+def hostmachines(s, seq, host=None):
+ return {'mac': host.get('interfaces', {}),
+ 'power_type': host.get('power_type', ''),
+ 'power_ip': host.get('power_ip', ''),
+ 'power_user': host.get('power_user', ''),
+ 'power_pass': host.get('power_pass', '')}
-def export_network_file(dha, network, ofile):
+def export_network_file(dha, network, output_path):
install_network_env = {}
host_network_env = {}
ip_settings = network['ip_settings']
@@ -79,7 +102,7 @@ def export_network_file(dha, network, ofile):
install_network_env.update({'INSTALL_NETMASK': mgmt_netmask})
install_network_env.update({'INSTALL_IP_RANGE': dhcp_ip_range})
install_network_env.update({'VIP': internal_vip})
- export_env_dict(install_network_env, ofile)
+ export_env_dict(install_network_env, output_path)
pxe_nic = os.environ['PXE_NIC']
host_ip_range = mgmt_net['ip_ranges'][0]
@@ -94,10 +117,10 @@ def export_network_file(dha, network, ofile):
host_network_env.update({'NETWORK_MAPPING': "install=" + pxe_nic})
host_network_env.update({'HOST_NETWORKS': ';'.join(host_networks)})
host_network_env.update({'SUBNETS': ','.join(host_subnets)})
- export_env_dict(host_network_env, ofile, True)
+ export_env_dict(host_network_env, output_path, True)
-def export_dha_file(dha, dha_file, ofile):
+def export_dha_file(dha, output_path, machine_path):
env = {}
env.update(dha)
if env.get('hosts', []):
@@ -121,19 +144,28 @@ def export_dha_file(dha, dha_file, ofile):
env.update({'FLAVOR': dha.get('FLAVOR', "cluster")})
env.update({'HOSTNAMES': hostnames(dha, ',')})
env.update({'HOST_ROLES': hostroles(dha, ';')})
- env.update({'DHA': dha_file})
- value = hostmacs(dha, ',')
- if len(value) > 0:
- env.update({'HOST_MACS': value})
+ machine = []
+ if dha.get('TYPE') == "virtual":
+ virtual_mac = []
+ for host in dha.get('hosts'):
+ mac = mac_generator()
+ machine.append({"mac": {"eth0": mac}, "power_type": "libvirt"})
+ virtual_mac.append(mac)
+ env.update({'HOST_MACS': ",".join(virtual_mac)})
+ else:
+ value = hostmachines(dha)
+ for item in value:
+ machine.append(item)
+ dump_yaml(machine, machine_path)
if dha.get('TYPE', "virtual") == "virtual":
env.update({'VIRT_NUMBER': len(dha['hosts'])})
- export_env_dict(env, ofile)
+ export_env_dict(env, output_path)
-def export_reset_file(dha, tmpl_dir, output_dir, ofile):
+def export_reset_file(dha, tmpl_dir, output_dir, output_path):
tmpl_file_name = dha.get('POWER_TOOL', '')
if not tmpl_file_name:
return
@@ -151,28 +183,31 @@ def export_reset_file(dha, tmpl_dir, output_dir, ofile):
f.write(tmpl.respond())
power_manage_env = {'POWER_MANAGE': reset_file_name}
- export_env_dict(power_manage_env, ofile, True)
+ export_env_dict(power_manage_env, output_path, True)
if __name__ == "__main__":
- if len(sys.argv) != 6:
- print("parameter wrong%d %s" % (len(sys.argv), sys.argv))
+ if len(sys.argv) != 7:
+ print("parameter wrong %d %s" % (len(sys.argv), sys.argv))
sys.exit(1)
- _, dha_file, network_file, tmpl_dir, output_dir, output_file = sys.argv
+ _, dha_file, network_file, tmpl_dir, output_dir, output_file,\
+ machine_file = sys.argv
if not os.path.exists(dha_file):
print("%s is not exist" % dha_file)
sys.exit(1)
- ofile = os.path.join(output_dir, output_file)
- os.system("touch %s" % ofile)
- os.system("echo \#config file deployment parameter > %s" % ofile)
+ output_path = os.path.join(output_dir, output_file)
+ machine_path = os.path.join(output_dir, machine_file)
+ os.system("touch %s" % output_path)
+ os.system("echo \#config file deployment parameter > %s" % output_path)
+ os.system("touch %s" % machine_path)
- dha_data = init(dha_file)
- network_data = init(network_file)
+ dha_data = load_yaml(dha_file)
+ network_data = load_yaml(network_file)
- export_dha_file(dha_data, dha_file, ofile)
- export_network_file(dha_data, network_data, ofile)
- export_reset_file(dha_data, tmpl_dir, output_dir, ofile)
+ export_dha_file(dha_data, output_path, machine_path)
+ export_network_file(dha_data, network_data, output_path)
+ export_reset_file(dha_data, tmpl_dir, output_dir, output_path)
sys.exit(0)
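
The machine file that config_parse.py now dumps is copied into the docker directory by deploy/compass_vm.sh and read by deploy/client.py through the new --machine_file option. A rough sketch of the virtual-deployment branch, with a made-up output path and node count for the example:

    import random
    import yaml

    def mac_generator():
        # MAC with a fixed 00:00 prefix, mirroring the helper added to
        # deploy/config_parse.py.
        random_hex = lambda: random.choice("0123456789ABCDEF")
        return "00:00" + "".join(":%s%s" % (random_hex(), random_hex()) for _ in range(4))

    # Hypothetical 3-node virtual POD: one entry per host, libvirt power control.
    machines = [{"mac": {"eth0": mac_generator()}, "power_type": "libvirt"} for _ in range(3)]

    with open("/tmp/machine", "w") as fd:   # path is illustrative
        yaml.safe_dump(machines, fd, default_flow_style=False)
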
diff --git a/deploy/deploy_host.sh b/deploy/deploy_host.sh
index 94abf768..7a7b5dd5 100755
--- a/deploy/deploy_host.sh
+++ b/deploy/deploy_host.sh
@@ -43,7 +43,7 @@ function deploy_host(){
--console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \
--network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
--host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
- --machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" --deploy_type="${TYPE}" \
+ --machine_file=${machine_file} --switch_credential="${SWITCH_CREDENTIAL}" --deploy_type="${TYPE}" \
--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
--cluster_vip="${VIP}" --network_cfg="$NETWORK" --neutron_cfg="$NEUTRON" \
--enable_secgroup="${ENABLE_SECGROUP}" --enable_fwaas="${ENABLE_FWAAS}" --expansion="${EXPANSION}" \
diff --git a/deploy/deploy_parameter.sh b/deploy/deploy_parameter.sh
index 78223fcd..d45d4171 100755
--- a/deploy/deploy_parameter.sh
+++ b/deploy/deploy_parameter.sh
@@ -100,7 +100,7 @@ function process_default_para()
"$DHA" "$NETWORK" \
"${COMPASS_DIR}/deploy/template" \
"${WORK_DIR}/script" \
- "deploy_config.sh"
+ "deploy_config.sh" machine
echo ${WORK_DIR}/script/deploy_config.sh
}
diff --git a/deploy/host_baremetal.sh b/deploy/host_baremetal.sh
index 4c63f823..3c303567 100755
--- a/deploy/host_baremetal.sh
+++ b/deploy/host_baremetal.sh
@@ -15,14 +15,6 @@ function reboot_hosts() {
}
function get_host_macs() {
- if [[ "$EXPANSION" == "false" ]]; then
- machines=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
- echo $machines > $WORK_DIR/switch_machines
- else
- machines_old=`cat $WORK_DIR/switch_machines`
- machines_add=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
- echo $machines_add $machines_old > $WORK_DIR/switch_machines
- machines=`echo $machines_add $machines_old|sed 's/ /,/g'`
- fi
+ local machines=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
echo $machines
}
diff --git a/deploy/host_virtual.sh b/deploy/host_virtual.sh
index d955b747..c3133cf6 100755
--- a/deploy/host_virtual.sh
+++ b/deploy/host_virtual.sh
@@ -25,20 +25,21 @@ function tear_down_machines() {
}
function reboot_hosts() {
+ echo "reboot"
# We do need it for aarch64
- if [ "$COMPASS_ARCH" = "aarch64" ]; then
- old_ifs=$IFS
- IFS=,
- for i in $HOSTNAMES; do
- sudo virsh destroy $i
- sleep 3
- sudo virsh start $i
- sleep 3
- done
- IFS=$old_ifs
- else
- log_warn "reboot_hosts do nothing"
- fi
+# if [ "$COMPASS_ARCH" = "aarch64" ]; then
+# old_ifs=$IFS
+# IFS=,
+# for i in $HOSTNAMES; do
+# sudo virsh destroy $i
+# sleep 3
+# sudo virsh start $i
+# sleep 3
+# done
+# IFS=$old_ifs
+# else
+# log_warn "reboot_hosts do nothing"
+# fi
}
function launch_host_vms() {
@@ -97,37 +98,7 @@ function recover_host_vms() {
}
function get_host_macs() {
- local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
- local machines=
-
- if [[ $REDEPLOY_HOST == "true" ]]; then
- mac_array=`cat $WORK_DIR/switch_machines`
- machines=`echo $mac_array|sed 's/ /,/g'`
- else
- if [[ -z $HOST_MACS ]]; then
- if [[ "$EXPANSION" == "false" ]]; then
- chmod +x $mac_generator
- mac_array=`$mac_generator $VIRT_NUMBER`
- echo $mac_array > $WORK_DIR/switch_machines
- machines=`echo $mac_array|sed 's/ /,/g'`
- else
- machines_old=`cat $WORK_DIR/switch_machines`
- chmod +x $mac_generator
- machines_add=`$mac_generator $VIRT_NUMBER`
- echo $machines_add $machines_old > $WORK_DIR/switch_machines
- machines=`echo $machines_add $machines_old|sed 's/ /,/g'`
- fi
- else
- if [[ "$EXPANSION" == "false" ]]; then
- machines=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
- else
- machines_old=`cat $WORK_DIR/switch_machines`
- machines_add=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
- echo $machines_add $machines_old > $WORK_DIR/switch_machines
- machines=`echo $machines_add $machines_old|sed 's/ /,/g'`
- fi
- fi
- fi
+ local machines=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
echo $machines
}
diff --git a/deploy/launch.sh b/deploy/launch.sh
index 8a009003..b11127ca 100755
--- a/deploy/launch.sh
+++ b/deploy/launch.sh
@@ -53,6 +53,7 @@ if [[ "$EXPANSION" == "false" ]]; then
exit 1
fi
+ export machine_file=$WORK_DIR/script/machine
export machines
CONTAINER_ALIVE=$(check_container_alive)
diff --git a/deploy/prepare.sh b/deploy/prepare.sh
index 38d98e1d..f11ae74c 100755
--- a/deploy/prepare.sh
+++ b/deploy/prepare.sh
@@ -48,7 +48,14 @@ function extract_tar()
function prepare_env() {
sudo sed -i -e 's/^#user =.*/user = "root"/g' /etc/libvirt/qemu.conf
sudo sed -i -e 's/^#group =.*/group = "root"/g' /etc/libvirt/qemu.conf
+ sudo sed -i 's/^.\?listen_tls.*/listen_tls = 0/g' /etc/libvirt/libvirtd.conf
+ sudo sed -i 's/^.\?listen_tcp.*/listen_tcp = 1/g' /etc/libvirt/libvirtd.conf
+ sudo sed -i 's/^.\?tcp_port.*/tcp_port = "16509"/g' /etc/libvirt/libvirtd.conf
+ sudo sed -i 's/^.\?listen_addr.*/listen_addr = "0.0.0.0"/g' /etc/libvirt/libvirtd.conf
+ sudo sed -i 's/^.\?auth_tcp.*/auth_tcp = "none"/g' /etc/libvirt/libvirtd.conf
+ sudo sed -i 's/^.\?libvirtd_opts.*/libvirtd_opts="-d -l"/g' /etc/default/libvirt-bin
sudo service libvirt-bin restart
+
if sudo service openvswitch-switch status|grep stop; then
sudo service openvswitch-switch start
fi
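The added sed lines switch libvirtd to unauthenticated TCP on port 16509 and pass "-d -l" to the daemon. A quick spot-check after the restart, kept as a sketch (the paths are the ones edited above):

    grep -E '^(listen_tls|listen_tcp|tcp_port|listen_addr|auth_tcp)' /etc/libvirt/libvirtd.conf
    grep '^libvirtd_opts' /etc/default/libvirt-bin
    # expected, per the substitutions above:
    #   listen_tls = 0
    #   listen_tcp = 1
    #   tcp_port = "16509"
    #   listen_addr = "0.0.0.0"
    #   auth_tcp = "none"
    #   libvirtd_opts="-d -l"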
diff --git a/docs/release/installation/vmdeploy.rst b/docs/release/installation/vmdeploy.rst
index 63c3cc5e..4f1336c5 100644
--- a/docs/release/installation/vmdeploy.rst
+++ b/docs/release/installation/vmdeploy.rst
@@ -19,7 +19,6 @@ If you want to deploy noha with 1 controller and 1 compute, run the following command
.. code-block:: bash
export SCENARIO=os-nosdn-nofeature-noha.yml
- export VIRT_NUMBER=2
curl https://raw.githubusercontent.com/opnfv/compass4nfv/stable/fraser/quickstart.sh | bash
Nodes Configuration (Virtual Deployment)
@@ -28,8 +27,6 @@ Nodes Configuration (Virtual Deployment)
virtual machine setting
~~~~~~~~~~~~~~~~~~~~~~~
- - VIRT_NUMBER -- the number of nodes for virtual deployment.
-
- VIRT_CPUS -- the number of CPUs allocated per virtual machine.
- VIRT_MEM -- the memory size(MB) allocated per virtual machine.
@@ -38,7 +35,6 @@ virtual machine setting
.. code-block:: bash
- export VIRT_NUMBER=${VIRT_NUMBER:-5}
export VIRT_CPUS=${VIRT_CPU:-4}
export VIRT_MEM=${VIRT_MEM:-16384}
export VIRT_DISK=${VIRT_DISK:-200G}
diff --git a/plugins/onap/roles/tasks/Ubuntu.yml b/plugins/onap/roles/tasks/Ubuntu.yml
new file mode 100644
index 00000000..a51e5f17
--- /dev/null
+++ b/plugins/onap/roles/tasks/Ubuntu.yml
@@ -0,0 +1,117 @@
+---
+- name: download helm
+ get_url:
+ url: "{{ helm_url }}"
+ dest: /tmp/helm.tar.gz
+ when: inventory_hostname == groups['kube_master'][0]
+ run_once: true
+
+- name: prepare helm
+ shell:
+ tar -zxf /tmp/helm.tar.gz -C /tmp;
+ mv /tmp/linux-amd64/helm /usr/local/bin/helm
+ when: inventory_hostname == groups['kube_master'][0]
+ run_once: true
+
+- name: install tiller
+ shell: >
+ kubectl create serviceaccount --namespace kube-system tiller;
+ kubectl create clusterrolebinding tiller-cluster-rule
+ --clusterrole=cluster-admin
+ --serviceaccount=kube-system:tiller;
+ helm init --service-account tiller
+ when: inventory_hostname == groups['kube_master'][0]
+ run_once: true
+
+- name: git clone oom
+ git:
+ repo: "{{ oom_repo }}"
+ dest: "{{ oom_dest }}"
+ version: "{{ oom_version }}"
+ when: inventory_hostname == groups['kube_master'][0]
+ run_once: true
+
+- name: prepare local repo
+ shell:
+ nohup /bin/sh -c "helm serve &";
+ while true; do curl -s 127.0.0.1:8879 > /dev/null; if [ $? -eq 0 ]; then break; fi; done;
+ helm repo add local http://127.0.0.1:8879
+ when: inventory_hostname == groups['kube_master'][0]
+ run_once: true
+
+- name: add helm plugin
+ shell:
+ cp -rf "{{ oom_dest }}/kubernetes/helm/plugins" ~/.helm/
+ when: inventory_hostname == groups['kube_master'][0]
+ run_once: true
+
+- name: make
+ shell:
+ make all
+ args:
+ chdir: "{{ oom_dest }}/kubernetes"
+ when: inventory_hostname == groups['kube_master'][0]
+
+- name: install nfs master
+ apt:
+ pkg: "nfs-kernel-server"
+ state: "present"
+ update_cache: "yes"
+ when: inventory_hostname == groups['kube_master'][0]
+
+- name: create /dockerdata-nfs
+ file:
+ path: /dockerdata-nfs
+ owner: nobody
+ group: nogroup
+ state: directory
+ mode: 0777
+ when: inventory_hostname == groups['kube_master'][0]
+
+- name: install nfs slave
+ apt:
+ pkg: "nfs-common"
+ state: "present"
+ update_cache: "yes"
+ when: inventory_hostname != groups['kube_master'][0]
+
+- name: create /dockerdata-nfs
+ file:
+ path: /dockerdata-nfs
+ state: directory
+ when: inventory_hostname != groups['kube_master'][0]
+
+- name: render /etc/exports
+ template:
+ src: exports.j2
+ dest: /etc/exports
+ when: inventory_hostname == groups['kube_master'][0]
+
+- name: restart nfs service
+ shell:
+ exportfs -a;
+ systemctl restart nfs-kernel-server
+ when: inventory_hostname == groups['kube_master'][0]
+
+- name: register master ip
+ debug:
+ msg: "{{ ip_settings[groups['kube_master'][0]]['external']['ip'] }}"
+ register: master_ip
+
+- name: mount dockerdata-nfs from master
+ shell:
+ mount {{ master_ip.msg }}:/dockerdata-nfs /dockerdata-nfs/
+ when: inventory_hostname != groups['kube_master'][0]
+
+# yamllint disable rule:line-length
+- name: add mount info
+ lineinfile:
+ path: /etc/fstab
+ line: "{{ master_ip.msg }}:/dockerdata-nfs /dockerdata-nfs nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0"
+ when: inventory_hostname != groups['kube_master'][0]
+# yamllint enable rule:line-length
+
+- name: deploy onap
+ shell:
+ helm deploy dev local/onap --namespace onap
+ when: inventory_hostname == groups['kube_master'][0]
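Condensed into plain shell, the helm/OOM portion of this role amounts to roughly the sequence below on the first kube master (the NFS setup is left out). This is an illustrative sketch only: it assumes kubectl already talks to the cluster and inlines the helm_url/oom_repo/oom_version values from plugins/onap/roles/vars/main.yml.

    set -e
    curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.9.1-linux-amd64.tar.gz -o /tmp/helm.tar.gz
    tar -zxf /tmp/helm.tar.gz -C /tmp && mv /tmp/linux-amd64/helm /usr/local/bin/helm
    kubectl create serviceaccount --namespace kube-system tiller
    kubectl create clusterrolebinding tiller-cluster-rule \
        --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
    helm init --service-account tiller
    git clone -b casablanca https://gerrit.onap.org/r/oom /home/oom
    nohup /bin/sh -c "helm serve &" >/dev/null 2>&1
    until curl -s 127.0.0.1:8879 >/dev/null; do sleep 1; done     # wait for the local chart repo
    helm repo add local http://127.0.0.1:8879
    cp -rf /home/oom/kubernetes/helm/plugins ~/.helm/             # provides the 'helm deploy' plugin
    make -C /home/oom/kubernetes all
    helm deploy dev local/onap --namespace onap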
diff --git a/plugins/onap/roles/tasks/main.yml b/plugins/onap/roles/tasks/main.yml
new file mode 100644
index 00000000..c9e80427
--- /dev/null
+++ b/plugins/onap/roles/tasks/main.yml
@@ -0,0 +1,11 @@
+#############################################################################
+# Copyright (c) 2019 Intel Corp.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+- include: "{{ ansible_distribution }}.yml"
+ when: onap is defined and onap == "Enable"
diff --git a/plugins/onap/roles/templates/exports.j2 b/plugins/onap/roles/templates/exports.j2
new file mode 100644
index 00000000..8f5a3f65
--- /dev/null
+++ b/plugins/onap/roles/templates/exports.j2
@@ -0,0 +1 @@
+/dockerdata-nfs{% for host in groups.all %}{% if host != groups.kube_master[0] %} {{ ip_settings[host]['external']['ip'] }}(rw,sync,no_root_squash,no_subtree_check){% endif %}{% endfor %}
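The template renders a single /etc/exports line that exports /dockerdata-nfs to every node except the first kube master. A hypothetical rendering for one master and two workers, with invented IPs:

    # /etc/exports (example only; the real addresses come from ip_settings)
    /dockerdata-nfs 10.1.0.51(rw,sync,no_root_squash,no_subtree_check) 10.1.0.52(rw,sync,no_root_squash,no_subtree_check)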
diff --git a/plugins/onap/roles/vars/main.yml b/plugins/onap/roles/vars/main.yml
new file mode 100644
index 00000000..83b591a5
--- /dev/null
+++ b/plugins/onap/roles/vars/main.yml
@@ -0,0 +1,13 @@
+#############################################################################
+# Copyright (c) 2019 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+helm_url: https://storage.googleapis.com/kubernetes-helm/helm-v2.9.1-linux-amd64.tar.gz
+oom_repo: https://gerrit.onap.org/r/oom
+oom_dest: /home/oom
+oom_version: casablanca
diff --git a/plugins/stor4nfv/roles/os-stor4nfv/tasks/post-install.yml b/plugins/stor4nfv/roles/os-stor4nfv/tasks/post-install.yml
index d14c0c68..d441f420 100644
--- a/plugins/stor4nfv/roles/os-stor4nfv/tasks/post-install.yml
+++ b/plugins/stor4nfv/roles/os-stor4nfv/tasks/post-install.yml
@@ -37,9 +37,10 @@
shell:
_raw_params: |
function _modify_osdsdock_endpoint {
- local ip_addr=(lxc-info -n $(lxc-ls --line | grep ceph-mon) -iH)|grep "10.1"
+ local ip_addr=$(lxc-info -n $(lxc-ls --line | grep ceph-mon) -iH | grep "10.1")
if [ ! -z "${ip_addr}" ]; then
- sed -i 's/^50050.*/api_endpoint = $ip_addr:50050/g' /etc/opensds/opensds.conf
+ sed -i 's/api_endpoint = localhost:50050/api_endpoint = '"$ip_addr"':50050/g' \
+ /etc/opensds/opensds.conf
fi
}
_modify_osdsdock_endpoint
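The corrected helper now captures the ceph-mon container's 10.1.x address instead of assigning a bare word list. An equivalent standalone sketch, keeping the same container lookup and config path as the patch:

    ceph_mon=$(lxc-ls --line | grep ceph-mon)              # name of the ceph-mon container
    ip_addr=$(lxc-info -n "$ceph_mon" -iH | grep "10.1")   # its 10.1.x address
    if [ -n "$ip_addr" ]; then
        sed -i "s/api_endpoint = localhost:50050/api_endpoint = ${ip_addr}:50050/g" /etc/opensds/opensds.conf
    fi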
@@ -82,6 +83,7 @@
register: opensds_driver_exists
when:
- inventory_hostname in groups['ceph-mon']
+ - groups['ceph_adm'][0] in inventory_hostname
- name: copy opensds conf
remote_user: root
@@ -92,6 +94,7 @@
cp -r /tmp/driver /etc/opensds;
when:
- inventory_hostname in groups['ceph-mon']
+ - groups['ceph_adm'][0] in inventory_hostname
- name: create ceph pool
remote_user: root
@@ -99,6 +102,7 @@
ceph osd pool create rbd 24 && ceph osd pool set rbd size 1
when:
- inventory_hostname in groups['ceph-mon']
+ - groups['ceph_adm'][0] in inventory_hostname
- name: ensure osdsdock exists
stat:
@@ -106,6 +110,7 @@
register: opensds_exists
when:
- inventory_hostname in groups['ceph-mon']
+ - groups['ceph_adm'][0] in inventory_hostname
- name: start osdsdock daemon
remote_user: root
@@ -113,6 +118,7 @@
cd /opt && ./bin/osdsdock -daemon
when:
- inventory_hostname in groups['ceph-mon']
+ - groups['ceph_adm'][0] in inventory_hostname
- name: ensure osdsctl exists
stat:
@@ -134,3 +140,4 @@
osdsctl profile create '{"name": "default", "description": "default policy"}'
when:
- inventory_hostname in groups['ceph_adm']
+ ignore_errors: "true"
diff --git a/util/check_valid.py b/util/check_valid.py
index e6a72e71..9348d272 100644
--- a/util/check_valid.py
+++ b/util/check_valid.py
@@ -98,25 +98,19 @@ def check_dha_file(dha):
invalid = False
if dha['TYPE'] == 'baremetal':
for i in dha['hosts']:
- if not is_valid_mac(i['mac']):
- err_print('''invalid address:
- hosts:
- - name: %s
- mac: %s''' % (i['name'], i['mac']))
- invalid = True
for j in i['interfaces']:
- if not is_valid_mac(j.values()[0]):
+ if not is_valid_mac(i['interfaces'].get(j)):
err_print('''invalid address:
hosts:
- name: %s
interfaces:
- - %s: %s''' % (i['name'], j.keys()[0], j.values()[0])) # noqa: E501
+ - %s: %s''' % (i['name'], j, i['interfaces'].get(j))) # noqa: E501
invalid = True
- if not is_valid_ip(i['ipmiIp']):
+ if not is_valid_ip(i['power_ip']):
err_print('''invalid address:
hosts:
- name: %s
- ipmiIp: %s''' % (i['name'], i['ipmiIp']))
+ power_ip: %s''' % (i['name'], i['power_ip']))
invalid = True
if not invalid:
diff --git a/util/docker-compose/roles/machines/tasks/main.yml b/util/docker-compose/roles/machines/tasks/main.yml
index 365a9d90..7b4a9f18 100755
--- a/util/docker-compose/roles/machines/tasks/main.yml
+++ b/util/docker-compose/roles/machines/tasks/main.yml
@@ -1,21 +1,15 @@
---
-- name: create switch file if test mode enabled
- template: src=switch_machines_file.j2
- dest="{{ docker_compose_dir }}/switch_machines_file"
- tags:
- - redploy
-
- name: docker cp switch_machines_file
shell: |
- docker cp "{{ docker_compose_dir }}/switch_machines_file" \
- compass-deck:/tmp/switch_machines_file
+ docker cp "{{ docker_compose_dir }}/machine" \
+ compass-deck:/tmp/machine
tags:
- redploy
- name: inject switches and mac addresses
shell: |
docker exec compass-deck bash -c \
- "/opt/compass/bin/manage_db.py set_switch_machines \
- --switch_machines_file /tmp/switch_machines_file"
+ "/opt/compass/bin/manage_db.py set_machine \
+ --machine_file /tmp/machine"
tags:
- redploy
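By hand, the two remaining tasks reduce to copying the generated machine file into compass-deck and replaying it through manage_db.py. A sketch, with ${docker_compose_dir} standing in for whatever the role variable resolves to:

    docker cp "${docker_compose_dir}/machine" compass-deck:/tmp/machine
    docker exec compass-deck bash -c \
        "/opt/compass/bin/manage_db.py set_machine --machine_file /tmp/machine"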