Diffstat (limited to 'deploy')
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml | 33
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml | 7
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml | 11
-rw-r--r--  deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml | 25
-rwxr-xr-x  deploy/adapters/ansible/roles/config-osa/tasks/main.yml | 43
-rw-r--r--  deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 | 3
-rw-r--r--  deploy/adapters/ansible/roles/pre-openstack/tasks/RedHat.yml | 4
-rw-r--r--  deploy/adapters/ansible/roles/pre-openstack/tasks/Ubuntu.yml | 4
-rw-r--r--  deploy/compass_conf/adapter/ansible_kubernetes.conf | 2
-rwxr-xr-x  deploy/compass_vm.sh | 14
-rw-r--r--  deploy/conf/compass.conf | 18
-rw-r--r--  deploy/playbook_done.py | 7
-rw-r--r--  deploy/status_callback.py | 76
13 files changed, 168 insertions, 79 deletions
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml
new file mode 100644
index 00000000..26e3fa75
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars-aarch64.yml
@@ -0,0 +1,33 @@
+---
+# Override default kubespray variables
+
+# roles/download/defaults/main.yml
+etcd_version: v3.2.4-arm64
+flannel_version: "v0.8.0-arm64"
+flannel_cni_image_repo: "linaro/flannel-cni-arm64"
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube-arm64"
+hyperkube_image_tag: "{{ kube_version }}"
+pod_infra_image_repo: "gcr.io/google_containers/pause-arm64"
+nginx_image_tag: 1.13
+kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-arm64"
+dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-arm64"
+dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-arm64"
+kubednsautoscaler_image_repo: "gcr.io/google_containers/\
+cluster-proportional-autoscaler-arm64"
+
+# inventory/group_vars/k8s-cluster.yml
+kube_network_plugin: flannel
+helm_enabled: false
+docker_options: "--insecure-registry={{ kube_service_addresses }} \
+--graph={{ docker_daemon_graph }} {{ docker_log_opts }} \
+--add-runtime docker-runc=/usr/libexec/docker/docker-runc-current \
+--default-runtime=docker-runc \
+--exec-opt native.cgroupdriver=systemd \
+--userland-proxy-path=/usr/libexec/docker/docker-proxy-current \
+--signature-verification=false"
+
+# roles/docker/vars/redhat.yml
+docker_package_info:
+ pkg_mgr: yum
+ pkgs:
+ - name: docker
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml
new file mode 100644
index 00000000..e13e33ca
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/files/extra-vars.yml
@@ -0,0 +1,7 @@
+---
+# Override default kubespray variables
+
+# Just a placeholder to satisfy ansible
+dummy_var: 0
+
+# helm_enabled: true
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
index 4df8dffa..2763e53e 100644
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
@@ -96,9 +96,18 @@
regexp: '^helm_enabled:'
line: 'helm_enabled: {{ helm_flag }}'
+- name: copy overridden variables
+ copy:
+ src: "{{ item }}"
+ dest: /opt/kargo_k8s/extra-vars.yml
+ with_first_found:
+ - extra-vars-{{ ansible_architecture }}.yml
+ - extra-vars.yml
+
- name: run kargo playbook
shell: |
cd /opt/kargo_k8s
- ansible-playbook -i inventory/inventory.cfg cluster.yml -b -v 2>&1 | tee kargo.log
+ ansible-playbook -i inventory/inventory.cfg cluster.yml \
+ -e "@extra-vars.yml" -b -v 2>&1 | tee kargo.log
tags:
- ansible
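
For reference: the `with_first_found` lookup above prefers the architecture-specific override file and falls back to the generic `extra-vars.yml`, and the chosen file is then passed to kubespray via `-e "@extra-vars.yml"`. A minimal standalone sketch of the same lookup behaviour (illustrative only, not part of this change; it assumes both candidate files sit next to the playbook):

---
- hosts: localhost
  gather_facts: true
  tasks:
    # Resolves to extra-vars-aarch64.yml on an aarch64 host and to the
    # generic extra-vars.yml everywhere else.
    - name: show which override file would be copied
      debug:
        msg: "{{ lookup('first_found', override_candidates) }}"
      vars:
        override_candidates:
          - "extra-vars-{{ ansible_architecture }}.yml"
          - "extra-vars.yml"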
diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml b/deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml
new file mode 100644
index 00000000..61d263b4
--- /dev/null
+++ b/deploy/adapters/ansible/roles/config-osa/tasks/fix_pip_version.yml
@@ -0,0 +1,25 @@
+# #############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# #############################################################################
+---
+- name: copy the repo_fix_pandas.yml
+ template:
+ src: repo_fix_pandas.yml
+ dest: /etc/ansible/roles/repo_build/tasks/repo_fix_pandas.yml
+
+- name: fix the python-ldap version
+ lineinfile:
+ dest: /etc/ansible/roles/os_keystone/defaults/main.yml
+ regexp: '^ - python-ldap'
+ line: ' - python-ldap==2.5.2'
+
+- name: add pkgconfig in gnocchi requires pip packages
+ lineinfile:
+ dest: /etc/ansible/roles/repo_build/defaults/main.yml
+ insertafter: "repo_pip_packages:"
+ line: ' - pkgconfig'
diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
index 9b657fe9..ceceb956 100755
--- a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml
@@ -66,6 +66,19 @@
insertafter: '^ - qemu-img-ev'
line: ' - libvirt'
+- name: fix the problem of nova cell_v2 discovery
+ blockinfile:
+ dest: /etc/ansible/roles/os_nova/tasks/nova_db_post_setup.yml
+ insertafter: 'nova-manage cell_v2 discover_hosts'
+ block: |
+ # add retry
+ register: cell_v2_discovery
+ until: cell_v2_discovery.rc == 0
+ retries: 10
+ delay: 5
+ ignore_errors: true
+ when: hostvars[hostvars[inventory_hostname]['groups']['controller'][0]]['local_mirror'] == 'Ubuntu'
+
- name: remove CentOS-Base.repo after ceph-osd
blockinfile:
dest: /etc/ansible/roles/ceph-osd/tasks/start_osds.yml
@@ -76,6 +89,25 @@
/etc/yum.repos.d/CentOS-Base.repo.bak;
when: hostvars[hostvars[inventory_hostname]['groups']['controller'][0]]['local_mirror'] == 'CentOS'
+- name: fix the problem in pip_install
+ blockinfile:
+ dest: /etc/ansible/roles/pip_install/tasks/pre_install_yum.yml
+ insertafter: 'pip_install_package_state'
+ block: |
+ # add retry
+ register: repo_result
+ until: repo_result | success
+ retries: 5
+ delay: 10
+ when: hostvars[hostvars[inventory_hostname]['groups']['controller'][0]]['local_mirror'] == 'CentOS'
+
+- name: update the directory of chrony key
+ lineinfile:
+ dest: /etc/ansible/roles/ansible-hardening/templates/chrony.conf.j2
+ regexp: '^keyfile'
+ line: 'keyfile /etc/chrony.keys'
+ when: hostvars[hostvars[inventory_hostname]['groups']['controller'][0]]['local_mirror'] == 'CentOS'
+
- name: add mariadb local repository
blockinfile:
dest: /etc/openstack_deploy/user_variables.yml
@@ -295,11 +327,6 @@
when:
- "{{ hostvars[inventory_hostname]['groups']['controller'] | length < 2 }}"
-- name: copy the repo_fix_andas.yml
- template:
- src: repo_fix_pandas.yml
- dest: /etc/ansible/roles/repo_build/tasks/repo_fix_pandas.yml
-
# - name: change repore build
# lineinfile:
# dest: /etc/ansible/roles/repo_build/tasks/main.yml
@@ -308,10 +335,6 @@
- include: meters.yml
-- name: fix the python-ldap version
- lineinfile:
- dest: /etc/ansible/roles/os_keystone/defaults/main.yml
- regexp: '^ - python-ldap'
- line: ' - python-ldap==2.5.2'
+- include: fix_pip_version.yml
- include: fix_rescue.yml
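
For reference: the blockinfile edits above inject a register/until retry block right after the matching line in the upstream OSA role files. A hedged sketch of roughly what the patched cell_v2 discovery task ends up looking like (task name and module are illustrative; the real wording lives in the os_nova role):

- name: Perform a cell_v2 host discovery
  command: nova-manage cell_v2 discover_hosts
  # appended by the blockinfile task above: retry until the command
  # exits cleanly, and tolerate a final failure after 10 attempts
  register: cell_v2_discovery
  until: cell_v2_discovery.rc == 0
  retries: 10
  delay: 5
  ignore_errors: true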
diff --git a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
index 88a3233b..5fa999a5 100644
--- a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
+++ b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
@@ -63,3 +63,6 @@ neutron_provider_networks:
{% endif %}
security_sshd_permit_root_login: yes
+
+security_ntp_servers:
+ - 45.79.111.114
diff --git a/deploy/adapters/ansible/roles/pre-openstack/tasks/RedHat.yml b/deploy/adapters/ansible/roles/pre-openstack/tasks/RedHat.yml
index 6ac191a3..d423ed04 100644
--- a/deploy/adapters/ansible/roles/pre-openstack/tasks/RedHat.yml
+++ b/deploy/adapters/ansible/roles/pre-openstack/tasks/RedHat.yml
@@ -90,7 +90,9 @@
dest: /etc/modules-load.d/openstack-ansible.conf
- name: restart ntp service
- shell: "systemctl enable ntpd.service && systemctl start ntpd.service"
+ shell: |
+ systemctl stop ntpd.service;
+ systemctl disable ntpd.service;
- name: change the MaxSessions
lineinfile:
diff --git a/deploy/adapters/ansible/roles/pre-openstack/tasks/Ubuntu.yml b/deploy/adapters/ansible/roles/pre-openstack/tasks/Ubuntu.yml
index 5bb77485..2433ac17 100644
--- a/deploy/adapters/ansible/roles/pre-openstack/tasks/Ubuntu.yml
+++ b/deploy/adapters/ansible/roles/pre-openstack/tasks/Ubuntu.yml
@@ -55,7 +55,9 @@
state: absent
- name: restart ntp service
- shell: "service ntp restart"
+ shell: |
+ service ntp stop;
+ systemctl disable ntp;
- name: add the appropriate kernel modules
copy:
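
For reference: both "restart ntp service" tasks above now actually stop and disable the distro NTP daemon so that the OpenStack-Ansible-managed time service can take over. A more idiomatic alternative (a sketch, not what this change does) would use the service module and let Ansible pick the unit name per distro:

- name: stop and disable the distro NTP daemon
  service:
    # unit names are assumptions: 'ntp' on Ubuntu, 'ntpd' on CentOS/RHEL
    name: "{{ 'ntp' if ansible_os_family == 'Debian' else 'ntpd' }}"
    state: stopped
    enabled: false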
diff --git a/deploy/compass_conf/adapter/ansible_kubernetes.conf b/deploy/compass_conf/adapter/ansible_kubernetes.conf
index a3ab671a..70e5564c 100644
--- a/deploy/compass_conf/adapter/ansible_kubernetes.conf
+++ b/deploy/compass_conf/adapter/ansible_kubernetes.conf
@@ -3,5 +3,5 @@ DISPLAY_NAME = 'Kubernetes'
PARENT = 'general'
PACKAGE_INSTALLER = 'ansible_installer_kubernetes'
OS_INSTALLER = 'cobbler'
-SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-16\.04', '(?i)CentOS-7.*16.*', '(?i)CentOS-7.*arm.*']
+SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-16\.04\.3', '(?i)CentOS-7.*17.*', '(?i)CentOS-7.*arm.*']
DEPLOYABLE = True
diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh
index cc0471ae..7689f41c 100755
--- a/deploy/compass_vm.sh
+++ b/deploy/compass_vm.sh
@@ -27,11 +27,11 @@ function install_compass_core() {
}
function set_compass_machine() {
- local config_file=$WORK_DIR/installer/compass-docker-compose/group_vars/all
+ local config_file=$WORK_DIR/installer/docker-compose/group_vars/all
sed -i '/pxe_boot_macs/d' $config_file
echo "pxe_boot_macs: [${machines}]" >> $config_file
- ansible-playbook $WORK_DIR/installer/compass-docker-compose/add_machine.yml
+ ansible-playbook $WORK_DIR/installer/docker-compose/add_machine.yml
}
function install_compass() {
@@ -129,13 +129,9 @@ function wait_ok() {
}
function launch_compass() {
- local group_vars=$WORK_DIR/installer/compass-docker-compose/group_vars/all
+ local group_vars=$WORK_DIR/installer/docker-compose/group_vars/all
sed -i "s#^\(compass_dir:\).*#\1 $COMPASS_DIR#g" $group_vars
- sed -i "s#^\(compass_deck:\).*#\1 $COMPASS_DECK#g" $group_vars
- sed -i "s#^\(compass_tasks:\).*#\1 $COMPASS_TASKS#g" $group_vars
- sed -i "s#^\(compass_cobbler:\).*#\1 $COMPASS_COBBLER#g" $group_vars
- sed -i "s#^\(compass_db:\).*#\1 $COMPASS_DB#g" $group_vars
- sed -i "s#^\(compass_mq:\).*#\1 $COMPASS_MQ#g" $group_vars
+ sed -i "s#^\(compose_images:\).*#\1 $COMPOSE_IMAGES#g" $group_vars
if [[ $OFFLINE_DEPLOY == "Enable" ]]; then
sed -i "s#.*\(compass_repo:\).*#\1 $COMPASS_REPO#g" $group_vars
@@ -150,7 +146,7 @@ function launch_compass() {
sed -i "s#^\(deck_port:\).*#\1 $COMPASS_DECK_PORT#g" $group_vars
sed -i "s#^\(repo_port:\).*#\1 $COMPASS_REPO_PORT#g" $group_vars
- ansible-playbook $WORK_DIR/installer/compass-docker-compose/bring_up_compass.yml
+ ansible-playbook $WORK_DIR/installer/docker-compose/bring_up_compass.yml
}
function recover_compass() {
diff --git a/deploy/conf/compass.conf b/deploy/conf/compass.conf
index 8a3bb3a3..9d9145f1 100644
--- a/deploy/conf/compass.conf
+++ b/deploy/conf/compass.conf
@@ -17,20 +17,4 @@ export NTP_SERVER="$COMPASS_SERVER"
export NAMESERVERS=${USER_NAMESERVER:-"$COMPASS_SERVER"}
export COMPASS_REPO_PORT="5151"
export OFFLINE_DEPLOY=${OFFLINE_DEPLOY:-'Disable'}
-
-# Set docker registry for architectures other than x86_64
-COMPASS_DOCKER_REPO=${COMPASS_DOCKER_REPO:-compass4nfv}
-
-export COMPASS_DECK="$COMPASS_DOCKER_REPO/compass-deck"
-# export COMPASS_TASKS="$COMPASS_DOCKER_REPO/compass-tasks-osa:euphrates"
-export COMPASS_TASKS="yifei0van0xue/compass-tasks-osa"
-
-if [[ "x"$KUBERNETES_VERSION != "x" ]]; then
- export COMPASS_TASKS="$COMPASS_DOCKER_REPO/compass-tasks-k8s"
-fi
-
-# export COMPASS_COBBLER="$COMPASS_DOCKER_REPO/compass-cobbler"
-export COMPASS_DB="$COMPASS_DOCKER_REPO/compass-db"
-export COMPASS_MQ="$COMPASS_DOCKER_REPO/compass-mq"
-export COMPASS_REPO="$COMPASS_DOCKER_REPO/compass-repo-osa-ubuntu:euphrates"
-export COMPASS_COBBLER="yifei0van0xue/compass-cobbler"
+export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks,compass-cobbler]"
diff --git a/deploy/playbook_done.py b/deploy/playbook_done.py
index 24c8c55b..6b1043d4 100644
--- a/deploy/playbook_done.py
+++ b/deploy/playbook_done.py
@@ -15,16 +15,15 @@
# limitations under the License.
"""Ansible playbook callback after a playbook run has completed."""
-import os
import sys
from ansible.plugins.callback import CallbackBase
-current_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(current_dir + '/..')
+compass_bin = "/opt/compass/bin"
+sys.path.append(compass_bin)
+import switch_virtualenv # noqa: F401
-# import switch_virtualenv # noqa
from compass.apiclient.restful import Client # noqa: E402
from compass.utils import flags # noqa: E402
diff --git a/deploy/status_callback.py b/deploy/status_callback.py
index 4bbbc321..f0615f3f 100644
--- a/deploy/status_callback.py
+++ b/deploy/status_callback.py
@@ -16,13 +16,13 @@ from ansible.plugins.callback import CallbackBase
COMPASS_HOST = "compass-deck"
-def task_error(display, host, data):
- display.display("task_error: host=%s,data=%s" % (host, data))
-
-# if isinstance(data, dict):
-# invocation = data.pop('invocation', {})
-
- notify_host(display, COMPASS_HOST, host, "failed")
+# def task_error(display, host, data):
+# display.display("task_error: host=%s,data=%s" % (host, data))
+#
+# if isinstance(data, dict):
+# invocation = data.pop('invocation', {})
+#
+# notify_host(display, COMPASS_HOST, host, "failed")
class CallbackModule(CallbackBase):
@@ -101,39 +101,47 @@ class CallbackModule(CallbackBase):
def v2_playbook_on_stats(self, stats):
self._display.display("playbook_on_stats enter")
+ all_vars = self.play.get_variable_manager().get_vars(self.loader)
+ host_vars = all_vars["hostvars"]
hosts = sorted(stats.processed.keys())
- failures = False
- unreachable = False
-
- for host in hosts:
- summary = stats.summarize(host)
- # self._display.display("host: %s \nsummary: %s\n" % (host, summary)) # noqa
-
- if summary['failures'] > 0:
- failures = True
- if summary['unreachable'] > 0:
- unreachable = True
+ cluster_name = host_vars[hosts[0]]['cluster_name']
headers = {"Content-type": "application/json",
"Accept": "*/*"}
-
conn = httplib.HTTPConnection(COMPASS_HOST, 80)
token = auth(conn)
headers["X-Auth-Token"] = token
- get_url = "/api/hosts"
+ get_url = "/api/clusterhosts"
conn.request("GET", get_url, "", headers)
resp = conn.getresponse()
raise_for_status(resp)
- host_data = json.loads(resp.read())
- clusterhosts = [item["name"] for item in host_data]
-
- if failures or unreachable:
- host_status = "error"
- else:
- host_status = "succ"
+ clusterhost_data = json.loads(resp.read())
+ clusterhost_mapping = {}
+ for item in clusterhost_data:
+ if item["clustername"] == cluster_name:
+ clusterhost_mapping.update({item["hostname"]:
+ item["clusterhost_id"]})
+
+ force_error = False
+ if "localhost" in hosts:
+ summary = stats.summarize("localhost")
+ if summary['failures'] > 0 or summary['unreachable'] > 0:
+ force_error = True
+
+ for hostname, hostid in clusterhost_mapping.iteritems():
+ if hostname not in hosts:
+ continue
+
+ summary = stats.summarize(hostname)
+ # self._display.display("host: %s \nsummary: %s\n" % (host, summary)) # noqa
- for host in clusterhosts:
- notify_host(self._display, "compass-deck", host, host_status)
+ if summary['failures'] > 0 or summary['unreachable'] > 0 \
+ or force_error:
+ status = "error"
+ else:
+ status = "succ"
+ self._display.display("hostname: %s" % hostname)
+ notify_host(self._display, COMPASS_HOST, hostid, status)
def raise_for_status(resp):
@@ -157,17 +165,15 @@ def auth(conn):
return json.loads(resp.read())["token"]
-def notify_host(display, compass_host, host, status):
- display.display("hostname: %s" % host)
- host = host.strip("host")
- url = "/api/clusterhosts/%s/state" % host
+def notify_host(display, compass_host, hostid, status):
+ url = "/api/clusterhosts/%s/state" % hostid
if status == "succ":
body = {"state": "SUCCESSFUL"}
elif status == "error":
body = {"state": "ERROR"}
else:
- display.error("notify_host: host %s with status %s is not supported"
- % (host, status))
+ display.error("notify_host: hostid %s with status %s is not supported"
+ % (hostid, status))
return
headers = {"Content-type": "application/json",