Diffstat (limited to 'deploy')
-rwxr-xr-x  deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml | 6
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/files/chk_k8s_master.sh | 9
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/handlers/main.yml | 14
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/tasks/main.yml | 83
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/templates/haproxy.cfg | 48
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/templates/keepalived.conf | 49
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/vars/Debian.yml | 11
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/vars/RedHat.yml | 11
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/ha/vars/main.yml | 16
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml | 4
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml | 1
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml | 1
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2 | 34
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml | 47
-rw-r--r--  deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml | 3
-rw-r--r--  deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 | 5
-rw-r--r--  deploy/adapters/ansible/roles/post-openstack/tasks/main.yml | 1
-rw-r--r--  deploy/adapters/ansible/roles/setup-host/tasks/main.yml | 12
-rw-r--r--  deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml | 2
-rw-r--r--  deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml | 2
-rw-r--r--  deploy/adapters/cobbler/snippets/kickstart_sysctl.conf | 1
-rw-r--r--  deploy/adapters/cobbler/snippets/preseed_sysctl.conf | 1
-rw-r--r--  deploy/adapters/cobbler/snippets/sysctl.xml | 1
-rw-r--r--  deploy/client.py | 17
-rwxr-xr-x  deploy/compass_conf/flavor/kubernetes.conf | 2
-rwxr-xr-x  deploy/compass_conf/package_installer/ansible-kubernetes.conf | 2
-rwxr-xr-x  deploy/compass_conf/role/kubernetes_ansible.conf | 7
-rw-r--r--  deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl | 4
-rwxr-xr-x  deploy/compass_vm.sh | 20
-rw-r--r--  deploy/conf/base.conf | 1
-rw-r--r--  deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml | 3
-rw-r--r--  deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml | 3
-rw-r--r--  deploy/config_parse.py | 1
-rwxr-xr-x  deploy/launch.sh | 5
-rw-r--r--  deploy/status_callback.py | 2
36 files changed, 402 insertions, 28 deletions
diff --git a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
index eb80066e..bfdc8958 100755
--- a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
+++ b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
@@ -25,6 +25,12 @@
roles:
- install-k8s-dependence
+- hosts: ha
+ remote_user: root
+ max_fail_percentage: 0
+ roles:
+ - ha
+
- hosts: localhost
remote_user: root
max_fail_percentage: 0
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/files/chk_k8s_master.sh b/deploy/adapters/ansible/kubernetes/roles/ha/files/chk_k8s_master.sh
new file mode 100644
index 00000000..62e79b3b
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/files/chk_k8s_master.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+count=`ss -tnl | grep 6443 | wc -l`
+
+if [ $count = 0 ]; then
+ exit 1
+else
+ exit 0
+fi
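
The health check above exits 1 when nothing is listening on the kube-apiserver port (6443) and 0 otherwise, which is what an HA watchdog needs to demote a failed master. A hypothetical manual smoke test of the script (the path is illustrative; this change only adds the file under the role's files/ directory):

    # Run the check by hand on a master node; the install path is an assumption, not set by this diff.
    bash chk_k8s_master.sh
    echo "apiserver on 6443? exit code: $?"   # 0 = listening, 1 = not listening
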
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/handlers/main.yml b/deploy/adapters/ansible/kubernetes/roles/ha/handlers/main.yml
new file mode 100644
index 00000000..03ed82ec
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/handlers/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: restart haproxy
+ service: name=haproxy state=restarted enabled=yes
+
+- name: restart keepalived
+ service: name=keepalived state=restarted enabled=yes
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/ha/tasks/main.yml
new file mode 100644
index 00000000..c7e58376
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/tasks/main.yml
@@ -0,0 +1,83 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: install keepalived haproxy
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: "{{ packages | union(packages_noarch) }}"
+
+- name: generate ha service list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: "{{ services | union(services_noarch) }}"
+
+- name: install pexpect
+ pip: name=pexpect state=present extra_args='--pre'
+
+- name: activate ip_nonlocal_bind
+ sysctl: name=net.ipv4.ip_nonlocal_bind value=1
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_intvl
+ sysctl: name=net.ipv4.tcp_keepalive_intvl value=1
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_probes
+ sysctl: name=net.ipv4.tcp_keepalive_probes value=5
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_time
+ sysctl: name=net.ipv4.tcp_keepalive_time value=5
+ state=present reload=yes
+
+- name: update haproxy cfg
+ template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg
+ notify: restart haproxy
+
+- name: set haproxy enable flag
+ lineinfile: dest=/etc/default/haproxy state=present
+ regexp="ENABLED=*"
+ line="ENABLED=1"
+ notify: restart haproxy
+ when: ansible_os_family == "Debian"
+
+- name: set haproxy log
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="local0.* /var/log/haproxy.log"
+ line="local0.* /var/log/haproxy.log"
+
+- name: set rsyslog udp module
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="^#$ModLoad imudp"
+ line="$ModLoad imudp"
+
+- name: set rsyslog udp port
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="^#$UDPServerRun 514"
+ line="$UDPServerRun 514"
+
+- name: set keepalived start param
+ lineinfile: dest=/etc/default/keepalived state=present
+ regexp="^DAEMON_ARGS=*"
+ line="DAEMON_ARGS=\"-D -d -S 1\""
+ when: ansible_os_family == "Debian"
+
+- name: set keepalived log
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="local1.* /var/log/keepalived.log"
+ line="local1.* /var/log/keepalived.log"
+
+- name: update keepalived info
+ template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
+ notify: restart keepalived
+
+- name: restart rsyslog
+ shell: service rsyslog restart
+
+- meta: flush_handlers
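
The tasks above install keepalived and haproxy, tune the VIP- and keepalive-related sysctls, and route both daemons' logs through rsyslog. A rough post-run verification on an ha node, assuming the role completed, might look like this (none of these commands are part of the role):

    # Confirm the sysctls the role sets are active.
    sysctl net.ipv4.ip_nonlocal_bind net.ipv4.tcp_keepalive_intvl net.ipv4.tcp_keepalive_time
    # Both services should be running and enabled.
    service haproxy status
    service keepalived status
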
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/kubernetes/roles/ha/templates/haproxy.cfg
new file mode 100644
index 00000000..5cd240c0
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/templates/haproxy.cfg
@@ -0,0 +1,48 @@
+
+global
+ #chroot /var/run/haproxy
+ daemon
+ user haproxy
+ group haproxy
+ maxconn 4000
+ pidfile /var/run/haproxy/haproxy.pid
+ #log 127.0.0.1 local0
+ tune.bufsize 1000000
+ stats socket /var/run/haproxy.sock
+ stats timeout 2m
+
+defaults
+ log global
+ maxconn 8000
+ option redispatch
+ option dontlognull
+ option splice-auto
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 50s
+ timeout server 50s
+ timeout check 10s
+ retries 3
+
+listen kubernetes-apiserver-https
+ bind {{ public_vip.ip }}:8383
+ option ssl-hello-chk
+ mode tcp
+ option tcpka
+ option tcplog
+ timeout client 3h
+ timeout server 3h
+ balance roundrobin
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:6443 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen stats
+ mode http
+ bind 0.0.0.0:9999
+ stats enable
+ stats refresh 30s
+ stats uri /
+ stats realm Global\ statistics
+ stats auth admin:admin
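
Once rendered, this template fronts every master's kube-apiserver (port 6443) behind the public VIP on port 8383 and exposes a stats page on 9999. A hedged end-to-end check, with the VIP supplied by the operator rather than taken from this diff:

    # Set PUBLIC_VIP to the rendered public_vip.ip before running.
    VIP="${PUBLIC_VIP:?set PUBLIC_VIP to the rendered public_vip.ip}"
    # kube-apiserver health through the HAProxy frontend (self-signed cert, hence -k).
    curl -sk "https://${VIP}:8383/healthz"; echo
    # HAProxy statistics page from the 'listen stats' section (admin:admin by default).
    curl -s -u admin:admin "http://${VIP}:9999/" | head -n 5
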
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/templates/keepalived.conf b/deploy/adapters/ansible/kubernetes/roles/ha/templates/keepalived.conf
new file mode 100644
index 00000000..c649bed5
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/templates/keepalived.conf
@@ -0,0 +1,49 @@
+global_defs {
+ router_id {{ inventory_hostname }}
+}
+
+vrrp_sync_group VG1 {
+ group {
+ internal_vip
+ public_vip
+ }
+}
+
+vrrp_instance internal_vip {
+ interface {{ sys_intf_mappings.mgmt.interface }}
+ virtual_router_id {{ vrouter_id_internal }}
+ state BACKUP
+ nopreempt
+ advert_int 1
+ priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+ authentication {
+ auth_type PASS
+ auth_pass 1234
+ }
+
+
+ virtual_ipaddress {
+ {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ sys_intf_mappings.mgmt.interface }}
+ }
+}
+
+vrrp_instance public_vip {
+ interface {{ sys_intf_mappings.external.interface }}
+ virtual_router_id {{ vrouter_id_public }}
+ state BACKUP
+ nopreempt
+ advert_int 1
+ priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+ authentication {
+ auth_type PASS
+ auth_pass 4321
+ }
+
+ virtual_ipaddress {
+ {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev {{ sys_intf_mappings.external.interface }}
+ }
+
+}
+
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/vars/Debian.yml b/deploy/adapters/ansible/kubernetes/roles/ha/vars/Debian.yml
new file mode 100644
index 00000000..b9f46bdf
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/vars/Debian.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+services: []
+packages: []
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/vars/RedHat.yml b/deploy/adapters/ansible/kubernetes/roles/ha/vars/RedHat.yml
new file mode 100644
index 00000000..b9f46bdf
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/vars/RedHat.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+services: []
+packages: []
diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/ha/vars/main.yml
new file mode 100644
index 00000000..77735d1e
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/ha/vars/main.yml
@@ -0,0 +1,16 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+packages_noarch:
+ - keepalived
+ - haproxy
+
+services_noarch:
+ - keepalived
+ - haproxy
diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml
index 6487e4ef..e683a3fe 100644
--- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml
@@ -9,6 +9,10 @@
---
- include_vars: "{{ ansible_os_family }}.yml"
+- name: Install yum epel-release
+ command: yum -y install epel-release
+ when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7'
+
- name: Install yum packages
yum:
pkg: "{{ item }}"
diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml
index e016b855..8ced18b4 100644
--- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml
@@ -2,6 +2,7 @@
packages:
- ubuntu-cloud-keyring
- python-dev
+ - python-pip
- openvswitch-switch
- openvswitch-switch-dpdk
- python-memcache
diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml
index 3ec18e7f..b7e1d3dc 100644
--- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml
@@ -1,6 +1,7 @@
---
packages:
- python-devel
+ - python-pip
- gcc
- redhat-lsb-core
- python-crypto
diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml
index 713b6b5f..7158325a 100644
--- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml
@@ -8,7 +8,6 @@
##############################################################################
---
packages_noarch:
- - python-pip
- ntp
services_noarch: []
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2 b/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2
new file mode 100644
index 00000000..d998d4cb
--- /dev/null
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2
@@ -0,0 +1,34 @@
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = kubernetes
+DNS.2 = kubernetes.default
+DNS.3 = kubernetes.default.svc
+DNS.4 = kubernetes.default.svc.{{ dns_domain }}
+DNS.5 = localhost
+{% for host in groups['kube-master'] %}
+DNS.{{ 5 + loop.index }} = {{ host }}
+{% endfor %}
+{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
+{% set idx = groups['kube-master'] | length | int + 5 + 1 %}
+DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% endif %}
+{% for host in groups['kube-master'] %}
+IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% endfor %}
+{% set idx = groups['kube-master'] | length | int * 2 + 1 %}
+IP.{{ idx }} = {{ kube_apiserver_ip }}
+IP.{{ idx + 1 }} = 127.0.0.1
+{% if supplementary_addresses_in_ssl_keys is defined %}
+{% set is = idx + 1 %}
+{% for addr in supplementary_addresses_in_ssl_keys %}
+IP.{{ is + loop.index }} = {{ addr }}
+{% endfor %}
+{% endif %}
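
This template replaces kargo's stock SAN list so that, together with supplementary_addresses_in_ssl_keys set later in this change, the external VIP ends up in the API server certificate. For orientation only, a rendered config of this shape is consumed by openssl roughly as follows (file names are illustrative, not taken from kargo):

    # Illustrative only: kargo's secrets role drives the real certificate generation.
    openssl genrsa -out apiserver-key.pem 2048
    openssl req -new -key apiserver-key.pem -subj "/CN=kube-apiserver" \
        -out apiserver.csr -config openssl.conf
    openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial \
        -out apiserver.pem -days 3650 -extensions v3_req -extfile openssl.conf
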
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
index 2763e53e..af52ad04 100644
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml
@@ -67,7 +67,7 @@
- name: copy inventory.json file
copy:
- src: /var/ansible/run/kubernetes-opnfv2/inventories/inventory.json
+ src: "{{ run_dir }}/inventories/inventory.json"
dest: /tmp/inventory.json
tags:
- ansible
@@ -96,6 +96,51 @@
regexp: '^helm_enabled:'
line: 'helm_enabled: {{ helm_flag }}'
+- name: enable external lb | set lb domain_name
+ lineinfile:
+ dest: /opt/kargo_k8s/inventory/group_vars/all.yml
+ regexp: '^## apiserver_loadbalancer_domain_name:'
+ line: 'apiserver_loadbalancer_domain_name: {{ apiserver_loadbalancer_domain_name }}'
+
+- name: enable external lb | set loadbalancer_apiserver
+ lineinfile:
+ dest: /opt/kargo_k8s/inventory/group_vars/all.yml
+ regexp: '^#loadbalancer_apiserver:'
+ line: 'loadbalancer_apiserver:'
+
+- name: enable external lb | set vip address
+ lineinfile:
+ dest: /opt/kargo_k8s/inventory/group_vars/all.yml
+ regexp: '^# address: 1.2.3.4'
+ line: ' address: {{ vipaddress }}'
+
+- name: enable external lb | set vip port
+ lineinfile:
+ dest: /opt/kargo_k8s/inventory/group_vars/all.yml
+ regexp: '^# port: 1234'
+ line: ' port: {{ exlb_port }}'
+
+- name: enable internal lb
+ lineinfile:
+ dest: /opt/kargo_k8s/inventory/group_vars/all.yml
+ regexp: '^#loadbalancer_apiserver_localhost: true'
+ line: 'loadbalancer_apiserver_localhost: true'
+
+- name: add vip to ssl keys
+ lineinfile:
+ dest: /opt/kargo_k8s/inventory/group_vars/k8s-cluster.yml
+ line: 'supplementary_addresses_in_ssl_keys: [{{ vipaddress }}]'
+
+- name: rm openssl file
+ file:
+ path: /opt/kargo_k8s/roles/kubernetes/secrets/templates/openssl.conf.j2
+ state: absent
+
+- name: copy openssl.conf.j2
+ copy:
+ src: openssl.conf.j2
+ dest: /opt/kargo_k8s/roles/kubernetes/secrets/templates/openssl.conf.j2
+
- name: copy overrided variables
copy:
src: "{{ item }}"
diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml
index 2d396d06..b73056e5 100644
--- a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml
+++ b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml
@@ -1,2 +1,5 @@
---
helm_flag: true
+apiserver_loadbalancer_domain_name: "{{ public_vip.ip }}"
+vipaddress: "{{ public_vip.ip }}"
+exlb_port: 8383
diff --git a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
index 130b5ad1..03e3a2af 100644
--- a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
+++ b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2
@@ -45,6 +45,11 @@ neutron_plugin_type: ml2.ovs
neutron_ml2_drivers_type: "local,flat,{{ tenant_net_info['type'] }}"
+neutron_plugin_base:
+ - router
+ - metering
+ - trunk
+
neutron_provider_networks:
network_flat_networks: "*"
network_types: "{{ tenant_net_info['type'] }}"
diff --git a/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml
index 7022a4b5..fb0dc67d 100644
--- a/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/post-openstack/tasks/main.yml
@@ -40,6 +40,7 @@
--subnet-range "{{ public_net_info.floating_ip_cidr }}" \
"{{ public_net_info.subnet }}"
when:
+ - public_net_info.enable == "True"
- inventory_hostname == groups['utility_all'][0]
- name: create openstack flavors
diff --git a/deploy/adapters/ansible/roles/setup-host/tasks/main.yml b/deploy/adapters/ansible/roles/setup-host/tasks/main.yml
index 4eba3d00..0a63f7f5 100644
--- a/deploy/adapters/ansible/roles/setup-host/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-host/tasks/main.yml
@@ -10,7 +10,7 @@
---
- name: openstack-hosts-setup
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible openstack-hosts-setup.yml \
@@ -25,7 +25,7 @@
when: openstack_hosts_setup_result.stdout.find('Mark openstack-hosts-setup completed') == -1
- name: security-hardening
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible security-hardening.yml \
@@ -40,7 +40,7 @@
when: security_hardening_result.stdout.find('Mark security-hardening completed') == -1
- name: lxc-hosts-setup
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible lxc-hosts-setup.yml \
@@ -55,7 +55,7 @@
when: lxc_hosts_setup_result.stdout.find('Mark lxc-hosts-setup completed') == -1
- name: lxc-containers-create
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible lxc-containers-create.yml \
@@ -66,7 +66,7 @@
register: failed_container
- name: destroy the failed_container
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible lxc-containers-destroy.yml \
@@ -77,7 +77,7 @@
ignore_errors: "True"
- name: retry to setup failed_container
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible lxc-containers-create.yml --limit {{item}} \
diff --git a/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml b/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml
index 7cf5c86f..4e3a926f 100644
--- a/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml
@@ -8,7 +8,7 @@
##############################################################################
---
- name: setup infrastructure
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible setup-infrastructure.yml \
diff --git a/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml
index a6ecb82f..c572936d 100644
--- a/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml
@@ -8,7 +8,7 @@
##############################################################################
---
- name: setup openstack
- shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \
+ shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \
export ANSIBLE_SCP_IF_SSH=y; \
cd /opt/openstack-ansible/playbooks; \
openstack-ansible setup-openstack.yml \
diff --git a/deploy/adapters/cobbler/snippets/kickstart_sysctl.conf b/deploy/adapters/cobbler/snippets/kickstart_sysctl.conf
index c227ecfa..112f010d 100644
--- a/deploy/adapters/cobbler/snippets/kickstart_sysctl.conf
+++ b/deploy/adapters/cobbler/snippets/kickstart_sysctl.conf
@@ -59,7 +59,6 @@ net.ipv4.ip_local_port_range = 15000 61000
net.ipv4.tcp_fin_timeout=30
# fast cycling of sockets in time_wait state and re-using them
-net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
# increase the maximum number of requests queued to a listen socket
diff --git a/deploy/adapters/cobbler/snippets/preseed_sysctl.conf b/deploy/adapters/cobbler/snippets/preseed_sysctl.conf
index c227ecfa..112f010d 100644
--- a/deploy/adapters/cobbler/snippets/preseed_sysctl.conf
+++ b/deploy/adapters/cobbler/snippets/preseed_sysctl.conf
@@ -59,7 +59,6 @@ net.ipv4.ip_local_port_range = 15000 61000
net.ipv4.tcp_fin_timeout=30
# fast cycling of sockets in time_wait state and re-using them
-net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
# increase the maximum number of requests queued to a listen socket
diff --git a/deploy/adapters/cobbler/snippets/sysctl.xml b/deploy/adapters/cobbler/snippets/sysctl.xml
index fe13bf7f..42f95976 100644
--- a/deploy/adapters/cobbler/snippets/sysctl.xml
+++ b/deploy/adapters/cobbler/snippets/sysctl.xml
@@ -62,7 +62,6 @@ net.ipv4.ip_local_port_range = 15000 61000
net.ipv4.tcp_fin_timeout=30
# fast cycling of sockets in time_wait state and re-using them
-net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
# increase the maximum number of requests queued to a listen socket
diff --git a/deploy/client.py b/deploy/client.py
index 910aa7f8..891e632c 100644
--- a/deploy/client.py
+++ b/deploy/client.py
@@ -399,17 +399,20 @@ class CompassClient(object):
except:
raise RuntimeError('subnet %s format is invalid' % subnet)
- if CONF.expansion == "false":
+ subnet_exist = False
+ for subnet_in_db in subnets_in_db:
+ if subnet == subnet_in_db['subnet']:
+ subnet_mapping[subnet] = subnet_in_db['id']
+ subnet_exist = True
+ break
+
+ if not subnet_exist:
status, resp = self.client.add_subnet(subnet)
LOG.info('add subnet %s status %s response %s',
subnet, status, resp)
if not self.is_ok(status):
raise RuntimeError('failed to add subnet %s' % subnet)
subnet_mapping[resp['subnet']] = resp['id']
- else:
- for subnet_in_db in subnets_in_db:
- if subnet == subnet_in_db['subnet']:
- subnet_mapping[subnet] = subnet_in_db['id']
self.subnet_mapping = subnet_mapping
@@ -475,8 +478,8 @@ class CompassClient(object):
if host['hostname'] in hostnames:
self.host_mapping[host['hostname']] = host['id']
- if CONF.expansion == "false":
- assert(len(self.host_mapping) == len(machines))
+ # if CONF.expansion == "false":
+ # assert(len(self.host_mapping) == len(machines))
def set_cluster_os_config(self, cluster_id):
"""set cluster os config."""
diff --git a/deploy/compass_conf/flavor/kubernetes.conf b/deploy/compass_conf/flavor/kubernetes.conf
index 35c43155..71acadff 100755
--- a/deploy/compass_conf/flavor/kubernetes.conf
+++ b/deploy/compass_conf/flavor/kubernetes.conf
@@ -4,7 +4,7 @@ FLAVORS = [{
'display_name': 'ansible-kubernetes',
'template': 'ansible-kubernetes.tmpl',
'roles': [
- 'kube_master', 'etcd', 'kube_node'
+ 'kube_master', 'etcd', 'kube_node', 'ha'
],
}]
diff --git a/deploy/compass_conf/package_installer/ansible-kubernetes.conf b/deploy/compass_conf/package_installer/ansible-kubernetes.conf
index 32590c82..820691b7 100755
--- a/deploy/compass_conf/package_installer/ansible-kubernetes.conf
+++ b/deploy/compass_conf/package_installer/ansible-kubernetes.conf
@@ -7,7 +7,7 @@ SETTINGS = {
'playbook_file': 'site.yml',
'inventory_file': 'inventory.py',
'inventory_json_file': 'inventory.json',
- 'inventory_group': ['kube_master', 'etcd', 'kube_node'],
+ 'inventory_group': ['kube_master', 'etcd', 'kube_node', 'ha'],
'group_variable': 'all',
'etc_hosts_path': 'roles/pre-k8s/templates/hosts',
'runner_dirs': ['roles','kubernetes/roles']
diff --git a/deploy/compass_conf/role/kubernetes_ansible.conf b/deploy/compass_conf/role/kubernetes_ansible.conf
index ae096f47..c27779ad 100755
--- a/deploy/compass_conf/role/kubernetes_ansible.conf
+++ b/deploy/compass_conf/role/kubernetes_ansible.conf
@@ -11,5 +11,10 @@ ROLES = [{
'role': 'kube_node',
'display_name': 'kube node',
'description': 'kube Node'
-}
+}, {
+ 'role': 'ha',
+ 'display_name': 'ha',
+ 'description': 'ha'
+}
+
]
diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl
index 440bf7d7..f132365a 100644
--- a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl
+++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl
@@ -23,6 +23,8 @@
#set kube_masters = $getVar('kube_master', [])
#set kube_nodes = $getVar('kube_node', [])
+run_dir: $getVar('run_dir', '')
+
enable_secgroup: $getVar('enable_secgroup', True)
enable_fwaas: $getVar('enable_fwaas', True)
enable_vpnaas: $getVar('enable_vpnaas', True)
@@ -82,7 +84,7 @@ dashboard_host: "{{ internal_ip }}"
haproxy_hosts:
#for $item in $has
#set $hostname=$item["hostname"]
- $hostname: $ip_settings[$hostname]["mgmt"]["ip"]
+ $hostname: $ip_settings[$hostname]["external"]["ip"]
#end for
host_index:
diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh
index 7689f41c..cf215f3b 100755
--- a/deploy/compass_vm.sh
+++ b/deploy/compass_vm.sh
@@ -10,6 +10,26 @@
compass_vm_dir=$WORK_DIR/vm/compass
rsa_file=$compass_vm_dir/boot.rsa
ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file"
+
+function check_container_alive() {
+ docker exec -it compass-deck bash -c "exit" 1>/dev/null 2>&1
+ local deck_state=$?
+ docker exec -it compass-tasks bash -c "exit" 1>/dev/null 2>&1
+ local tasks_state=$?
+ docker exec -it compass-cobbler bash -c "exit" 1>/dev/null 2>&1
+ local cobbler_state=$?
+ docker exec -it compass-db bash -c "exit" 1>/dev/null 2>&1
+ local db_state=$?
+ docker exec -it compass-mq bash -c "exit" 1>/dev/null 2>&1
+ local mq_state=$?
+
+ if [ $((deck_state||tasks_state||cobbler_state||db_state||mq_state)) == 0 ]; then
+ echo "true"
+ else
+ echo "false"
+ fi
+}
+
function tear_down_compass() {
sudo virsh destroy compass > /dev/null 2>&1
sudo virsh undefine compass > /dev/null 2>&1
diff --git a/deploy/conf/base.conf b/deploy/conf/base.conf
index c2bf5291..5395405d 100644
--- a/deploy/conf/base.conf
+++ b/deploy/conf/base.conf
@@ -8,7 +8,6 @@ export EXT_NAT_GW=${EXT_NAT_GW:-192.16.1.1}
export EXT_NAT_IP_START=${EXT_NAT_IP_START:-192.16.1.3}
export EXT_NAT_IP_END=${EXT_NAT_IP_END:-192.16.1.254}
export EXTERNAL_NIC=${EXTERNAL_NIC:-eth0}
-export CLUSTER_NAME="opnfv2"
export DOMAIN="ods.com"
export PARTITIONS="/=30%,/home=5%,/tmp=5%,/var=60%"
export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
diff --git a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml
index 995d0107..2cedcf4d 100644
--- a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml
+++ b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml
@@ -25,6 +25,7 @@ hosts:
roles:
- kube_master
- etcd
+ - ha
- name: host2
mac: 'D8:49:0B:DA:5A:B7'
@@ -35,6 +36,7 @@ hosts:
roles:
- kube_master
- etcd
+ - ha
- name: host3
mac: '78:D7:52:A0:B1:99'
@@ -45,6 +47,7 @@ hosts:
roles:
- kube_master
- etcd
+ - ha
- name: host4
mac: 'D8:49:0B:DA:5B:5D'
diff --git a/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml b/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml
index 003f41be..42262057 100644
--- a/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml
+++ b/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml
@@ -16,16 +16,19 @@ hosts:
roles:
- kube_master
- etcd
+ - ha
- name: host2
roles:
- kube_master
- etcd
+ - ha
- name: host3
roles:
- kube_master
- etcd
+ - ha
- name: host4
roles:
diff --git a/deploy/config_parse.py b/deploy/config_parse.py
index 3d8dedc5..1575ca37 100644
--- a/deploy/config_parse.py
+++ b/deploy/config_parse.py
@@ -104,6 +104,7 @@ def export_dha_file(dha, dha_file, ofile):
plugin_list.append(plugin_str)
env.update({'plugins': ','.join(plugin_list)})
+ env.update({'CLUSTER_NAME': dha.get('NAME', "opnfv")})
env.update({'TYPE': dha.get('TYPE', "virtual")})
env.update({'FLAVOR': dha.get('FLAVOR', "cluster")})
env.update({'HOSTNAMES': hostnames(dha, ',')})
diff --git a/deploy/launch.sh b/deploy/launch.sh
index 6cbad1fa..98d9e4d6 100755
--- a/deploy/launch.sh
+++ b/deploy/launch.sh
@@ -55,7 +55,8 @@ if [[ "$EXPANSION" == "false" ]]; then
export machines
- if [[ "$DEPLOY_COMPASS" == "true" ]]; then
+ CONTAINER_ALIVE=$(check_container_alive)
+ if [[ "$DEPLOY_COMPASS" == "true" && "$CONTAINER_ALIVE" == "false" ]]; then
if ! prepare_env;then
echo "prepare_env failed"
exit 1
@@ -71,7 +72,7 @@ if [[ "$EXPANSION" == "false" ]]; then
log_error "launch_compass failed"
exit 1
fi
- else
+ elif [[ "$DEPLOY_COMPASS" == "true" && "$CONTAINER_ALIVE" == "true" ]]; then
refresh_compass_core
fi
else
diff --git a/deploy/status_callback.py b/deploy/status_callback.py
index f0615f3f..6169b87f 100644
--- a/deploy/status_callback.py
+++ b/deploy/status_callback.py
@@ -8,7 +8,7 @@
##############################################################################
import httplib
-import json
+import simplejson as json
import sys # noqa:F401
from ansible.plugins.callback import CallbackBase