Diffstat (limited to 'mcp/config/states')
-rwxr-xr-x  mcp/config/states/akraino_iec                                       62
-rwxr-xr-x  mcp/config/states/baremetal_init                                    35
-rwxr-xr-x  mcp/config/states/kubernetes                                        39
-rwxr-xr-x  mcp/config/states/maas                                              86
-rwxr-xr-x  mcp/config/states/networks                                           3
-rwxr-xr-x  mcp/config/states/onap                                              65
-rwxr-xr-x  mcp/config/states/opendaylight                                       8
-rwxr-xr-x  mcp/config/states/openstack_ha                                      71
-rwxr-xr-x  mcp/config/states/openstack_noha                                    44
-rwxr-xr-x  mcp/config/states/quagga (renamed from mcp/config/states/dpdk)       8
-rwxr-xr-x  mcp/config/states/tacker                                            12
-rwxr-xr-x  mcp/config/states/virtual_control_plane                             19
-rwxr-xr-x  mcp/config/states/virtual_init                                      43
13 files changed, 342 insertions(+), 153 deletions(-)
diff --git a/mcp/config/states/akraino_iec b/mcp/config/states/akraino_iec
new file mode 100755
index 000000000..efe0d4df0
--- /dev/null
+++ b/mcp/config/states/akraino_iec
@@ -0,0 +1,62 @@
+#!/bin/bash -e
+# shellcheck disable=SC1090
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")/../../scripts
+
+source "${SCRIPTS_DIR}/lib.sh"
+source "${SCRIPTS_DIR}/xdf_data.sh"
+source "${SCRIPTS_DIR}/globals.sh"
+
+IEC_REPO_URI='https://gerrit.akraino.org/r/iec'
+IEC_USER_L='ubuntu'
+# shellcheck disable=SC2154
+if [[ "${base_image}" =~ centos ]]; then
+ IEC_USER='centos'
+else
+ IEC_USER=${IEC_USER_L}
+fi
+IEC_REPO_PATH=/var/lib/akraino/iec
+IEC_SCRIPTS_PATH="${IEC_REPO_PATH}/src/foundation/scripts"
+
+POD_NETWORK_CIDR='100.100.0.0/16' # Avoid overlapping Fuel's PXE/admin net
+
+# shellcheck disable=SC2174
+mkdir -p -m 777 "$(dirname ${IEC_REPO_PATH})"
+[ -e "${IEC_REPO_PATH}" ] || su - "${IEC_USER_L}" -c \
+ "git clone '${IEC_REPO_URI}' '${IEC_REPO_PATH}'"
+# shellcheck disable=SC2086
+wait_for 3.0 "! salt-cp 'iec*' -C '${IEC_REPO_PATH}/' \
+ '$(dirname ${IEC_REPO_PATH})' | grep -e False"
+salt -C 'iec*' cmd.run "chown -R ${IEC_USER}:${IEC_USER} ${IEC_REPO_PATH}"
+
+salt -C 'iec*' cmd.run runas="${IEC_USER}" "${IEC_SCRIPTS_PATH}/k8s_common.sh"
+
+IEC_MASTER_IP=$(salt --out txt -C 'iec* and *01*' pillar.get \
+ _param:single_address | cut -d ' ' -f2)
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ stdin="$(sed -z 's/\n/\\n/g' "${SCRIPTS_DIR}/$(basename "${SSH_KEY}")")" \
+ "mkdir -p .ssh && touch .ssh/id_rsa && chmod 600 .ssh/id_rsa && \
+ cat > .ssh/id_rsa && \
+ ${IEC_SCRIPTS_PATH}/k8s_master.sh ${IEC_MASTER_IP} ${POD_NETWORK_CIDR}"
+
+KUBE_NODE_CNT=$(salt --out txt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ 'kubectl get nodes | grep -c -e "^iec"' | cut -d ' ' -f2)
+if [ "${KUBE_NODE_CNT}" != "$(salt-key | grep -c -e '^iec')" ]; then
+ KUBE_JOIN_CMD=$(salt --out txt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ 'kubeadm token create --print-join-command' | cut -d ' ' -f2-)
+ salt -C 'iec* and not *01*' cmd.run "${KUBE_JOIN_CMD}"
+fi
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" 'kubectl get nodes'
+
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ "${IEC_SCRIPTS_PATH}/setup-cni.sh '' ${POD_NETWORK_CIDR}"
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" "${IEC_SCRIPTS_PATH}/nginx.sh"
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" "${IEC_SCRIPTS_PATH}/helm.sh"
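
All of these state scripts lean on the `wait_for` helper sourced from
mcp/scripts/lib.sh. A minimal sketch of the retry idiom it provides, assuming a
fixed sleep between attempts and truncation of fractional attempt counts (the
real helper may differ in detail):

    # Sketch only: retry an eval'd command string up to $1 times.
    function wait_for {
        local total_attempts=${1%.*}; shift  # assumed: '90.0' is treated as 90 tries
        local cmdstr=$*
        for ((i = 1; i <= total_attempts; i++)); do
            eval "${cmdstr}" && return 0
            sleep 10
        done
        return 1  # callers run under 'set -e', so exhausting retries aborts the deploy
    }

This is why whole salt invocations are passed to wait_for as quoted strings.
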
diff --git a/mcp/config/states/baremetal_init b/mcp/config/states/baremetal_init
index 891eae365..dcedfbeda 100755
--- a/mcp/config/states/baremetal_init
+++ b/mcp/config/states/baremetal_init
@@ -11,32 +11,31 @@ CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
# shellcheck disable=SC1090
source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/xdf_data.sh"
+
+cluster_nodes_query="${control_nodes_query} or cmp*"
# KVM, compute node prereqs
# patch the networking module for Debian based distros
debian_ip_source=/usr/lib/python2.7/dist-packages/salt/modules/debian_ip.py
-salt -C 'kvm* or cmp*' file.line $debian_ip_source \
+salt -C "${cluster_nodes_query}" file.line $debian_ip_source \
content='iface = iface.lower()' mode='delete'
-salt -C 'kvm* or cmp*' file.replace $debian_ip_source \
+salt -C "${cluster_nodes_query}" file.replace $debian_ip_source \
pattern="^\s{8}__salt__\['pkg.install'\]\('vlan'\)" \
repl="\n if not __salt__['pkg.version']('vlan'):\n __salt__['pkg.install']('vlan')"
-salt -C 'kvm*' pkg.install bridge-utils
-salt -C 'kvm*' state.apply linux.network,linux.system.kernel
-salt -C 'kvm* or cmp*' state.apply salt.minion
-wait_for 5.0 "salt -C 'cmp*' state.apply linux.system"
-# wrap distro `route` binary to silence errors when route already exists
-wait_for 5.0 "salt -C 'kvm* or cmp*' state.apply opnfv.route_wrapper"
-salt -C 'cmp*' state.apply linux.network || true
-wait_for 30.0 "salt -C 'kvm* or cmp*' test.ping"
-
-# disable dhcp offered routes on compute nodes
-salt -C 'cmp*' file.write /etc/dhcp/dhclient-enter-hooks.d/no-default-route \
- args='unset new_routers'
+salt -C "${cluster_nodes_query}" state.apply linux.system.repo
+salt -C "${cluster_nodes_query}" pkg.install force_yes=true bridge-utils,python-jinja2
+salt -C "${cluster_nodes_query}" service.restart salt-minion
+wait_for 5.0 "salt -C '${cluster_nodes_query}' state.apply salt.minion"
+salt -C "${cluster_nodes_query}" file.remove /etc/resolv.conf
+salt -C "${cluster_nodes_query}" file.touch /etc/resolv.conf
+wait_for 5.0 "salt -C '${cluster_nodes_query}' state.apply linux,ntp"
+wait_for 30.0 "salt -C '${cluster_nodes_query}' test.ping"
-salt -C 'kvm* or cmp*' system.reboot
-wait_for 90.0 "salt -C 'kvm* or cmp*' test.ping"
+salt -C "${cluster_nodes_query}" cmd.run 'reboot'
+wait_for 90.0 "salt -C '${cluster_nodes_query}' test.ping"
-salt -C 'kvm* or cmp*' state.apply linux,ntp
-salt -C 'kvm* or cmp*' pkg.upgrade refresh=False
+salt -C "${cluster_nodes_query}" state.apply linux,ntp
+salt -C "${cluster_nodes_query}" pkg.upgrade refresh=False dist_upgrade=True
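
For context, `control_nodes_query` is defined in mcp/scripts/xdf_data.sh based
on the deploy scenario. Assuming an HA baremetal deploy where it expands to
'kvm*', the compound target above resolves as follows (minion names are
hypothetical):

    cluster_nodes_query='kvm* or cmp*'
    salt -C "${cluster_nodes_query}" test.ping
    # kvm01.local:  True
    # kvm02.local:  True
    # cmp001.local: True
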
diff --git a/mcp/config/states/kubernetes b/mcp/config/states/kubernetes
new file mode 100755
index 000000000..0894b10a6
--- /dev/null
+++ b/mcp/config/states/kubernetes
@@ -0,0 +1,39 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+
+# Create and distribute SSL certificates for services
+salt-call state.sls salt.minion
+
+# Install etcd
+salt -I 'etcd:server' state.sls etcd.server.service
+salt -I 'etcd:server' cmd.run ". /var/lib/etcd/configenv && etcdctl cluster-health"
+
+# Install Kubernetes and Calico
+salt -I 'kubernetes:master' state.sls kubernetes.master.kube-addons
+salt -I 'kubernetes:pool' state.sls kubernetes.pool
+salt -I 'kubernetes:pool' cmd.run "calicoctl node status"
+salt -I 'kubernetes:pool' cmd.run "calicoctl get ippool"
+
+# Setup NAT for Calico
+salt -I 'kubernetes:master' state.sls etcd.server.setup
+
+# Run whole master to check consistency
+salt -I 'kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
+
+# Register addons
+salt -I 'kubernetes:master' state.sls kubernetes.master.setup
+
+# Upload config
+K8S_CONFIG=kubernetes.config
+K8S_HOST_ID=$(salt -I 'kubernetes:master' --out=yaml cp.push \
+ /etc/kubernetes/admin-kube-config \
+ upload_path="$K8S_CONFIG" | cut -d':' -f1)
+cd /opt && ln -sf "/var/cache/salt/master/minions/${K8S_HOST_ID}/files/${K8S_CONFIG}"
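
The closing `ln -sf` leaves a /opt/kubernetes.config symlink on the Salt master
pointing at the pushed admin kubeconfig, so, assuming that link, the new
cluster can be inspected directly from cfg01:

    export KUBECONFIG=/opt/kubernetes.config
    kubectl get nodes -o wide
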
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
index 10cfd01bf..28ef4cae0 100755
--- a/mcp/config/states/maas
+++ b/mcp/config/states/maas
@@ -1,7 +1,7 @@
#!/bin/bash -e
-# shellcheck disable=SC1090,SC2155
+# shellcheck disable=SC1090
##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -17,78 +17,18 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
bm_nodes=$(salt --out yaml 'mas01*' pillar.get maas:region:machines | \
awk '/^\s+\w+[[:digit:]]+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
-# Wait for MaaS commissioning/deploy to finish, retry on failure
-function maas_fixup() {
- local statuscmd="salt 'mas01*' --out yaml state.apply maas.machines.status"
- local ncount=$(salt --out yaml 'mas01*' pillar.get maas:region:machines | \
- grep -cE '^\s{2}\w+:$')
-
- # wait_for has 10sec timeout * 96 = 16 min > 15min for Failed state
- wait_for 96 "${statuscmd} | tee /dev/stderr | " \
- "grep -Eq '((Deployed|Ready): ${ncount}|status: (Failed|Allocated))'"
- local statusout=$(eval "${statuscmd}")
-
- local fcnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: Failed commissioning\n\s+system_id: \K.+\n')
- local ftnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: Failed testing\n\s+system_id: \K.+\n')
- for node_system_id in ${fcnodes}; do
- salt -C 'mas01*' state.apply maas.machines.delete \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
- done
- for node_system_id in ${ftnodes}; do
- salt -C 'mas01*' state.apply maas.machines.override_failed_testing \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
- done
- if [ -n "${fcnodes}" ] || [ -n "${ftnodes}" ]; then
- salt -C 'mas01*' state.apply maas.machines
- return 1
- fi
-
- local fdnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: (Failed deployment|Allocated)\n\s+system_id: \K.+\n')
- local rnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: Ready\n\s+system_id: \K.+\n')
- for node_system_id in ${fdnodes}; do
- salt -C 'mas01*' state.apply maas.machines.mark_broken_fixed \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
- done
- if [ -n "${fdnodes}" ] || [ -n "${rnodes}" ]; then
- for node_system_id in ${fdnodes} ${rnodes}; do
- # For now, we allocate 30GB (fixed) for / on cmp nodes
- local node_hostname=$(echo "${statusout}" | \
- grep -Pzo 'hostname: \K.+(?=\n.+\n\s+system_id: '"${node_system_id}"')')
- if [[ "${node_hostname}" =~ ^cmp ]]; then
- salt -C 'mas01*' state.apply maas.machines.set_storage_layout \
- pillar="{'system_id': '${node_system_id}', 'lv_size': '32212254720'}"
- sleep 10
- fi
- done
- salt -C 'mas01*' state.apply maas.machines.deploy
- return 1
- fi
-
- return 0
-}
+wait_for 60.0 "salt --out yaml -C 'mas01*' service.status maas-fixup | fgrep -q 'false'"
# Optionally destroy MaaS machines from a previous run
if [ "${ERASE_ENV}" -gt 1 ]; then
- set +e; dnodes=$(salt 'mas01*' --out yaml state.apply maas.machines.status | \
- grep -Pzo '\s+system_id: \K.+\n'); set -e
cleanup_uefi
- for node_system_id in ${dnodes}; do
- salt -C 'mas01*' state.apply maas.machines.delete \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
+ for node_hostname in ${bm_nodes//\*/}; do
+ salt -C 'mas01*' maasng.delete_machine "${node_hostname}" || true
done
fi
# MaaS rack/region controller, node commissioning
-salt -C 'mas01*' state.apply linux,salt,openssh,ntp
-salt -C 'mas01*' state.apply maas.pxe_nat
+wait_for 10.0 "salt -C 'mas01*' state.apply salt,iptables"
salt -C 'mas01*' state.apply maas.cluster
wait_for 10 "salt -C 'mas01*' state.apply maas.region"
@@ -103,13 +43,15 @@ salt-key --out yaml | awk '!/^(minions|- cfg01|- mas01)/ {print $2}' | \
xargs --no-run-if-empty -I{} salt-key -yd {}
# MaaS node deployment
-wait_for 10 maas_fixup
-
-salt -C 'mas01*' pillar.item\
- maas:region:admin:username \
- maas:region:admin:password
+if [ -n "${bm_nodes}" ]; then
+ notify "[NOTE] MaaS operations might take a long time, please be patient" 2
+ salt -C 'mas01*' state.apply maas.machines.wait_for_ready_or_deployed
+ salt -C 'mas01*' state.apply maas.machines.storage
+ salt -C 'mas01*' state.apply maas.machines.deploy
+ salt -C 'mas01*' state.apply maas.machines.wait_for_deployed
+fi
# Check all baremetal nodes are available
-wait_for 5.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
+wait_for 10.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
wait_for 10.0 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
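
For reference, the `bm_nodes` expression at the top of this file turns the keys
of the maas:region:machines pillar into Salt glob targets; with hypothetical
machine entries it behaves like:

    salt --out yaml 'mas01*' pillar.get maas:region:machines
    # mas01.local:
    #   kvm01: {...}
    #   kvm02: {...}
    #   cmp001: {...}
    # => bm_nodes='kvm01* kvm02* cmp001* '
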
diff --git a/mcp/config/states/networks b/mcp/config/states/networks
index 222a0726d..e9a9f5190 100755
--- a/mcp/config/states/networks
+++ b/mcp/config/states/networks
@@ -35,3 +35,6 @@ salt -C 'I@nova:controller and *01*' cmd.run ". /root/keystonercv3; \
openstack subnet create --gateway ${PUBLIC_NET_GATEWAY} --no-dhcp \
--allocation-pool start=${POOL_START_IP},end=${POOL_END_IP} \
--network floating_net --subnet-range ${PUBLIC_NET} floating_subnet"
+
+# Discover compute hosts after they are registered
+salt -C 'I@nova:controller and *01*' state.sls_id nova_controller_discover_hosts nova
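
`state.sls_id` applies a single state ID from the nova formula. The
nova_controller_discover_hosts ID is assumed here to wrap nova's cell-mapping
step, roughly equivalent to (exact invocation depends on the formula):

    salt -C 'I@nova:controller and *01*' cmd.run \
        'nova-manage cell_v2 discover_hosts --verbose'
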
diff --git a/mcp/config/states/onap b/mcp/config/states/onap
new file mode 100755
index 000000000..d196074d9
--- /dev/null
+++ b/mcp/config/states/onap
@@ -0,0 +1,65 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2018 Tieto
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Deploy ONAP on top of OPNFV installed by Fuel/MCP
+# ONAP installation is managed by OPNFV Auto project
+
+AUTO_INSTALL_DIR=/opt/auto
+AUTO_REPO='https://gerrit.opnfv.org/gerrit/auto'
+ONAP_INSTALL_SCRIPT='ci/deploy-onap-fuel.sh'
+
+echo "Clone Auto Repo"
+salt -C 'I@nova:controller and *01*' cmd.run "\
+ rm -rf $AUTO_INSTALL_DIR; \
+ git clone $AUTO_REPO $AUTO_INSTALL_DIR"
+
+echo "ONAP installation starts at $(date)"
+echo "It can take several hours to finish."
+
+# detect compute HW configuration, i.e. minimal values available across
+# all compute nodes
+CMP_COUNT=$(salt -C 'I@nova:compute' grains.get id --out txt | wc -l)
+CMP_MIN_MEM=$(salt -C 'I@nova:compute' grains.get mem_total --out txt |\
+ sed -re 's/^[^:]+: ([0-9]+)$/\1/g' | sort -n | head -n1)
+CMP_MIN_CPUS=$(salt -C 'I@nova:compute' grains.get num_cpus --out txt |\
+ sed -re 's/^[^:]+: ([0-9]+)$/\1/g' | sort -n | head -n1)
+# check disk size for storage of instances; if shared storage is mounted,
+# then return its size, otherwise sum up available space of the root disks of all
+# compute nodes
+STORAGE_PATH='/var/lib/nova/instances'
+MOUNT_COUNT=$(salt "cmp*" mount.is_mounted $STORAGE_PATH --out txt |\
+ grep True | wc -l)
+if [ $MOUNT_COUNT -eq $CMP_COUNT ] ; then
+ CMP_STORAGE_TOTAL=$(salt "cmp*" cmd.run "df -BGB $STORAGE_PATH" --out txt |\
+ grep "$STORAGE_PATH" |\
+ sed -re 's/^.* +([0-9]+)GB +([0-9]+GB +){2}.*$/\1/g' |\
+ sort -n | head -n1)
+else
+ CMP_STORAGE_TOTAL=0
+ for STORAGE in $(salt "cmp*" cmd.run "df -BGB /" --out txt | grep '/$' |\
+ sed -re 's/^.* +([0-9]+GB +){2}([0-9]+)GB +.*$/\2/g') ; do
+ CMP_STORAGE_TOTAL=$(($CMP_STORAGE_TOTAL+$STORAGE));
+ done
+fi
+
+# Deploy ONAP with detected configuration
+# execute installation from the 1st controller node
+CTL01=$(salt -C 'I@nova:controller and *01*' grains.get id --out txt |\
+ head -n1 | cut -d':' -f1)
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ -i /root/fuel/mcp/scripts/mcp.rsa -l ubuntu $CTL01 "bash -s" <<COMMANDS
+ sudo -i
+ source /root/keystonercv3
+ cd $AUTO_INSTALL_DIR
+ export CMP_COUNT=$CMP_COUNT
+ export CMP_MIN_MEM=$CMP_MIN_MEM
+ export CMP_MIN_CPUS=$CMP_MIN_CPUS
+ export CMP_STORAGE_TOTAL=$CMP_STORAGE_TOTAL
+ export AUTO_INSTALL_DIR=$AUTO_INSTALL_DIR
+ $ONAP_INSTALL_SCRIPT | tee $AUTO_INSTALL_DIR/auto_deploy.log
+COMMANDS
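
The minimum-across-minions pattern used for CMP_MIN_MEM and CMP_MIN_CPUS relies
on `--out txt` printing one 'minion: value' pair per line; stripping the
prefix, sorting numerically and taking the first line yields the cluster-wide
minimum. A run with hypothetical output:

    salt -C 'I@nova:compute' grains.get mem_total --out txt
    # cmp001.local: 65536
    # cmp002.local: 131072
    # ... | sed -re 's/^[^:]+: ([0-9]+)$/\1/g' | sort -n | head -n1  => 65536
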
diff --git a/mcp/config/states/opendaylight b/mcp/config/states/opendaylight
index de15d0cef..ae8b4cc92 100755
--- a/mcp/config/states/opendaylight
+++ b/mcp/config/states/opendaylight
@@ -14,8 +14,14 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
# Get OpenDaylight server options with prefix odl_
function odl() {
- salt --out txt -I 'opendaylight:server' pillar.get "opendaylight:server:odl_$1" | cut -d ' ' -f2
+ salt --out txt -C 'I@opendaylight:server and *01*' pillar.get "opendaylight:server:odl_$1" | cut -d ' ' -f2
}
wait_for 5.0 "salt -I 'opendaylight:server' state.sls opendaylight"
wait_for 20 "salt --out yaml -C 'I@neutron:server and *01*' network.connect $(odl bind_ip) $(odl rest_port) | fgrep -q 'result: true'"
+
+# https://bugs.launchpad.net/networking-odl/+bug/1822559
+FILE=/usr/lib/python3/dist-packages/networking_odl/cmd/set_ovs_hostconfigs.py
+PFILE=/var/tmp/odl_hostconfig.patch
+salt -I 'linux:network:bridge:openvswitch' pkg.install python3-networking-odl
+salt -I 'linux:network:bridge:openvswitch' cmd.run "patch -R -s --dry-run $FILE < $PFILE || patch $FILE < $PFILE"
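
The final line uses a common idempotency idiom: a reverse dry-run succeeds only
when the patch is already applied, so the real patch runs at most once no
matter how often this state is re-executed. In general form:

    # Apply $PFILE to $FILE only if it has not been applied yet:
    patch -R -s --dry-run "$FILE" < "$PFILE" || patch "$FILE" < "$PFILE"
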
diff --git a/mcp/config/states/openstack_ha b/mcp/config/states/openstack_ha
index 07bd6b48b..11da4e33a 100755
--- a/mcp/config/states/openstack_ha
+++ b/mcp/config/states/openstack_ha
@@ -15,8 +15,8 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
wait_for 5.0 "salt -I 'keepalived:cluster' state.sls keepalived -b 1"
wait_for 5.0 "salt -I 'keepalived:cluster' pillar.get keepalived:cluster:instance:VIP:address"
-salt -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-salt -I 'rabbitmq:server' state.sls rabbitmq
+wait_for 5.0 "salt -I 'rabbitmq:cluster:role:master' state.sls rabbitmq"
+salt -I 'rabbitmq:cluster:role:slave' state.sls rabbitmq
salt -I 'rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
salt -I 'glusterfs:server' state.sls glusterfs.server.service
@@ -25,7 +25,7 @@ salt -I 'glusterfs:server' cmd.run "gluster peer status; gluster volume status"
salt -I 'glusterfs:client' state.sls glusterfs.client
salt -I 'galera:master' state.sls galera
-salt -I 'galera:slave' state.sls galera
+salt -I 'galera:slave' state.sls galera -b 1
salt -I 'galera:master' mysql.status | grep -A1 wsrep_cluster_size
wait_for 3.0 "salt -I 'memcached:server' state.sls memcached"
@@ -34,46 +34,65 @@ salt -I 'haproxy:proxy' state.sls haproxy
salt -I 'haproxy:proxy' service.status haproxy
salt -I 'haproxy:proxy' service.restart rsyslog
-set +e; salt -I 'keystone:server' state.sls keystone.server -b 1; set -e
+salt -I 'keystone:server:role:primary' state.sls keystone.server
+salt -I 'keystone:server:role:secondary' state.sls keystone.server
salt -I 'keystone:server' service.restart apache2
-salt -I 'keystone:server' state.sls keystone.server -b 1
wait_for 30.0 "salt -I 'keystone:client' state.sls keystone.client"
salt -I 'keystone:server' cmd.run ". /root/keystonercv3; openstack service list"
-salt -I 'glance:server' state.sls glance -b 1
-salt -I 'nova:controller' state.sls nova -b 1
-salt -I 'heat:server' state.sls heat -b 1
+salt -I 'glance:server:role:primary' state.sls glance
+salt -I 'glance:server:role:secondary' state.sls glance
+salt -I 'nova:controller:role:primary' state.sls nova
+salt -I 'nova:controller:role:secondary' state.sls nova
+salt -I 'heat:server:role:primary' state.sls heat
+salt -I 'heat:server:role:secondary' state.sls heat
-wait_for 5.0 "salt -I 'cinder:controller' state.sls cinder -b 1"
+salt -I 'cinder:controller:role:primary' state.sls cinder
+salt -I 'cinder:controller:role:secondary' state.sls cinder
wait_for 3.0 "salt -I 'cinder:volume' state.sls cinder"
-salt -I 'neutron:server' state.sls neutron -b 1
+salt -I 'neutron:server:role:primary' state.sls neutron
+salt -I 'neutron:server:role:secondary' state.sls neutron
salt -I 'neutron:gateway' state.sls neutron.gateway
+if salt 'cmp*' match.pillar 'neutron:compute:backend:engine:ovn' \
+ --out yaml --static | grep -q -e 'true' ; then
+ salt -I 'neutron:compute' state.sls neutron.compute
+fi
-salt -I 'nova:compute' state.sls nova
-
-salt -C 'I@mongodb:server and *01*' state.sls mongodb || true
-wait_for 10.0 "salt -C 'I@mongodb:server and *01*' cmd.run 'mongo localhost:27017/admin'"
-salt -C 'I@mongodb:server and *01*' cmd.run 'mongo localhost:27017/admin --eval "rs.initiate()"'
-salt -I 'mongodb:server' state.sls mongodb
-
-salt -I 'aodh:server' state.sls aodh -b 1
+salt -I 'nova:compute' state.sls nova,armband
+
+salt -I 'barbican:server:role:primary' state.sls barbican
+salt -I 'barbican:server:role:secondary' state.sls barbican
+salt -I 'barbican:client' state.sls barbican
+
+# remove config files coming from packages
+for service in gnocchi panko; do
+ salt -I "${service}:server" pkg.install ${service}-api
+ salt -I "${service}:server" file.remove "/etc/apache2/sites-enabled/${service}-api.conf"
+done
+
+salt -I 'redis:cluster:role:master' state.sls redis
+salt -I 'redis:server' state.sls redis
+salt -I 'gnocchi:server:role:primary' state.sls gnocchi
+salt -I 'gnocchi:server:role:secondary' state.sls gnocchi
+salt -I 'panko:server:role:primary' state.sls panko
+salt -I 'panko:server:role:secondary' state.sls panko
+salt -I 'aodh:server:role:primary' state.sls aodh
+salt -I 'aodh:server:role:secondary' state.sls aodh
salt -I 'ceilometer:server' state.sls ceilometer
salt -I 'ceilometer:agent' state.sls ceilometer
-salt -I 'horizon:server' state.sls horizon
+wait_for 3.0 "salt -I 'horizon:server' state.sls apache,horizon"
salt -I 'nginx:server' state.sls nginx
-# workaround for Ubuntu Pike Horizon missing css, FUEL-324
+# Workaround Horizon missing CSS/JS, see FUEL-324
if ! salt -C 'I@horizon:server and *01*' --out=yaml pkg.version openstack-dashboard | grep -qE ':.*mcp'; then
salt -I 'horizon:server' file.symlink \
/var/lib/openstack-dashboard/static \
/usr/share/openstack-dashboard/static
- salt -I 'horizon:server' cmd.run "/usr/share/openstack-dashboard/manage.py collectstatic --noinput"
- salt -I 'horizon:server' cmd.run "/usr/share/openstack-dashboard/manage.py compress --force"
- salt -I 'horizon:server' file.append /etc/openstack-dashboard/local_settings.py \
- "AVAILABLE_THEMES = [ ('default', 'Default', 'themes/default'),]"
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py collectstatic --noinput"
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py compress --force"
salt -I 'horizon:server' service.reload apache2
fi
@@ -84,7 +103,3 @@ dashboard_host=$(salt -C 'I@nginx:server and *01*' --out=yaml cp.push \
upload_path='certs/os_cacert' | cut -d':' -f1)
cd /etc/ssl/certs && \
ln -sf "/var/cache/salt/master/minions/${dashboard_host}/files/certs/os_cacert"
-
-# glance v1 api is required by orchestra tests
-salt -I 'glance:server' ini.set_option /etc/glance/glance-api.conf '{DEFAULT: {enable_v1_api: True}}'
-salt -I 'glance:server' service.restart glance-api
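
Note the batching change throughout this file: most `-b 1` batch runs are
replaced by explicit primary/secondary role ordering, while `-b 1` stays where
a rolling, one-minion-at-a-time application still matters, e.g. for the galera
slaves:

    salt -I 'galera:slave' state.sls galera -b 1  # one slave at a time, preserves quorum
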
diff --git a/mcp/config/states/openstack_noha b/mcp/config/states/openstack_noha
index 4c8cbcf8a..6b503504b 100755
--- a/mcp/config/states/openstack_noha
+++ b/mcp/config/states/openstack_noha
@@ -23,17 +23,14 @@ salt -I 'mysql:server' state.sls mysql
salt -I 'memcached:server' state.sls memcached
salt -I 'haproxy:proxy' state.sls haproxy
-set +e; salt -I 'keystone:server' state.sls keystone.server; set -e
-salt -I 'keystone:server' service.restart apache2
salt -I 'keystone:server' state.sls keystone.server
+salt -I 'keystone:server' service.restart apache2
salt -I 'keystone:server' state.sls keystone.client
salt -I 'keystone:server' cmd.run ". /root/keystonercv3; openstack service list"
salt -I 'glance:server' state.sls glance
-# apply nova state twice to complete broken db sync
-salt -I 'nova:controller' state.sls nova
salt -I 'nova:controller' state.sls nova
salt -I 'heat:server' state.sls heat
@@ -41,24 +38,35 @@ salt -I 'heat:server' state.sls heat
salt -I 'cinder:controller' state.sls cinder
wait_for 3 "salt -I 'cinder:volume' state.sls cinder"
-salt -I 'neutron:server' state.sls neutron
+salt -I 'neutron:server' state.sls etcd,neutron
salt -I 'neutron:compute' state.sls neutron
-salt -I 'nova:compute' state.sls nova
+salt -I 'nova:compute' state.sls nova,armband
+
+salt -I 'barbican:server' state.sls barbican
+salt -I 'barbican:client' state.sls barbican
+
+# remove config files coming from packages
+for service in gnocchi panko; do
+ salt -I "${service}:server" pkg.install ${service}-api
+ salt -I "${service}:server" file.remove "/etc/apache2/sites-enabled/${service}-api.conf"
+done
-wait_for 3 "salt -I 'mongodb:server' state.sls mongodb"
+salt -I 'redis:server' state.sls redis
+salt -I 'gnocchi:server' state.sls gnocchi
+salt -I 'panko:server' state.sls panko
salt -I 'aodh:server' state.sls aodh
salt -I 'ceilometer:server' state.sls ceilometer
salt -I 'ceilometer:agent' state.sls ceilometer
-salt -I 'horizon:server' state.sls horizon
-
-# workaround for the pike horizon is missing css, FUEL-324
-salt -I 'horizon:server' file.symlink \
- /var/lib/openstack-dashboard/static \
- /usr/share/openstack-dashboard/static
-salt -I 'horizon:server' cmd.run "/usr/share/openstack-dashboard/manage.py collectstatic --noinput"
-salt -I 'horizon:server' cmd.run "/usr/share/openstack-dashboard/manage.py compress --force"
-salt -I 'horizon:server' file.append /etc/openstack-dashboard/local_settings.py \
- "AVAILABLE_THEMES = [ ('default', 'Default', 'themes/default'),]"
-salt -I 'horizon:server' service.reload apache2
+salt -I 'horizon:server' state.sls apache,horizon,nginx
+
+# Workaround Horizon missing CSS/JS, see FUEL-324
+if ! salt -C 'I@horizon:server and *01*' --out=yaml pkg.version openstack-dashboard | grep -qE ':.*mcp'; then
+ salt -I 'horizon:server' file.symlink \
+ /var/lib/openstack-dashboard/static \
+ /usr/share/openstack-dashboard/static
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py collectstatic --noinput"
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py compress --force"
+ salt -I 'horizon:server' service.reload apache2
+fi
diff --git a/mcp/config/states/dpdk b/mcp/config/states/quagga
index 281b78fed..e3c9de7da 100755
--- a/mcp/config/states/dpdk
+++ b/mcp/config/states/quagga
@@ -1,6 +1,6 @@
#!/bin/bash -e
##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Intracom Telecom and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -9,8 +9,4 @@
CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-# shellcheck disable=SC1090
-source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
-
-salt -I 'nova:compute' alternatives.set ovs-vswitchd /usr/lib/openvswitch-switch-dpdk/ovs-vswitchd-dpdk
-salt -I 'nova:compute' service.restart openvswitch-switch
+salt -I 'quagga:server' state.sls quagga -b 1
diff --git a/mcp/config/states/tacker b/mcp/config/states/tacker
new file mode 100755
index 000000000..bd8bc9991
--- /dev/null
+++ b/mcp/config/states/tacker
@@ -0,0 +1,12 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+
+salt -I 'tacker:server' state.sls tacker -b 1
diff --git a/mcp/config/states/virtual_control_plane b/mcp/config/states/virtual_control_plane
index c391cfe6f..f2e861ac2 100755
--- a/mcp/config/states/virtual_control_plane
+++ b/mcp/config/states/virtual_control_plane
@@ -27,13 +27,13 @@ if [ "${ERASE_ENV}" -eq 1 ]; then
fi
# KVM libvirt first, VCP deployment
-wait_for 5.0 "salt -C 'kvm*' state.sls libvirt"
+wait_for 5.0 "salt -C 'kvm*' state.sls armband,libvirt"
salt -C 'kvm* or cmp*' state.apply salt
-wait_for 10.0 "salt -C 'kvm*' state.sls salt.control"
+wait_for 10.0 "salt -C 'kvm*' state.sls salt.control,linux.system.kernel"
vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \
- awk '/\s+\w+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
+ awk '/\s+\w+[[:digit:]]+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
# Check all vcp nodes are available
wait_for 25.0 "(for n in ${vcp_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
@@ -48,17 +48,16 @@ cd /srv/salt/env/prd/maas/files && ln -sf \
salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' cp.get_file \
"salt://maas/files/$(basename "${APT_CONF_D_CURTIN}")" "${APT_CONF_D_CURTIN}"
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.sls linux.system.repo
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' pkg.install force_yes=true python-jinja2
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' service.restart salt-minion
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt"
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp"
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' ssh.set_auth_key ${SUDO_USER} \
$(awk 'NR==1{print $2}' "$(eval echo "~${SUDO_USER}/.ssh/authorized_keys")")"
-# Disable proxy dhcp routes after installation
-salt -C 'prx*' file.write /etc/dhcp/dhclient-enter-hooks.d/no-default-route \
- args='unset new_routers'
-salt -C 'prx*' state.apply opnfv.route_wrapper
-salt -C 'prx*' system.reboot
-wait_for 30.0 "salt -C 'prx*' test.ping"
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' cmd.run 'reboot'
+wait_for 30.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' test.ping"
-salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' pkg.upgrade refresh=False
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' pkg.upgrade refresh=False dist_upgrade=True
diff --git a/mcp/config/states/virtual_init b/mcp/config/states/virtual_init
new file mode 100755
index 000000000..b5bb18955
--- /dev/null
+++ b/mcp/config/states/virtual_init
@@ -0,0 +1,43 @@
+#!/bin/bash -e
+# shellcheck disable=SC1090
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+
+# shellcheck disable=SC1090
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/xdf_data.sh"
+
+# shellcheck disable=SC2154,SC2086,SC2116
+LOCAL_VIRT_NODES=$(echo ${virtual_nodes[*]}) # unquoted on purpose, to collapse whitespace
+[[ ! "${cluster_states[*]}" =~ maas ]] || LOCAL_VIRT_NODES='mas01'
+NODE_MASK="${LOCAL_VIRT_NODES// /|}"
+
+wait_for 5.0 "salt-call state.sls reclass,linux.network,salt.minion \
+ exclude='[{id: reclass_packages}, {id: /etc/reclass/reclass-config.yml}]'"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.refresh_pillar"
+
+# NOTE: domain name changes are not yet supported without a clean redeploy
+
+# Init specific to VMs on the foundation node (all VMs for virtual, mas01 only for baremetal)
+wait_for 3.0 "(for n in ${LOCAL_VIRT_NODES}; do salt -C \${n}.* test.ping || exit; done)"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.sync_all"
+[[ ! "${NODE_MASK}" =~ mas01 ]] || exit 0
+
+if [[ "${base_image}" =~ centos ]]; then
+ # CentOS uses an older kernel, skip non-existing sysctl options
+ EXCLUDE_IDS="exclude='[{id: linux_kernel_net.core.netdev_budget_usecs}]'"
+fi
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux ${EXCLUDE_IDS}"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' pkg.upgrade refresh=False dist_upgrade=True"
+
+salt -C "E@^(${NODE_MASK}).*" cmd.run 'reboot'
+wait_for 90.0 "salt -C 'E@^(${NODE_MASK}).*' test.ping"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.apply salt,ntp"
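
The `E@` matcher targets minions by PCRE, so joining the node names with '|'
builds a single alternation covering all of them; with hypothetical virtual
nodes ctl01 and gtw01:

    LOCAL_VIRT_NODES='ctl01 gtw01'
    NODE_MASK="${LOCAL_VIRT_NODES// /|}"   # -> 'ctl01|gtw01'
    salt -C "E@^(${NODE_MASK}).*" test.ping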