author    | Alexandru Avadanii <Alexandru.Avadanii@enea.com> | 2018-03-07 16:36:11 +0000
committer | Gerrit Code Review <gerrit@opnfv.org>            | 2018-03-07 16:36:11 +0000
commit    | 6da4060d054c66f3d0c17cfa885f6bfe171c5121 (patch)
tree      | 4adfeae072956db27e8e8c1475160a85e526ffa1
parent    | 5f0ac8951f2630642f05dd6d06c414a9402db954 (diff)
parent    | 256e2a50b9b340bb78a8c85fdd7cacf96d0fb637 (diff)
Merge "[vnode parsing] Move core logic to j2"
-rwxr-xr-x | ci/deploy.sh | 60
-rw-r--r-- | mcp/config/scenario/defaults-x86_64.yaml | 27
-rw-r--r-- | mcp/config/scenario/defaults.yaml (renamed from mcp/config/scenario/defaults-aarch64.yaml) | 27
-rw-r--r-- | mcp/scripts/lib.sh | 64
-rw-r--r-- | mcp/scripts/lib_template.sh | 88
-rw-r--r-- | mcp/scripts/user-data.admin.sh.j2 | 2
-rw-r--r-- | mcp/scripts/user-data.mcp.sh.j2 | 4
-rw-r--r-- | mcp/scripts/xdf_data.sh.j2 | 53
8 files changed, 181 insertions, 144 deletions
diff --git a/ci/deploy.sh b/ci/deploy.sh
index e07aca57b..7542bd9d1 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -1,5 +1,5 @@
 #!/bin/bash -e
-# shellcheck disable=SC2034,SC2154,SC1090,SC1091
+# shellcheck disable=SC2034,SC2154,SC1090,SC1091,SC2155
 ##############################################################################
 # Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
 # jonas.bjurel@ericsson.com
@@ -133,6 +133,7 @@ ERASE_ENV=${ERASE_ENV:-0}
 
 source "${DEPLOY_DIR}/globals.sh"
 source "${DEPLOY_DIR}/lib.sh"
+source "${DEPLOY_DIR}/lib_template.sh"
 
 #
 # END of variables to customize
@@ -247,66 +248,31 @@ fi
 # Clone git submodules and apply our patches
 make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
 
-# Expand scenario files, pod_config based on PDF
-SCENARIO_DIR="$(readlink -f "../config/scenario")"
-do_templates "${REPO_ROOT_PATH}" "${STORAGE_DIR}" "${TARGET_LAB}" \
-  "${TARGET_POD}" "${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
-
 # Check scenario file existence
-if [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml" ]; then
+SCENARIO_DIR="$(readlink -f "../config/scenario")"
+if [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml" ] && \
+   [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml.j2" ]; then
   notify_e "[ERROR] Scenario definition file is missing!"
 fi
 
-# Check defaults file existence
-if [ ! -f "${SCENARIO_DIR}/defaults-$(uname -i).yaml" ]; then
-  notify_e "[ERROR] Scenario defaults file is missing!"
-fi
-
-# Get scenario data and (jumpserver) arch defaults
-eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")"
-eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml")"
-export CLUSTER_DOMAIN=${cluster_domain}
-
 # key might not exist yet ...
 generate_ssh_key
 export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")"
 
 # Expand jinja2 templates based on PDF data and env vars
-do_templates "${REPO_ROOT_PATH}" "${STORAGE_DIR}" "${TARGET_LAB}" \
-  "${TARGET_POD}" "${BASE_CONFIG_URI}"
-
-# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
-for node in "${virtual_nodes[@]}"; do
-  virtual_custom_ram="virtual_${node}_ram"
-  virtual_custom_vcpus="virtual_${node}_vcpus"
-  virtual_nodes_data+="${node},"
-  virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram},"
-  virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|"
-done
-virtual_nodes_data=${virtual_nodes_data%|}
-
-# Serialize repos, packages to (pre-)install/remove for:
-# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
-# - virtualized control plane VM base image (only when VCP is used)
-base_image_flavors=common
-if [[ "${cluster_states[*]}" =~ virtual_control ]]; then
-  base_image_flavors+=" control"
-fi
-for sc in ${base_image_flavors}; do
-  for va in apt_keys apt_repos pkg_install pkg_remove; do
-    key=virtual_${sc}_${va}
-    eval "${key}=\${${key}[@]// /|}"
-    eval "${key}=\${${key}// /,}"
-    virtual_repos_pkgs+="${!key}^"
-  done
-done
-virtual_repos_pkgs=${virtual_repos_pkgs%^}
+export MCP_JUMP_ARCH=$(uname -i)
+do_templates_scenario "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
+  "${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
+do_templates_cluster "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
+  "${REPO_ROOT_PATH}" \
+  "${SCENARIO_DIR}/defaults.yaml" \
+  "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml"
 
 # Determine additional data (e.g. jump bridge names) based on XDF
 source "${DEPLOY_DIR}/xdf_data.sh"
 
-notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
 # Jumpserver prerequisites check
+notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
 jumpserver_check_requirements "${virtual_nodes[*]}" "${OPNFV_BRIDGES[@]}"
 
 # Infra setup
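The serialization loops removed from the hunk above now run at template time in xdf_data.sh.j2 (later in this change); deploy.sh only sources the expanded result. As an illustrative sketch only, with hypothetical sample values and not taken from the repo's actual consumer code, the '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>' string documented above can be unpacked in bash like this:

    # Hypothetical sample of the serialized vnode string
    virtual_nodes_data='cfg01,4096,2|mas01,6144,4'
    # Split on '|' into per-node records, then on ',' into fields
    IFS='|' read -r -a vnode_records <<< "${virtual_nodes_data}"
    for record in "${vnode_records[@]}"; do
      IFS=',' read -r vname vram vcpus <<< "${record}"
      echo "node=${vname} ram=${vram} vcpus=${vcpus}"
    done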
diff --git a/mcp/config/scenario/defaults-x86_64.yaml b/mcp/config/scenario/defaults-x86_64.yaml
deleted file mode 100644
index 18b0826f9..000000000
--- a/mcp/config/scenario/defaults-x86_64.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-virtual:
-  default:
-    vcpus: 2
-    ram: 4096
-  common:
-    apt:
-      keys:
-        - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11/SALTSTACK-GPG-KEY.pub
-      repos:
-        # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
-        - saltstack 500 deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11 xenial main
-    pkg:
-      install:
-        - salt-minion
-  control:
-    pkg:
-      install:
-        - cloud-init
diff --git a/mcp/config/scenario/defaults-aarch64.yaml b/mcp/config/scenario/defaults.yaml
index 1efa12ae4..e6915d468 100644
--- a/mcp/config/scenario/defaults-aarch64.yaml
+++ b/mcp/config/scenario/defaults.yaml
@@ -1,13 +1,33 @@
 ##############################################################################
-# Copyright (c) 2017 Mirantis Inc. and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 ---
-base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img
-virtual:
+x86_64:
+  base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+  default:
+    vcpus: 2
+    ram: 4096
+  common:
+    apt:
+      keys:
+        - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11/SALTSTACK-GPG-KEY.pub
+      repos:
+        # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
+        - saltstack 500 deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11 xenial main
+    pkg:
+      install:
+        - salt-minion
+  control:
+    apt: ~
+    pkg:
+      install:
+        - cloud-init
+aarch64:
+  base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img
   default:
     vcpus: 6
     ram: 4096
@@ -25,6 +45,7 @@ virtual:
         - linux-headers-generic-hwe-16.04-edge
        - salt-minion
   control:
+    apt: ~
     pkg:
       install:
         - cloud-init
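With the two per-arch defaults files merged into one, the top-level key (x86_64 or aarch64) is selected via the MCP_JUMP_ARCH export added to ci/deploy.sh above. A hypothetical pre-flight check, not part of this patch, could confirm the merged file covers the current jumpserver:

    # Hypothetical sanity check: make sure defaults.yaml carries a section for
    # this jumpserver architecture ('uname -i' is what deploy.sh exports).
    arch=$(uname -i)
    if ! grep -q "^${arch}:" mcp/config/scenario/defaults.yaml; then
      echo "[ERROR] defaults.yaml has no '${arch}' section" >&2
      exit 1
    fi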
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index e8edf9587..5be4c556d 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -1,7 +1,7 @@
 #!/bin/bash -e
 # shellcheck disable=SC2155,SC1001,SC2015,SC2128
 ##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
@@ -543,65 +543,3 @@ function get_nova_compute_pillar_data {
     echo "${value}"
   fi
 }
-
-function do_templates() {
-  local git_repo_root=$1; shift
-  local image_dir=$1; shift
-  local target_lab=$1; shift
-  local target_pod=$1; shift
-  local lab_config_uri=$1; shift
-  local scenario_dir=${1:-}
-
-  RECLASS_CLUSTER_DIR=$(cd "${git_repo_root}/mcp/reclass/classes/cluster"; pwd)
-  PHAROS_GEN_CFG="./pharos/config/utils/generate_config.py"
-  PHAROS_IA=$(readlink -f "./pharos/config/installers/fuel/pod_config.yml.j2")
-  PHAROS_VALIDATE_SCHEMA_SCRIPT="./pharos/config/utils/validate_schema.py"
-  PHAROS_SCHEMA_PDF="./pharos/config/pdf/pod1.schema.yaml"
-  PHAROS_SCHEMA_IDF="./pharos/config/pdf/idf-pod1.schema.yaml"
-  BASE_CONFIG_PDF="${lab_config_uri}/labs/${target_lab}/${target_pod}.yaml"
-  BASE_CONFIG_IDF="${lab_config_uri}/labs/${target_lab}/idf-${target_pod}.yaml"
-  LOCAL_PDF="${image_dir}/$(basename "${BASE_CONFIG_PDF}")"
-  LOCAL_IDF="${image_dir}/$(basename "${BASE_CONFIG_IDF}")"
-
-  # Two-stage expansion, first stage handles pod_config and scenarios only
-  if [ -n "${scenario_dir}" ]; then
-    # Make sample PDF/IDF available via default lab-config (pharos submodule)
-    ln -sf "$(readlink -f "../config/labs/local")" "./pharos/labs/"
-
-    # Expand scenario file and main reclass input (pod_config.yaml) based on PDF
-    if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
-      notify_e "[ERROR] Could not retrieve PDF (Pod Descriptor File)!"
-    elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
-      notify_e "[ERROR] Could not retrieve IDF (Installer Descriptor File)!"
-    fi
-    # Check first if configuration files are valid
-    if [[ ! "$target_pod" =~ "virtual" ]]; then
-      if ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_PDF}" \
-           -s "${PHAROS_SCHEMA_PDF}"; then
-        notify_e "[ERROR] PDF does not match yaml schema!"
-      elif ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_IDF}" \
-           -s "${PHAROS_SCHEMA_IDF}"; then
-        notify_e "[ERROR] IDF does not match yaml schema!"
-      fi
-    fi
-    if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \
-         -j "${PHAROS_IA}" -v > "${image_dir}/pod_config.yml"; then
-      notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!"
-    fi
-    template_dirs="${scenario_dir}"
-    template_err_str='Could not convert j2 scenario definitions!'
-  else
-    # Expand reclass and virsh network templates based on PDF + IDF
-    printenv | \
-      awk '/^(SALT|MCP|MAAS|CLUSTER).*=/ { gsub(/=/,": "); print }' >> "${LOCAL_PDF}"
-    template_dirs="${RECLASS_CLUSTER_DIR} $(readlink -f virsh_net) $(readlink -f ./*j2)"
-    template_err_str='Could not convert PDF to network definitions!'
-  fi
-  # shellcheck disable=SC2086
-  j2args=$(find $template_dirs -name '*.j2' -exec echo -j {} \;)
-  # shellcheck disable=SC2086
-  if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
-       -i "$(dirname "${PHAROS_IA}")"; then
-    notify_e "[ERROR] ${template_err_str}"
-  fi
-}
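The removed do_templates() is split into the two helpers added in lib_template.sh below. One idiom that survives the move is the printenv | awk filter, which turns selected environment variables into extra YAML keys appended to the local PDF. A minimal sketch of what it emits, with made-up variable values for illustration:

    # Feed a couple of hypothetical variables through the same awk program
    export MCP_JUMP_ARCH=x86_64
    export SALT_MASTER=192.168.10.100
    printenv | awk '/^(SALT|MCP|MAAS).*=/ { gsub(/=/,": "); print }'
    # prints lines such as:
    #   MCP_JUMP_ARCH: x86_64
    #   SALT_MASTER: 192.168.10.100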
diff --git a/mcp/scripts/lib_template.sh b/mcp/scripts/lib_template.sh
new file mode 100644
index 000000000..0fbe628b7
--- /dev/null
+++ b/mcp/scripts/lib_template.sh
@@ -0,0 +1,88 @@
+#!/bin/bash -e
+# shellcheck disable=SC2155,SC1001,SC2015,SC2128
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Library of shell functions dedicated to j2 template handling
+#
+
+PHAROS_GEN_CFG='./pharos/config/utils/generate_config.py'
+PHAROS_IA='./pharos/config/installers/fuel/pod_config.yml.j2'
+PHAROS_VALIDATE_SCHEMA_SCRIPT='./pharos/config/utils/validate_schema.py'
+PHAROS_SCHEMA_PDF='./pharos/config/pdf/pod1.schema.yaml'
+PHAROS_SCHEMA_IDF='./pharos/config/pdf/idf-pod1.schema.yaml'
+
+# Handles pod_config and scenarios only
+function do_templates_scenario {
+  local image_dir=$1; shift
+  local target_lab=$1; shift
+  local target_pod=$1; shift
+  local lab_config_uri=$1; shift
+  local scenario_dir=$1
+
+  BASE_CONFIG_PDF="${lab_config_uri}/labs/${target_lab}/${target_pod}.yaml"
+  BASE_CONFIG_IDF="${lab_config_uri}/labs/${target_lab}/idf-${target_pod}.yaml"
+  LOCAL_PDF="${image_dir}/$(basename "${BASE_CONFIG_PDF}")"
+  LOCAL_IDF="${image_dir}/$(basename "${BASE_CONFIG_IDF}")"
+
+  # Make sample PDF/IDF available via default lab-config (pharos submodule)
+  ln -sf "$(readlink -f "../config/labs/local")" "./pharos/labs/"
+
+  # Expand scenario file and main reclass input (pod_config.yaml) based on PDF
+  if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
+    notify_e "[ERROR] Could not retrieve PDF (Pod Descriptor File)!"
+  elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
+    notify_e "[ERROR] Could not retrieve IDF (Installer Descriptor File)!"
+  fi
+  # Check first if configuration files are valid
+  if [[ ! "$target_pod" =~ "virtual" ]]; then
+    if ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_PDF}" \
+         -s "${PHAROS_SCHEMA_PDF}"; then
+      notify_e "[ERROR] PDF does not match yaml schema!"
+    elif ! "${PHAROS_VALIDATE_SCHEMA_SCRIPT}" -y "${LOCAL_IDF}" \
+         -s "${PHAROS_SCHEMA_IDF}"; then
+      notify_e "[ERROR] IDF does not match yaml schema!"
+    fi
+  fi
+  if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \
+       -j "${PHAROS_IA}" -v > "${image_dir}/pod_config.yml"; then
+    notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!"
+  fi
+  printenv | \
+    awk '/^(SALT|MCP|MAAS).*=/ { gsub(/=/,": "); print }' >> "${LOCAL_PDF}"
+  j2args=$(find "${scenario_dir}" -name '*.j2' -exec echo -j {} \;)
+  # shellcheck disable=SC2086
+  if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
+       -i "$(dirname "$(readlink -f "${PHAROS_IA}")")"; then
+    notify_e '[ERROR] Could not convert j2 scenario definitions!'
+  fi
+}
+
+# Expand reclass and virsh network templates based on PDF + IDF + others
+function do_templates_cluster {
+  local image_dir=$1; shift
+  local target_lab=$1; shift
+  local target_pod=$1; shift
+  local git_repo_root=$1; shift
+  local extra_yaml=("$@")
+
+  RECLASS_CLUSTER_DIR=$(cd "${git_repo_root}/mcp/reclass/classes/cluster"; pwd)
+  LOCAL_PDF="${image_dir}/${target_pod}.yaml"
+
+  for _yaml in "${extra_yaml[@]}"; do
+    awk '/^---$/{f=1;next;}f' "${_yaml}" >> "${LOCAL_PDF}"
+  done
+  # shellcheck disable=SC2046
+  j2args=$(find "${RECLASS_CLUSTER_DIR}" "$(readlink -f virsh_net)" $(readlink -f ./*j2) \
+           -name '*.j2' -exec echo -j {} \;)
+  # shellcheck disable=SC2086
+  if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
+       -i "$(dirname "$(readlink -f "${PHAROS_IA}")")"; then
+    notify_e '[ERROR] Could not convert PDF to network definitions!'
+  fi
+}
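A usage sketch mirroring the ci/deploy.sh call sites earlier in this change (the variables are placeholders supplied by deploy.sh's environment): do_templates_scenario expands the scenario j2 files against the downloaded PDF/IDF, while do_templates_cluster appends the YAML bodies of defaults.yaml and the scenario file (everything after their '---' marker) to the local PDF before expanding the reclass and virsh_net templates.

    source mcp/scripts/lib_template.sh
    export MCP_JUMP_ARCH=$(uname -i)
    do_templates_scenario "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
                          "${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
    do_templates_cluster  "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
                          "${REPO_ROOT_PATH}" \
                          "${SCENARIO_DIR}/defaults.yaml" \
                          "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml"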
diff --git a/mcp/scripts/user-data.admin.sh.j2 b/mcp/scripts/user-data.admin.sh.j2
index b4c89b29c..d9b86c79c 100644
--- a/mcp/scripts/user-data.admin.sh.j2
+++ b/mcp/scripts/user-data.admin.sh.j2
@@ -8,7 +8,7 @@
 ##############################################################################
 rm /etc/salt/minion_id
 rm -f /etc/salt/pki/minion/minion_master.pub
-echo "id: $(hostname).{{ conf.CLUSTER_DOMAIN }}" > /etc/salt/minion
+echo "id: $(hostname).{{ conf.cluster.domain }}" > /etc/salt/minion
 {#- should be in sync with 'opnfv_infra_config_pxe_address' in 'pharos/config/installers/fuel/pod_config.yml.j2 #}
 echo "master: {{ conf.idf.net_config.admin.network | ipaddr_index(2) }}" >> /etc/salt/minion
 service salt-minion restart
diff --git a/mcp/scripts/user-data.mcp.sh.j2 b/mcp/scripts/user-data.mcp.sh.j2
index 4bd0d1d09..bd80961e6 100644
--- a/mcp/scripts/user-data.mcp.sh.j2
+++ b/mcp/scripts/user-data.mcp.sh.j2
@@ -1,6 +1,6 @@
 #!/bin/bash
 ##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
@@ -8,6 +8,6 @@
 ##############################################################################
 rm /etc/salt/minion_id
 rm -f /etc/salt/pki/minion/minion_master.pub
-echo "id: $(hostname).{{ conf.CLUSTER_DOMAIN }}" > /etc/salt/minion
+echo "id: $(hostname).{{ conf.cluster.domain }}" > /etc/salt/minion
 echo "master: {{ conf.SALT_MASTER }}" >> /etc/salt/minion
 service salt-minion restart
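For context, both user-data templates render to a tiny script that re-registers the Salt minion; with hypothetical values standing in for conf.cluster.domain and the master address, the rendered output is roughly:

    # Rendered form (domain and master IP are illustrative placeholders)
    rm /etc/salt/minion_id
    rm -f /etc/salt/pki/minion/minion_master.pub
    echo "id: $(hostname).mcp-cluster.local" > /etc/salt/minion
    echo "master: 192.168.11.2" >> /etc/salt/minion
    service salt-minion restart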
diff --git a/mcp/scripts/xdf_data.sh.j2 b/mcp/scripts/xdf_data.sh.j2
index 9479f2356..ad3c0fe1e 100644
--- a/mcp/scripts/xdf_data.sh.j2
+++ b/mcp/scripts/xdf_data.sh.j2
@@ -1,4 +1,5 @@
 #!/bin/bash -e
+# shellcheck disable=SC2034
 ##############################################################################
 # Copyright (c) 2018 Mirantis Inc., Enea AB and others.
 # All rights reserved. This program and the accompanying materials
@@ -10,11 +11,61 @@
 # Data derived from XDF (PDF/IDF/SDF/etc), used as input in deploy.sh
 #
 
-# Determine bridge names based on IDF, where all bridges are now mandatory
+{%- set arch = conf[conf.MCP_JUMP_ARCH] -%}
+
+{%- macro bash_arr(_l) -%}
+  ({%- for n in _l -%}'{{ n }}' {% endfor -%})
+{%- endmacro -%}
+
+{#- Pack list as `sep`-separated string, replacing spaces with '|' -#}
+{%- macro pack(x = [], sep = ',') -%}
+  {{ x | join(sep) | replace(' ', '|') }}
+{%- endmacro -%}
+
+{#- Pack all vnode data as string -#}
+{%- macro serialize_vnodes() -%}
+  {%- set V = conf.virtual -%}
+  {%- set arr = [] -%}
+  {%- for n in V.nodes -%}
+    {%- if n not in V -%}{%- do V.update({n: {}}) -%}{%- endif -%}
+    {%- do arr.append(pack([n, V[n].ram or arch.default.ram,
+                            V[n].vcpus or arch.default.vcpus])) -%}
+  {%- endfor -%}
+  '{{ pack(arr, '|') }}'
+{%- endmacro -%}
+
+{#- Pack apt_pkg data as string -#}
+{%- macro serialize_apt_pkg() -%}
+  {%- set arr = [] -%}
+  {%- set sections = [arch.common] -%}
+  {%- if 'virtual_control_plane' in conf.cluster.states -%}
+    {%- do sections.append(arch.control) -%}
+  {%- endif -%}
+  {%- for c in sections -%}
+    {%- do arr.append(pack([pack(c.apt['keys']), pack(c.apt.repos),
+                            pack(c.pkg.install), pack(c.pkg.remove)], '^')) -%}
+  {%- endfor -%}
+  '{{ pack(arr, '^') }}'
+{%- endmacro -%}
+
 {%- set bridges = conf.idf.fuel.jumphost.bridges %}
+# Determine bridge names based on IDF, where all bridges are now mandatory
 OPNFV_BRIDGES=(
   '{{ bridges.admin or "pxebr" }}'
   '{{ bridges.mgmt or "mgmt" }}'
   '{{ bridges.private or "internal" }}'
   '{{ bridges.public or "public" }}'
 )
+
+export CLUSTER_DOMAIN={{ conf.cluster.domain }}
+cluster_states={{ bash_arr(conf.cluster.states) }}
+virtual_nodes={{ bash_arr(conf.virtual.nodes) }}
+base_image={{ arch.base_image }}
+
+# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
+virtual_nodes_data={{ serialize_vnodes() }}
+
+# Serialize repos, packages to (pre-)install/remove for:
+# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
+# - virtualized control plane VM base image (only when VCP is used)
+virtual_repos_pkgs={{ serialize_apt_pkg() }}
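As with the vnode string, virtual_repos_pkgs is only a transport format: each base-image flavor contributes four '^'-separated fields (apt keys, apt repos, packages to install, packages to remove), with '|' protecting spaces inside a field, as the pack macro above documents. A consumer-side unpacking sketch for a single flavor, using a hypothetical value rather than real PDF/IDF output:

    # Hypothetical single-flavor value (keys^repos^install^remove)
    virtual_repos_pkgs='https://repo.example.com/KEY.pub^example|500|deb|http://repo.example.com|xenial|main^salt-minion^'
    IFS='^' read -r apt_keys apt_repos pkg_install pkg_remove <<< "${virtual_repos_pkgs}"
    echo "apt keys:  ${apt_keys//|/ }"
    echo "apt repos: ${apt_repos//|/ }"
    echo "install:   ${pkg_install//|/ }"
    echo "remove:    ${pkg_remove//|/ }"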