author     Alexandru Avadanii <Alexandru.Avadanii@enea.com>   2018-09-10 15:42:04 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>              2018-09-10 15:42:04 +0000
commit     d33f39782f945cae306ce44eead411ff9301a813 (patch)
tree       b37311bf1e3486228774d5f4f97a0c13afa7a199
parent     531ca8731f9b61e79131bcfab420742b4fe3b494 (diff)
parent     4fb4b307ffc3f5d250221f06fc85d384bcde0f33 (diff)
Merge "[nosdn-noha] Meet EPA testcases requirements (NUMA)"
-rwxr-xr-x  ci/deploy.sh                                      | 13
-rw-r--r--  mcp/config/scenario/os-nosdn-nofeature-noha.yaml  | 28
-rw-r--r--  mcp/scripts/lib.sh                                | 40
-rw-r--r--  mcp/scripts/requirements_deb.yaml                 |  1
-rw-r--r--  mcp/scripts/xdf_data.sh.j2                        | 18
5 files changed, 92 insertions, 8 deletions
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 672237205..5aa6c66b1 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -38,7 +38,7 @@ $(notify "USAGE:" 2)
$(basename "$0") -l lab-name -p pod-name -s deploy-scenario \\
[-b Lab Config Base URI] \\
[-S storage-dir] [-L /path/to/log/file.tar.gz] \\
- [-f] [-F] [-e | -E[E]] [-d] [-D] [-N]
+ [-f] [-F] [-e | -E[E]] [-d] [-D] [-N] [-m]
$(notify "OPTIONS:" 2)
-b Base-uri for the stack-configuration structure
@@ -55,6 +55,7 @@ $(notify "OPTIONS:" 2)
-s Deploy-scenario short-name
-S Storage dir for VM images and other deploy artifacts
-L Deployment log path and file name
+ -m Use single socket CPU compute nodes (only affects virtual computes)
-N Experimental: Do not virtualize control plane (novcp)
$(notify_i "Description:" 2)
@@ -95,6 +96,9 @@ $(notify_i "Input parameters to the build script are:" 2)
-p POD name as defined in the configuration directory, e.g. pod2
For the sample configuration in <./mcp/config>, POD name is 'virtual1'
for virtual deployments or 'pod1' for baremetal (based on lf-pod2).
+-m Use single socket compute nodes. Instead of using default NUMA-enabled
+ topology for virtual compute nodes created via libvirt, configure a
+ single guest CPU socket.
-N Experimental: Instead of virtualizing the control plane (VCP), deploy
control plane directly on baremetal nodes
-P Skip installing dependency distro packages on current host
@@ -138,6 +142,7 @@ NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
ERASE_ENV=${ERASE_ENV:-0}
MCP_VCP=${MCP_VCP:-1}
MCP_DOCKER_TAG=${MCP_DOCKER_TAG:-latest}
+MCP_CMP_SS=${MCP_CMP_SS:-0}
source "${DEPLOY_DIR}/globals.sh"
source "${DEPLOY_DIR}/lib.sh"
@@ -183,6 +188,9 @@ do
L)
DEPLOY_LOG="${OPTARG}"
;;
+ m)
+ MCP_CMP_SS=1
+ ;;
N)
MCP_VCP=0
;;
@@ -240,6 +248,7 @@ else
notify "[NOTE] Installing required distro pkgs" 2
jumpserver_pkg_install 'deploy'
docker_install "${MCP_STORAGE_DIR}"
+ virtinst_install "${MCP_STORAGE_DIR}"
fi
if ! virsh list >/dev/null 2>&1; then
@@ -266,7 +275,7 @@ export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")"
MCP_DPDK_MODE=$([[ "$DEPLOY_SCENARIO" =~ ovs ]] && echo 1 || echo 0)
# Expand jinja2 templates based on PDF data and env vars
export MCP_REPO_ROOT_PATH MCP_VCP MCP_DPDK_MODE MCP_STORAGE_DIR MCP_DOCKER_TAG \
- MCP_JUMP_ARCH=$(uname -i)
+ MCP_CMP_SS MCP_JUMP_ARCH=$(uname -i)
do_templates_scenario "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
"${BASE_CONFIG_URI}" "${SCENARIO_DIR}" \
"${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml"
diff --git a/mcp/config/scenario/os-nosdn-nofeature-noha.yaml b/mcp/config/scenario/os-nosdn-nofeature-noha.yaml
index 179313bc1..7e0739037 100644
--- a/mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+++ b/mcp/config/scenario/os-nosdn-nofeature-noha.yaml
@@ -25,3 +25,31 @@ virtual:
ram: 14336
gtw01:
ram: 2048
+ cmp001:
+ vcpus: 8
+ ram: 16384
+ cpu_topology:
+ sockets: 2
+ cores: 2
+ threads: 2
+ numa:
+ cell0:
+ memory: 4194304
+ cpus: 0-3
+ cell1:
+ memory: 4194304
+ cpus: 4-7
+ cmp002:
+ vcpus: 8
+ ram: 16384
+ cpu_topology:
+ sockets: 2
+ cores: 2
+ threads: 2
+ numa:
+ cell0:
+ memory: 4194304
+ cpus: 0-3
+ cell1:
+ memory: 4194304
+ cpus: 4-7
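
The cmp001/cmp002 definitions above give each virtual compute 8 vCPUs arranged as 2 sockets x 2 cores x 2 threads, split across two NUMA cells pinned to vCPUs 0-3 and 4-7. A hedged sanity check from inside such a guest (shape of the output only; exact values depend on the hypervisor):

    # Should report 2 sockets, 2 cores per socket, 2 threads per core,
    # and two NUMA nodes covering CPUs 0-3 and 4-7
    lscpu | grep -E 'Socket|Core|Thread|NUMA'
    numactl --hardware   # requires the numactl package in the guest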
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index 6572074f5..9b1e32c04 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -404,7 +404,8 @@ function create_networks {
function create_vms {
local image_dir=$1; shift
# vnode data should be serialized with the following format:
- # '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
+ # <name0>,<ram0>,<vcpu0>[,<sockets0>,<cores0>,<threads0>[,<cell0name0>,<cell0memory0>,
+ # <cell0cpus0>,<cell1name0>,<cell1memory0>,<cell1cpus0>]]|<name1>,...'
IFS='|' read -r -a vnodes <<< "$1"; shift
# AArch64: prepare arch specific arguments
@@ -419,6 +420,19 @@ function create_vms {
if [ -z "${serialized_vnode_data}" ]; then continue; fi
IFS=',' read -r -a vnode_data <<< "${serialized_vnode_data}"
+ # prepare VM CPU model, count, topology (optional), NUMA cells (optional, requires topo)
+ local virt_cpu_args=' --cpu host-passthrough'
+ local idx=6 # cell0.name index in serialized data
+ while [ -n "${vnode_data[${idx}]}" ]; do
+ virt_cpu_args+=",${vnode_data[${idx}]}.memory=${vnode_data[$((idx + 1))]}"
+ virt_cpu_args+=",${vnode_data[${idx}]}.cpus=${vnode_data[$((idx + 2))]}"
+ idx=$((idx + 3))
+ done
+ virt_cpu_args+=" --vcpus vcpus=${vnode_data[2]}"
+ if [ -n "${vnode_data[5]}" ]; then
+ virt_cpu_args+=",sockets=${vnode_data[3]},cores=${vnode_data[4]},threads=${vnode_data[5]}"
+ fi
+
# prepare network args
local vnode_networks=("$@")
if [[ "${vnode_data[0]}" =~ ^(cfg01|mas01) ]]; then
@@ -438,10 +452,12 @@ function create_vms {
virt_extra_storage="--disk path=${image_dir}/mcp_${vnode_data[0]}_storage.qcow2,format=qcow2,bus=virtio,cache=none,io=native"
fi
+ [ ! -e "${image_dir}/virt-manager" ] || VIRT_PREFIX="${image_dir}/virt-manager/"
# shellcheck disable=SC2086
- virt-install --name "${vnode_data[0]}" \
- --ram "${vnode_data[1]}" --vcpus "${vnode_data[2]}" \
- --cpu host-passthrough --accelerate ${net_args} \
+ ${VIRT_PREFIX}virt-install --name "${vnode_data[0]}" \
+ ${virt_cpu_args} --accelerate \
+ ${net_args} \
+ --ram "${vnode_data[1]}" \
--disk path="${image_dir}/mcp_${vnode_data[0]}.qcow2",format=qcow2,bus=virtio,cache=none,io=native \
${virt_extra_storage} \
--os-type linux --os-variant none \
@@ -629,3 +645,19 @@ function docker_install {
fi
fi
}
+
+function virtinst_install {
+ local image_dir=$1
+ VIRT_VER=$(virt-install --version 2>&1)
+ if [ "${VIRT_VER//./}" -lt 140 ]; then
+ VIRT_TGZ="${image_dir}/virt-manager.tar.gz"
+ VIRT_VER='1.4.3'
+ VIRT_URL="https://github.com/virt-manager/virt-manager/archive/v${VIRT_VER}.tar.gz"
+ notify_n "[WARN] Using virt-install ${VIRT_VER} from ${VIRT_TGZ}" 3
+ if [ ! -e "${VIRT_TGZ}" ]; then
+ curl -L "${VIRT_URL}" -o "${VIRT_TGZ}"
+ mkdir -p "${image_dir}/virt-manager"
+ tar xzf "${VIRT_TGZ}" -C "${image_dir}/virt-manager" --strip-components=1
+ fi
+ fi
+}
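
To make the new serialization concrete: using the cmp001 values from the scenario file above and the comma-separated format documented in create_vms, a serialized entry would look roughly like the first line below, and the loop starting at index 6 (cell0 name) expands it into the CPU-related virt-install arguments shown after it. This is a worked sketch derived from the code, not captured output; the actual cell order depends on how the template iterates the numa map.

    # serialized entry: <name>,<ram>,<vcpus>,<sockets>,<cores>,<threads>,<cell0...>,<cell1...>
    #   cmp001,16384,8,2,2,2,cell0,4194304,0-3,cell1,4194304,4-7
    # CPU-related arguments built by create_vms from that entry:
    #   --cpu host-passthrough,cell0.memory=4194304,cell0.cpus=0-3,cell1.memory=4194304,cell1.cpus=4-7
    #   --vcpus vcpus=8,sockets=2,cores=2,threads=2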
diff --git a/mcp/scripts/requirements_deb.yaml b/mcp/scripts/requirements_deb.yaml
index c04f2a1d7..d2cc21539 100644
--- a/mcp/scripts/requirements_deb.yaml
+++ b/mcp/scripts/requirements_deb.yaml
@@ -21,6 +21,7 @@ deploy:
- e2fsprogs
- git
- kpartx
+ - libglib2.0-bin
- libvirt-bin
- make
- mkisofs
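
libglib2.0-bin joins the jumpserver prerequisites alongside the virt-manager tarball fallback added in lib.sh; presumably it provides glib tooling needed when running virt-install from the unpacked source tree. A quick check on a Debian/Ubuntu jumpserver to see whether the fallback would trigger (versions older than 1.4.0 do):

    virt-install --version
    dpkg -l libvirt-bin libglib2.0-bin | grep '^ii'   # confirm both packages are installed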
diff --git a/mcp/scripts/xdf_data.sh.j2 b/mcp/scripts/xdf_data.sh.j2
index ee5fffab7..e762fe957 100644
--- a/mcp/scripts/xdf_data.sh.j2
+++ b/mcp/scripts/xdf_data.sh.j2
@@ -37,8 +37,20 @@
{%- for n in V.nodes[section] or [] -%}
{%- if section_map[section] < 0 or conf.nodes[section_map[section] + loop.index0].node.type == 'virtual' -%}
{%- if n not in V -%}{%- do V.update({n: {}}) -%}{%- endif -%}
+ {%- set cpu_topo = 'cpu_topology' in V[n] and not conf.MCP_CMP_SS -%}
+ {%- if 'numa' in V[n] and cpu_topo -%}
+ {%- for k, v in V[n].numa.iteritems() -%}
+ {%- set c = pack([k, v.memory, v.cpus]) -%}
+ {%- do V[n].update({'s_numa': c if 's_numa' not in V[n] else pack([c, V[n].s_numa])}) -%}
+ {%- endfor -%}
+ {%- endif -%}
{%- do arr.append(pack([n, V[n].ram or arch.default.ram,
- V[n].vcpus or arch.default.vcpus])) -%}
+ V[n].vcpus or arch.default.vcpus,
+ '' if not cpu_topo else pack([
+ V[n].cpu_topology.sockets,
+ V[n].cpu_topology.cores,
+ V[n].cpu_topology.threads,
+ '' if 's_numa' not in V[n] else V[n].s_numa])])) -%}
{%- endif -%}
{%- endfor -%}
{%- endfor -%}
@@ -93,7 +105,9 @@ virtual_nodes={{ filter_nodes('virtual') }}
control_nodes_query={{ filter_nodes(['baremetal', 'virtual'], True, ['control']) }}
base_image={{ arch.base_image }}
-# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
+# Serialize vnode data as:
+# <name0>,<ram0>,<vcpu0>[,<sockets0>,<cores0>,<threads0>[,<cell0name0>,<cell0memory0>,
+# <cell0cpus0>,<cell1name0>,<cell1memory0>,<cell1cpus0>]]|<name1>,...'
virtual_nodes_data={{ serialize_vnodes() }}
# Serialize repos, packages to (pre-)install/remove for:
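
Tying the template back to the -m switch: cpu_topo is only true when the node defines cpu_topology and MCP_CMP_SS evaluates false, so the same compute node serializes differently depending on how deploy.sh was invoked. A hedged sketch for cmp001 (assuming pack() drops the empty placeholder fields):

    # default (MCP_CMP_SS=0): full 12-field entry, as shown after the lib.sh diff above
    #   cmp001,16384,8,2,2,2,cell0,4194304,0-3,cell1,4194304,4-7
    # with -m (MCP_CMP_SS=1): topology and NUMA cells omitted, single-socket guest
    #   cmp001,16384,8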