1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
|
From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Date: Tue, 25 Jul 2017 19:11:56 +0200
Subject: [PATCH] ci/deploy.sh: Rework bridge arguments for MCP
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
---
ci/deploy.sh | 37 ++++++++++++++++++++++++++-----------
mcp/scripts/lib.sh | 42 ++++++++++++++++++++++++++++++------------
2 files changed, 56 insertions(+), 23 deletions(-)
diff --git a/ci/deploy.sh b/ci/deploy.sh
index edf05be..864b231 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -29,13 +29,16 @@ cat << EOF
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
`basename $0`: Deploys the Fuel@OPNFV stack
-usage: `basename $0` -b base-uri [-B PXE Bridge] [-f] [-F] [-H] -l lab-name -p pod-name -s deploy-scenario [-S image-dir] [-T timeout] -i iso
+usage: `basename $0` -b base-uri
+ [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]]
+ [-f] [-F] [-H] -l lab-name -p pod-name -s deploy-scenario
+ [-S image-dir] [-T timeout] -i iso
-s deployment-scenario [-S optional Deploy-scenario path URI]
[-R optional local relen repo (containing deployment Scenarios]
OPTIONS:
-b Base-uri for the stack-configuration structure
- -B PXE Bridge for booting of Fuel master
+ -B Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
-d Dry-run
-f Deploy on existing Fuel master
-e Do not launch environment deployment
@@ -59,10 +62,13 @@ and provides a fairly simple mechanism to execute a deployment.
Input parameters to the build script is:
-b Base URI to the configuration directory (needs to be provided in a URI
style, it can be a local resource: file:// or a remote resource http(s)://)
--B PXE Bridge for booting of Fuel master. It can be specified several times,
+-B Bridges to be used by the deploy script. It can be specified several times,
or as a comma separated list of bridges, or both: -B br1 -B br2,br3
- One NIC connected to each specified bridge will be created in the Fuel VM,
- in the same order as provided in the command line. The default is pxebr.
+  First occurrence sets PXE Bridge, next Mgmt, then Internal and Public.
+ For an empty value, the deploy script will use virsh to create the default
+ expected network (e.g. -B pxe,,,public will use existing "pxe" and "public"
+  bridges, and create "mgmt" and "internal", respectively).
+ The default is pxebr.
-d Dry-run - Produces deploy config files (config/dea.yaml and
config/dha.yaml), but does not execute deploy
-f Deploy on existing Fuel master
@@ -112,7 +118,7 @@ clean() {
#
SCRIPT_PATH=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
DEPLOY_DIR=$(cd ${SCRIPT_PATH}/../mcp/scripts; pwd)
-PXE_BRIDGE=''
+OPNFV_BRIDGES=('pxe' 'mgmt' 'internal' 'public')
NO_HEALTH_CHECK=''
USE_EXISTING_FUEL=''
FUEL_CREATION_ONLY=''
@@ -124,6 +130,7 @@ if ! [ -z $DEPLOY_TIMEOUT ]; then
else
DEPLOY_TIMEOUT=""
fi
+
#
# END of variables to customize
############################################################################
@@ -146,9 +153,17 @@ do
fi
;;
B)
- for bridge in ${OPTARG//,/ }; do
- PXE_BRIDGE+=" -b $bridge"
+ OIFS=${IFS}
+ IFS=','
+ OPT_BRIDGES=($OPTARG)
+ OPNFV_BRIDGE_IDX=0
+ for bridge in ${OPT_BRIDGES[@]}; do
+ if [ -n "${bridge}" ]; then
+ OPNFV_BRIDGES[${OPNFV_BRIDGE_IDX}]="${bridge}"
+ fi
+ OPNFV_BRIDGE_IDX=$[OPNFV_BRIDGE_IDX + 1]
done
+ IFS=${OIFS}
;;
d)
DRY_RUN=1
@@ -261,9 +276,9 @@ export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i
# Infra setup
generate_ssh_key
prepare_vms virtual_nodes $base_image
-create_networks
-create_vms virtual_nodes virtual_nodes_ram virtual_nodes_vcpus
-update_pxe_network
+create_networks OPNFV_BRIDGES
+create_vms virtual_nodes virtual_nodes_ram virtual_nodes_vcpus OPNFV_BRIDGES
+update_pxe_network OPNFV_BRIDGES
start_vms virtual_nodes
check_connection
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index 55df750..ec55c6d 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -41,15 +41,19 @@ prepare_vms() {
}
create_networks() {
+ local -n vnode_networks=$1
# create required networks
- for net in pxe mgmt internal public; do
+ for net in "${vnode_networks[@]}"; do
if virsh net-info $net >/dev/null 2>&1; then
virsh net-destroy ${net}
virsh net-undefine ${net}
fi
- virsh net-define net_${net}.xml
- virsh net-autostart ${net}
- virsh net-start ${net}
+ # in case of custom network, host should already have the bridge in place
+ if [ -f net_${net}.xml ]; then
+ virsh net-define net_${net}.xml
+ virsh net-autostart ${net}
+ virsh net-start ${net}
+ fi
done
}
@@ -57,6 +61,7 @@ create_vms() {
local -n vnodes=$1
local -n vnodes_ram=$2
local -n vnodes_vcpus=$3
+ local -n vnode_networks=$4
# AArch64: prepare arch specific arguments
local virt_extra_args=""
@@ -65,13 +70,22 @@ create_vms() {
virt_extra_args="$virt_extra_args --video=vga"
fi
+ # prepare network args
+ net_args=""
+ for net in "${vnode_networks[@]}"; do
+ net_type="network"
+ # in case of custom network, host should already have the bridge in place
+ if [ ! -f net_${net}.xml ]; then
+ net_type="bridge"
+ fi
+ net_args="${net_args} --network ${net_type}=${net},model=virtio"
+ done
+
# create vms with specified options
for node in "${vnodes[@]}"; do
- virt-install --name ${node} --ram ${vnodes_ram[$node]} --vcpus ${vnodes_vcpus[$node]} --cpu host-passthrough --accelerate \
- --network network:pxe,model=virtio \
- --network network:mgmt,model=virtio \
- --network network:internal,model=virtio \
- --network network:public,model=virtio \
+ virt-install --name ${node} \
+ --ram ${vnodes_ram[$node]} --vcpus ${vnodes_vcpus[$node]} \
+ --cpu host-passthrough --accelerate ${net_args} \
--disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
--os-type linux --os-variant none \
--boot hd --vnc --console pty --autostart --noreboot \
@@ -82,9 +96,13 @@ create_vms() {
}
update_pxe_network() {
- # set static ip address for salt master node
- virsh net-update pxe add ip-dhcp-host \
- "<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
+ local -n vnode_networks=$1
+ if virsh net-info "${vnode_networks[0]}" >/dev/null 2>&1; then
+ # set static ip address for salt master node, only if managed via virsh
+    # NOTE: expression below assumes the PXE network is always first in domiflist
+ virsh net-update "${vnode_networks[0]}" add ip-dhcp-host \
+ "<host mac='$(virsh domiflist cfg01 | awk '/network/ {print $5; exit}')' name='cfg01' ip='$SALT_MASTER'/>" --live
+ fi
}
start_vms() {
|