#!/bin/bash
set -ex
source common/tools.sh
# Multiple cases can be added here to decide which bundle to deploy; by default the nosdn bundle is used.
# The parameters below are the defaults and can be overridden per release.
opnfvsdn=nosdn
opnfvtype=noha
openstack=pike
opnfvlab=default
opnfvlabfile=
opnfvrel=e
opnfvfeature=none
opnfvdistro=xenial
opnfvarch=amd64
opnfvmodel=openstack
virtinstall=0
maasinstall=0
usage() { echo "Usage: $0
[-s|--sdn <nosdn|odl|opencontrail>]
[-t|--type <noha|ha|tip>]
[-o|--openstack <ocata|pike>]
[-l|--lab <default|custom>]
[-f|--feature <ipv6,dpdk,lxd,dvr,openbaton,multus>]
[-d|--distro <xenial>]
[-a|--arch <amd64|ppc64el|aarch64>]
[-m|--model <openstack|kubernetes>]
[-i|--virtinstall <0|1>]
[--maasinstall <0|1>]
[--labfile <labconfig.yaml file>]
[-r|--release <e>]" 1>&2; exit 1;
}
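# Example invocation (values are illustrative; adjust to your lab):
#   ./deploy.sh -s nosdn -t noha -o pike -l default -m openstack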
#A string with command options
options=$@
# An array with all the arguments
arguments=($options)
# Loop index
index=0
for argument in $options
do
# Incrementing index
index=$((index + 1))
# The conditions
case $argument in
-h|--help )
usage;
;;
-s|--sdn )
if [ "${arguments[index]}" != "" ]; then
opnfvsdn=${arguments[index]}
fi;
;;
-t|--type )
if [ "${arguments[index]}" != "" ]; then
opnfvtype=${arguments[index]}
fi;
;;
-o|--openstack )
if [ "${arguments[index]}" != "" ]; then
openstack=${arguments[index]}
fi;
;;
-l|--lab )
if [ "${arguments[index]}" != "" ]; then
opnfvlab=${arguments[index]}
fi;
;;
-r|--release )
if [ "${arguments[index]}" != "" ]; then
opnfvrel=${arguments[index]}
fi;
;;
-f|--feature )
if [ "${arguments[index]}" != "" ]; then
opnfvfeature=${arguments[index]}
fi;
;;
-d|--distro )
if [ "${arguments[index]}" != "" ]; then
opnfvdistro=${arguments[index]}
fi;
;;
-a|--arch )
if [ "${arguments[index]}" != "" ]; then
opnfvarch=${arguments[index]}
fi;
;;
-m|--model )
if [ "${arguments[index]}" != "" ]; then
opnfvmodel=${arguments[index]}
fi;
;;
-i|--virtinstall )
if [ "${arguments[index]}" != "" ]; then
virtinstall=${arguments[index]}
fi;
;;
--maasinstall )
if [ "${arguments[index]}" != "" ]; then
maasinstall=${arguments[index]}
fi;
;;
--labfile )
if [ "${arguments[index]}" != "" ]; then
labfile=${arguments[index]}
fi;
;;
esac
done
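# Note: each flag reads its value from the next positional argument via the
# incremented index, so options must be passed as "-s odl", not "-s=odl".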
# By default MAAS creates two VMs; in the HA case additional control VMs are created here.
createresource() {
# TODO: make sure this function run with the same parameters used in 03-maasdeploy.sh
PROFILE=${PROFILE:-ubuntu}
MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //')
API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0"
API_KEY=$(sudo maas-region apikey --username=ubuntu)
maas login $PROFILE $API_SERVER $API_KEY
# If a virshurl is configured in labconfig we use it, otherwise default to the local hypervisor
VIRSHURL=$(jq -r '.opnfv.virshurl' labconfig.json)
if [ -z "$VIRSHURL" ] || [ "$VIRSHURL" == "null" ]; then
VIRSHIP=$MAAS_IP
VIRSHURL="qemu+ssh://$USER@$VIRSHIP/system"
VIRSHHOST=""
else
VIRSHHOST=$(echo "$VIRSHURL" | cut -d/ -f3 | cut -d@ -f2)
VIRSHIP="" # TODO: parse from $VIRSHURL if needed
fi
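# Example virshurl values (hosts are illustrative):
#   qemu+ssh://ubuntu@10.4.1.1/system   -> remote libvirt over SSH
#   empty/null in labconfig             -> falls back to the MAAS node's libvirt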
for node in node3-control node4-control
do
node_id=$(maas $PROFILE machines read | \
jq -r ".[] | select(.hostname == \"$node\").system_id")
if [[ -z "$node_id" ]]; then
sudo virt-install --connect qemu:///system --name $node \
--ram 8192 --cpu host --vcpus 4 \
--disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
--network bridge=virbr0,model=virtio \
--network bridge=virbr0,model=virtio \
--boot network,hd,menu=off \
--noautoconsole --vnc --print-xml | tee _node.xml
node_mac=$(grep "mac address" _node.xml | head -1 | cut -d "'" -f 2)
sudo virsh -c $VIRSHURL define --file _node.xml
rm -f _node.xml
maas $PROFILE nodes new autodetect_nodegroup='yes' name=$node \
tags='control' hostname=$node power_type='virsh' \
mac_addresses=$node_mac \
power_parameters_power_address="$VIRSHURL" \
architecture='amd64/generic' power_parameters_power_id=$node
sudo virsh -c $VIRSHURL autostart $node
node_id=$(maas $PROFILE machines read | \
jq -r ".[] | select(.hostname == \"$node\").system_id")
fi
if [[ -z "$node_id" ]]; then
echo_error "Error: failed to create node $node ."
exit 1
fi
maas $PROFILE tag update-nodes control add=$node_id || true
done
}
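# Note: createresource is only invoked for HA deployments on the default
# virtual lab (see the deploy() function below).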
# Copy the files and create the extra resources needed for an HA deployment
# in the case of default VM labs.
deploy() {
if [ ! -f ./labconfig.yaml ] && [ -e ~/joid_config/labconfig.yaml ]; then
cp ~/joid_config/labconfig.yaml ./labconfig.yaml
if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
else
python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
fi
else
if [ -e ./labconfig.yaml ]; then
if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
else
python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
fi
else
if [ "$maasinstall" -eq 0 ]; then
echo_error "MAAS not deployed please deploy MAAS first."
else
echo_info "MAAS not deployed this will deploy MAAS first."
fi
fi
fi
# Install MAAS, expecting labconfig.yaml in the local directory.
if [ "$maasinstall" -eq 1 ]; then
./clean.sh || true
PROFILE=${PROFILE:-ubuntu}
MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //')
API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0"
if which maas > /dev/null; then
API_KEY=$(sudo maas-region apikey --username=ubuntu)
maas login $PROFILE $API_SERVER $API_KEY
# make sure there is no machine entry in maas
for m in $(maas $PROFILE machines read | jq -r '.[].system_id')
do
maas $PROFILE machine delete $m || true
done
for podno in $(maas $PROFILE pods read | jq -r '.[].id')
do
maas $PROFILE pod delete $podno || true
done
fi
./cleanvm.sh || true
if [ "$virtinstall" -eq 1 ]; then
./03-maasdeploy.sh virtual
else
if [ -z "$labfile" ]; then
if [ ! -e ./labconfig.yaml ]; then
echo_error "Labconfig file must be specified when using custom"
exit 1
else
echo_warning "Labconfig was not specified, using ./labconfig.yaml instead"
fi
elif [ ! -e "$labfile" ]; then
echo_warning "Labconfig not found locally, trying to download it"
wget "$labfile" -t 3 -T 10 -O ./labconfig.yaml || true
count=$(wc -l < labconfig.yaml)
if [ "$count" -lt 10 ]; then
echo_error "Unable to download labconfig"
exit 1
fi
else
echo_info "Using $labfile to setup deployment"
cp $labfile ./labconfig.yaml
fi
./03-maasdeploy.sh custom
fi
fi
# Create the JSON config files, which are missing on a fresh deployment after MAAS install and a fresh git clone.
python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout, indent=4)' < labconfig.yaml > labconfig.json
python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout, indent=4)' < deployconfig.yaml > deployconfig.json
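# Later steps (e.g. createresource above) query these JSON copies with jq,
# which is simpler than parsing YAML in shell.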
if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
createresource
fi
#bootstrap the node
./01-bootstrap.sh
juju model-config default-series=$opnfvdistro enable-os-refresh-update=false enable-os-upgrade=false
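# enable-os-refresh-update/enable-os-upgrade are turned off so Juju does not
# run apt update/upgrade on the machines during deployment, keeping the
# deployment faster and more deterministic.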
# Deploy the OPNFV platform bundle:
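# 02-deploybundle.sh takes positional arguments in this order:
# type, openstack release, lab, sdn, feature, distro, model.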
./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel
}
# Check whether charms are still executing code even though juju-deployer reports them as installed.
check_status() {
waitstatus=$1
waittime=$2
retval=0
timeoutiter=0
echo_info "Executing the relationships within charms..."
while [ $retval -eq 0 ]; do
if juju status | grep -q "$waitstatus"; then
echo_info "Still waiting for $waitstatus units"
if [ $timeoutiter -ge $waittime ]; then
echo_error 'Timed out'
retval=1
else
sleep 30
fi
timeoutiter=$((timeoutiter+1))
else
echo_info 'Done executing the relationships'
retval=1
fi
done
if [[ "$opnfvmodel" = "openstack" ]]; then
juju expose ceph-radosgw || true
#juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
fi
echo_info "Deployment finishing..."
}
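# For example, "check_status executing 180" polls `juju status` every 30s for
# up to 180 iterations, i.e. a timeout of roughly 90 minutes.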
# In the case of a virtual deployment
if [ "$virtinstall" -eq 1 ]; then
./clean.sh || true
fi
echo_info "Deployment started"
deploy
check_status executing 180
echo_info "Deployment finished"
juju status --format=tabular
# Translate bundles.yaml to JSON
python -c 'import sys, yaml, json; json.dump(yaml.safe_load(sys.stdin), sys.stdout, indent=4)' < bundles.yaml > bundles.json
# Configuring deployment
if [ "$opnfvmodel" == "openstack" ]; then
if [ "$opnfvsdn" == "ocl" ]; then
echo_info "Patching OpenContrail controller container"
juju run --application contrail-controller 'sudo docker cp contrail-controller:/etc/contrail/vnc_api_lib.ini /tmp'
juju run --application contrail-controller 'cp /tmp/vnc_api_lib.ini /tmp/vnc_api_lib.ini2'
juju run --application contrail-controller 'echo "AUTHN_DOMAIN = admin_domain" >> /tmp/vnc_api_lib.ini2'
juju run --application contrail-controller 'sudo docker cp /tmp/vnc_api_lib.ini2 contrail-controller:/etc/contrail/vnc_api_lib.ini'
juju run --application contrail-controller 'sudo docker exec contrail-controller service contrail-api restart'
juju run --application contrail-controller 'sudo docker cp /tmp/vnc_api_lib.ini2 contrail-analytics:/etc/contrail/vnc_api_lib.ini'
echo_info "Wait for OpenContrail components to stabilize"
sleep 600
fi
echo_info "Configuring OpenStack deployment"
./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true
# Create the heat domain after pushing the public API endpoint into /etc/hosts
status=$(juju run-action heat/0 domain-setup)
echo $status
if [ "$opnfvtype" == "ha" ]; then
status=$(juju run-action heat/1 domain-setup)
echo $status
status=$(juju run-action heat/2 domain-setup)
echo $status
fi
sudo ../juju/get-cloud-images || true
../juju/joid-configure-openstack || true
if grep -q 'openbaton' bundles.yaml; then
juju add-relation openbaton keystone
fi
elif [ "$opnfvmodel" == "kubernetes" ]; then
# Workaround for the master charm, as it takes 5 minutes to run properly
check_status waiting 50
check_status executing 50
echo_info "Configuring Kubernetes deployment"
./k8.sh $opnfvfeature
fi
# Expose the Juju GUI URL and credentials for logging in to the Juju GUI
echo_info "Juju GUI can be accessed using the following URL and credentials:"
juju gui --show-credentials --no-browser
echo "Finished deployment and configuration"