1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
|
#!/bin/bash
set -ex

# Deployment driver for OPNFV bundles: decides which bundle to deploy.
# The nosdn bundle is used unless overridden by the options parsed below.
# These are the release defaults; each can be overridden on the command
# line (see usage()).
opnfvsdn=nosdn          # SDN controller: nosdn|odl|opencontrail
opnfvtype=noha          # deployment type: noha|ha|tip
openstack=ocata         # OpenStack release to deploy
opnfvlab=default        # target lab profile
opnfvrel=e              # OPNFV release letter
opnfvfeature=none       # extra features: ipv6,dpdk,lxd,dvr
opnfvdistro=xenial      # Ubuntu series for deployed nodes
opnfvarch=amd64         # node CPU architecture
opnfvmodel=openstack    # platform model: openstack|kubernetes
virtinstall=0           # 1 => virtual deployment (runs clean.sh first)

# Juju client version string; used later to branch between
# juju 1.x and juju 2.x command syntax.
jujuver=$(juju --version)
# Load deployment parameters from ./deploy.yaml, overriding the defaults
# set at the top of this script.
# Note: each value keeps the leading space produced by `cut -d: -f2`
# (e.g. " ha"), preserving the historical behaviour of this script.
read_config() {
    opnfvrel=$(grep release: deploy.yaml | cut -d ":" -f2)
    openstack=$(grep openstack: deploy.yaml | cut -d ":" -f2)
    opnfvtype=$(grep type: deploy.yaml | cut -d ":" -f2)
    opnfvlab=$(grep lab: deploy.yaml | cut -d ":" -f2)
    opnfvsdn=$(grep sdn: deploy.yaml | cut -d ":" -f2)
}
# Print usage information to stderr and exit with status 1.
# Bug fixed: the original ran `echo "..." 1>&2 exit 1;` in one command,
# so "exit 1" was printed as part of the message and the function never
# actually exited.
usage() {
    echo "Usage: $0 [-s <nosdn|odl|opencontrail>]
    [-t <noha|ha|tip>]
    [-o <juno|liberty>]
    [-l <default|intelpod5>]
    [-f <ipv6,dpdk,lxd,dvr>]
    [-d <trusty|xenial>]
    [-a <amd64>]
    [-m <openstack|kubernetes>]
    [-i <0|1>]
    [-r <a|b>]" 1>&2
    exit 1
}
# Parse command-line options, overriding the defaults set above.
# The leading ':' enables silent error handling; unrecognized options
# fall through to the empty *) arm and are ignored.
# Bug fixed: the optstring declared "h:" (i.e. -h requires an argument),
# so a bare -h was reported as an error instead of printing usage.
while getopts ":s:t:o:l:hr:f:d:a:m:i:" opt; do
    case "${opt}" in
        s)
            opnfvsdn=${OPTARG}      # SDN controller
            ;;
        t)
            opnfvtype=${OPTARG}     # noha|ha|tip
            ;;
        o)
            openstack=${OPTARG}     # OpenStack release
            ;;
        l)
            opnfvlab=${OPTARG}      # lab profile
            ;;
        r)
            opnfvrel=${OPTARG}      # OPNFV release
            ;;
        f)
            opnfvfeature=${OPTARG}  # feature list
            ;;
        d)
            opnfvdistro=${OPTARG}   # Ubuntu series
            ;;
        a)
            opnfvarch=${OPTARG}     # architecture
            ;;
        m)
            opnfvmodel=${OPTARG}    # openstack|kubernetes
            ;;
        i)
            virtinstall=${OPTARG}   # virtual-install flag
            ;;
        h)
            usage
            ;;
        *)
            ;;
    esac
done
#by default maas creates two VMs in case of three more VM needed.
createresource() {
# TODO: make sure this function run with the same parameters used in 03-maasdeploy.sh
PROFILE=${PROFILE:-ubuntu}
MAAS_IP=$(grep " ip_address" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //')
API_SERVER="http://$MAAS_IP:5240/MAAS/api/2.0"
API_KEY=`sudo maas-region apikey --username=ubuntu`
maas login $PROFILE $API_SERVER $API_KEY
for node in node3-control node4-control
do
node_id=$(maas $PROFILE machines read | \
jq -r ".[] | select(.hostname == \"$node\").system_id")
if [[ -z "$node_id" ]]; then
sudo virt-install --connect qemu:///system --name $node \
--ram 8192 --cpu host --vcpus 4 \
--disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
--network bridge=virbr0,model=virtio \
--network bridge=virbr0,model=virtio \
--boot network,hd,menu=off \
--noautoconsole --vnc --print-xml | tee _node.xml
node_mac=$(grep "mac address" _node.xml | head -1 | cut -d "'" -f 2)
sudo virsh -c qemu:///system define --file _node.xml
rm -f _node.xml
maas $PROFILE nodes new autodetect_nodegroup='yes' name=$node \
tags='control' hostname=$name power_type='virsh' \
mac_addresses=$node3controlmac \
power_parameters_power_address="qemu+ssh://$USER@192.168.122.1/system" \
architecture='amd64/generic' power_parameters_power_id='node3-control'
node_id=$(maas $PROFILE machines read | \
jq -r ".[] | select(.hostname == \"$node\").system_id")
fi
if [[ -z "$node_id" ]]; then
echo "Error: failed to create node $node ."
exit 1
fi
maas $PROFILE tag update-nodes control add=$node_id || true
done
}
#copy the files and create extra resources needed for HA deployment
# in case of default VM labs.
deploy() {
if [[ "$jujuver" > "2" ]]; then
if [ ! -f ./labconfig.yaml ] && [ -e ~/joid_config/labconfig.yaml ]; then
cp ~/joid_config/labconfig.yaml ./labconfig.yaml
if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
else
python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
fi
else
if [ -e ./labconfig.yaml ]; then
if [ ! -f ./deployconfig.yaml ] && [ -e ~/joid_config/deployconfig.yaml ]; then
cp ~/joid_config/deployconfig.yaml ./deployconfig.yaml
else
python genDeploymentConfig.py -l labconfig.yaml > deployconfig.yaml
fi
else
echo " MAAS not deployed please deploy MAAS first."
fi
fi
#create json file which is missing in case of new deployment after maas and git tree cloned freshly.
python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < labconfig.yaml > labconfig.json
python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < deployconfig.yaml > deployconfig.json
else
if [ ! -f ./environments.yaml ] && [ -e ~/.juju/environments.yaml ]; then
cp ~/.juju/environments.yaml ./environments.yaml
elif [ ! -f ./environments.yaml ] && [ -e ~/joid_config/environments.yaml ]; then
cp ~/joid_config/environments.yaml ./environments.yaml
fi
#copy the script which needs to get deployed as part of ofnfv release
echo "...... deploying now ......"
echo " " >> environments.yaml
echo " enable-os-refresh-update: false" >> environments.yaml
echo " enable-os-upgrade: false" >> environments.yaml
echo " admin-secret: admin" >> environments.yaml
echo " default-series: $opnfvdistro" >> environments.yaml
cp environments.yaml ~/.juju/
cp environments.yaml ~/joid_config/
fi
if [[ "$opnfvtype" = "ha" && "$opnfvlab" = "default" ]]; then
createresource
fi
#bootstrap the node
./01-bootstrap.sh
if [[ "$jujuver" > "2" ]]; then
juju model-config default-series=$opnfvdistro enable-os-refresh-update=false enable-os-upgrade=false
fi
#case default deploy the opnfv platform:
./02-deploybundle.sh $opnfvtype $openstack $opnfvlab $opnfvsdn $opnfvfeature $opnfvdistro $opnfvmodel
}
#check whether charms are still executing the code even juju-deployer says installed.
# Poll `juju status` until no unit reports the given status keyword, or
# until 120 iterations (~1 hour at 30 s per poll) have elapsed.
# $1 - status keyword to wait for the disappearance of (e.g. "executing")
# On the openstack model, also exposes ceph-radosgw once settled.
check_status() {
    local waitstatus=$1
    local retval=0
    local timeoutiter=0
    echo -n "executing the relationship within charms ."
    while [ "$retval" -eq 0 ]; do
        if juju status | grep -q "$waitstatus"; then
            echo -n '.'
            if [ "$timeoutiter" -ge 120 ]; then
                echo 'timed out'
                retval=1
            else
                sleep 30
            fi
            timeoutiter=$((timeoutiter+1))
        else
            echo 'done'
            retval=1
        fi
    done

    if [[ "$opnfvmodel" = "openstack" ]]; then
        juju expose ceph-radosgw || true
        #juju ssh ceph/0 \ 'sudo radosgw-admin user create --uid="ubuntu" --display-name="Ubuntu Ceph"'
    fi

    echo "...... deployment finishing ......."
}
# In the case of a virtual deployment, wipe any previous virtual
# environment before starting.
if [ "$virtinstall" -eq 1 ]; then
./clean.sh || true
fi

echo "...... deployment started ......"
deploy

# Block until no charm is still in the "executing" state.
check_status executing

echo "...... deployment finished ......."
echo "...... configuring public access ......."

# translate bundle.yaml to json
python -c 'import sys, yaml, json; json.dump(yaml.load(sys.stdin), sys.stdout, indent=4)' < bundles.yaml > bundles.json
# get services list having a public interface
srv_list=$(cat bundles.json | jq -r ".services | to_entries[] | {\"key\": .key, \"value\": .value[\"bindings\"]} | select (.value!=null) | select(.value[] | contains(\"public-api\"))".key)
# get cnt list from service list
cnt_list=$(for cnt in $srv_list; do juju status $cnt --format=json | jq -r ".machines[].containers | to_entries[]".key; done)
# get public network gateway (supposing it is the first ip of the network)
public_api_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"public\")".gateway)
admin_gw=$(cat labconfig.json | jq --raw-output ".opnfv.spaces[] | select(.type==\"admin\")".gateway)

# For every container hosting a public-api service, swap its default
# route from the admin gateway to the public gateway, both live (ip r)
# and persistently in /etc/network/interfaces.
if ([ $admin_gw ] && [ $admin_gw != "null" ]); then
    # set default gateway to public api gateway
    for cnt in $cnt_list; do
        echo "changing default gw on $cnt"
        if ([ $public_api_gw ] && [ $public_api_gw != "null" ]); then
            # Replace the live default route first.
            juju ssh $cnt "sudo ip r d default && sudo ip r a default via $public_api_gw";
            # Then persist it: back up interfaces, drop the old admin
            # gateway line, and add a gateway line under the public
            # interface's stanza (the device carrying the new route).
            juju ssh $cnt "gw_dev=\$(ip r l | grep 'via $public_api_gw' | cut -d \ -f5) &&\
sudo cp /etc/network/interfaces /etc/network/interfaces.bak &&\
echo 'removing old default gateway' &&\
sudo perl -i -pe 's/^\ *gateway $admin_gw\n$//' /etc/network/interfaces &&\
sudo perl -i -pe \"s/iface \$gw_dev inet static/iface \$gw_dev inet static\\n gateway $public_api_gw/\" /etc/network/interfaces \
";
        fi
    done
fi
echo "...... configure ......."

# Post-deploy configuration, branching on the chosen platform model.
# Fixed: the original used `([ $opnfvmodel == "openstack" ])` — an
# unnecessary subshell with an unquoted variable that breaks when the
# variable is empty.
if [ "$opnfvmodel" = "openstack" ]; then
    ./openstack.sh "$opnfvsdn" "$opnfvlab" "$opnfvdistro" "$openstack" || true

    # creating heat domain after pushing the public API into /etc/hosts
    if [[ "$jujuver" > "2" ]]; then
        status=$(juju run-action heat/0 domain-setup)
        echo $status
    else
        status=$(juju action do heat/0 domain-setup)
        echo $status
    fi

    # Best-effort extras: cloud images and the standard OpenStack setup.
    sudo ../juju/get-cloud-images || true
    ../juju/joid-configure-openstack || true

    # Wire openbaton to keystone when the bundle includes it.
    if grep -q 'openbaton' bundles.yaml; then
        juju add-relation openbaton keystone
    fi
elif [ "$opnfvmodel" = "kubernetes" ]; then
    ./k8.sh
fi
# Expose the Juju GUI URL (with credentials) so the operator can log in.
echo " ...... JUJU GUI can be access using the below URL ...... "
juju gui --show-credentials --no-browser

echo "...... finished ......."
|