#!/bin/bash
# Deployment script: installs MAAS and registers the lab nodes for deployment.
set -ex
virtinstall=0
labname=$1
#install the packages needed
sudo apt-add-repository ppa:maas-deployers/stable -y
sudo apt-add-repository ppa:juju/stable -y
sudo apt-add-repository ppa:maas/stable -y
sudo apt-add-repository cloud-archive:mitaka -y
sudo apt-get update -y
sudo apt-get dist-upgrade -y
sudo apt-get install openssh-server git maas-deployer juju juju-deployer maas-cli python-pip python-openstackclient python-congressclient gsutil -y
# If the first parameter is "custom", the second must be either the absolute
# path of the labconfig file (including the file name) or the URL of the
# file to download.
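# Example invocations (illustrative; the script name is a placeholder):
#   ./deploy-script.sh custom /path/to/labconfig.yaml
#   ./deploy-script.sh custom http://example.com/labconfig.yaml
#   ./deploy-script.sh intelpod6
# With no recognised lab name, a default virtual deployment is performed.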
if [ "$1" == "custom" ]; then
if [ -e "$2" ]; then
cp "$2" ./labconfig.yaml || true
python deploy.py
else
wget "$2" -t 3 -T 10 -O ./labconfig.yaml || true
count=`wc -l labconfig.yaml | cut -d " " -f 1`
if [ $count -lt 10 ]; then
rm -rf labconfig.yaml
else
python deploy.py
fi
fi
if [ ! -e ./labconfig.yaml ]; then
virtinstall=1
cp ../labconfig/default/deployment.yaml ./
cp ../labconfig/default/labconfig.yaml ./
fi
labname=`grep "maas_name" deployment.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
else
case "$1" in
'intelpod5' )
cp ../labconfig/intel/pod5/labconfig.yaml ./
#to be removed later once converted for all labs.
python deploy.py
;;
'intelpod6' )
cp ../labconfig/intel/pod6/labconfig.yaml ./
#to be removed later once converted for all labs.
python deploy.py
;;
'intelpod9' )
cp ../labconfig/intel/pod9/labconfig.yaml ./
#to be removed later once converted for all labs.
python deploy.py
;;
'orangepod1' )
cp ../labconfig/orange/pod1/labconfig.yaml ./
#to be removed later once converted for all labs.
python deploy.py
;;
'orangepod2' )
cp ../labconfig/orange/pod2/labconfig.yaml ./
#to be removed later once converted for all labs.
python deploy.py
;;
'attvirpod1' )
cp ../labconfig/att/virpod1/labconfig.yaml ./
#to be removed later once converted for all labs.
python deploy.py
;;
'juniperpod1' )
cp maas/juniper/pod1/deployment.yaml ./deployment.yaml
;;
'cengnlynxpod1' )
cp maas/cengn_lynx/pod1/deployment.yaml ./deployment.yaml
;;
'cengnpod1' )
cp ../labconfig/cengn/pod1/labconfig.yaml ./
#to be removed later once converted for all labs.
python deploy.py
;;
* )
virtinstall=1
labname="default"
./cleanvm.sh
cp ../labconfig/default/deployment.yaml ./
cp ../labconfig/default/deployconfig.yaml ./
;;
esac
fi
# Make sure no password is asked for during the deployment.
echo "$USER ALL=(ALL) NOPASSWD:ALL" > 90-joid-init
if [ -e /etc/sudoers.d/90-joid-init ]; then
sudo cp /etc/sudoers.d/90-joid-init 91-joid-init
sudo chown $USER:$USER 91-joid-init
sudo chmod 660 91-joid-init
sudo cat 90-joid-init >> 91-joid-init
sudo chown root:root 91-joid-init
sudo mv 91-joid-init /etc/sudoers.d/
else
sudo chown root:root 90-joid-init
sudo mv 90-joid-init /etc/sudoers.d/
fi
echo "... Deployment of MAAS started ...."
if [ ! -e $HOME/.ssh/id_rsa ]; then
ssh-keygen -N '' -f $HOME/.ssh/id_rsa
fi
# Define the default storage pool and try to start it even though it may
# already exist. On a fresh install it may or may not be there.
sudo apt-get install libvirt-bin -y
sudo adduser $USER libvirtd
sudo virsh pool-define-as default --type dir --target /var/lib/libvirt/images/ || true
sudo virsh pool-start default || true
sudo virsh pool-autostart default || true
# To avoid a conflict between apiclient/maas_client and the apiclient package
# from Google, remove google-api-python-client if the yardstick installer
# pulled it in.
if [ "$(pip list | grep google-api-python-client | wc -l)" -eq 1 ]; then
sudo pip uninstall -y google-api-python-client
fi
sudo pip install shyaml
juju init -f
cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
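# For a virtual install, strip the dhcp/range definitions from the libvirt
# "default" network so that MAAS (not libvirt's dnsmasq) can serve DHCP on
# that bridge.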
if [ "$virtinstall" -eq 1 ]; then
sudo virsh net-dumpxml default > default-net-org.xml
sudo sed -i '/dhcp/d' default-net-org.xml
sudo sed -i '/range/d' default-net-org.xml
sudo virsh net-define default-net-org.xml
sudo virsh net-destroy default
sudo virsh net-start default
fi
# The function below marks the interfaces in Auto mode so that they are enabled by MAAS.
enableautomode() {
listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
for nodes in $listofnodes
do
maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
done
}
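# Example call, based on the calls near the end of this script:
#   enableautomode eth1.1201 AUTO "172.16.121.3/24"
# Arguments: <interface> <mode> <subnet>. (The trailing compute/control
# argument passed in some calls below is not used by this function.)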
# The function below marks the interfaces in Auto mode so that they are enabled
# by MAAS, using the hostname of the node added into MAAS.
enableautomodebyname() {
if [ ! -z "$4" ]; then
for i in `seq 1 7`;
do
nodes=`maas maas nodes list hostname=node$i-$4 | grep system_id | cut -d '"' -f 4`
if [ ! -z "$nodes" ]; then
maas maas interface link-subnet $nodes $1 mode=$2 subnet=$3
fi
done
fi
}
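# Example call, based on the intelpod9 case near the end of this script:
#   enableautomodebyname eth0.902 AUTO "10.9.12.0/24" compute
# Arguments: <interface> <mode> <subnet> <hostname suffix>.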
# The function below creates a VLAN, updates the subnet with the new VLAN,
# and returns the created VLAN id in the variable named by its first argument.
crvlanupdsubnet() {
newvlanid=`maas maas vlans create $2 name=$3 vid=$4 | grep resource | cut -d '/' -f 6 `
maas maas subnet update $5 vlan=$newvlanid
eval "$1"="'$newvlanid'"
}
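# Example call, based on the intelpod9 case near the end of this script:
#   crvlanupdsubnet vlan902 1 "DataNetwork" 902 2
# Arguments: the variable that receives the new VLAN id, the fabric (assumed),
# the VLAN name, the vid, and the subnet to update.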
# The function below creates an interface with the new VLAN and binds it to the physical (parent) interface.
crnodevlanint() {
listofnodes=`maas maas nodes list | grep system_id | cut -d '"' -f 4`
for nodes in $listofnodes
do
parentid=`maas maas interface read $nodes $2 | grep interfaces | cut -d '/' -f 8`
maas maas interfaces create-vlan $nodes vlan=$1 parent=$parentid
done
}
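# Example call, based on the intelpod9 case near the end of this script:
#   crnodevlanint $vlan902 eth0
# Arguments: <vlan id> <parent physical interface>.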
# Make sure the SSH keys of the current user are used: point deployment.yaml
# at the current user's home directory and qemu+ssh user.
sed -i "s@/home/ubuntu@$HOME@g" ./deployment.yaml
sed -i "s@qemu+ssh://ubuntu@qemu+ssh://$USER@g" ./deployment.yaml
cp ./deployment.yaml ~/.juju/
if [ -e ./deployconfig.yaml ]; then
cp ./deployconfig.yaml ~/.juju/
fi
sudo maas-deployer -c deployment.yaml -d --force
sudo chown $USER:$USER environments.yaml
echo "... Deployment of MAAS finished ...."
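# Log in to the MAAS API using the IP address from deployment.yaml and the
# OAuth key from environments.yaml, then register the SSH keys that MAAS
# should install on the deployed nodes.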
maas_ip=`grep " ip_address" deployment.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
apikey=`grep maas-oauth: environments.yaml | cut -d "'" -f 2`
maas login maas http://${maas_ip}/MAAS/api/1.0 ${apikey}
maas maas sshkeys new key="`cat $HOME/.ssh/id_rsa.pub`"
# Add the Qtip public key so that the Qtip tests can run after install on bare metal nodes.
maas maas sshkeys new key="`cat ./maas/sshkeys/QtipKey.pub`"
# Add the compute and control node VMs to MAAS for deployment.
if [ "$virtinstall" -eq 1 ]; then
# Create three more VMs to do the deployment.
sudo virt-install --connect qemu:///system --name node1-control --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node1-control
sudo virt-install --connect qemu:///system --name node2-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node2-compute
sudo virt-install --connect qemu:///system --name node5-compute --ram 8192 --vcpus 4 --disk size=120,format=qcow2,bus=virtio,io=native,pool=default --network bridge=virbr0,model=virtio --network bridge=virbr0,model=virtio --boot network,hd,menu=off --noautoconsole --vnc --print-xml | tee node5-compute
node1controlmac=`grep "mac address" node1-control | head -1 | cut -d "'" -f 2`
node2computemac=`grep "mac address" node2-compute | head -1 | cut -d "'" -f 2`
node5computemac=`grep "mac address" node5-compute | head -1 | cut -d "'" -f 2`
sudo virsh -c qemu:///system define --file node1-control
sudo virsh -c qemu:///system define --file node2-compute
sudo virsh -c qemu:///system define --file node5-compute
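# Register the freshly defined VMs in MAAS with virsh power control and tag
# them as 'control' or 'compute'.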
maas maas tags new name='control'
maas maas tags new name='compute'
controlnodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node1-control' tags='control' hostname='node1-control' power_type='virsh' mac_addresses=$node1controlmac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node1-control' | grep system_id | cut -d '"' -f 4 `
maas maas tag update-nodes control add=$controlnodeid
computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node2-compute' tags='compute' hostname='node2-compute' power_type='virsh' mac_addresses=$node2computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node2-compute' | grep system_id | cut -d '"' -f 4 `
maas maas tag update-nodes compute add=$computenodeid
computenodeid=`maas maas nodes new autodetect_nodegroup='yes' name='node5-compute' tags='compute' hostname='node5-compute' power_type='virsh' mac_addresses=$node5computemac power_parameters_power_address='qemu+ssh://'$USER'@192.168.122.1/system' architecture='amd64/generic' power_parameters_power_id='node5-compute' | grep system_id | cut -d '"' -f 4 `
maas maas tag update-nodes compute add=$computenodeid
fi
# Read the interfaces needed in Auto mode and enable them. To be removed once
# auto enablement is implemented in maas-deployer.
enable_if(){
if [ -e ~/.juju/deployconfig.yaml ]; then
cp ~/.juju/deployconfig.yaml ./deployconfig.yaml
enableiflist=`grep "interface-enable" deployconfig.yaml | cut -d ' ' -f 4 `
datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
stornet=`grep "storageNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
pubnet=`grep "publicNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
# Split the comma-separated interface-enable list and enable each interface
# on the data, storage and public networks.
if [ "$datanet" != "''" ]; then
EXTNET=(${enableiflist//,/ })
i="0"
while [ ! -z "${EXTNET[i]}" ];
do
enableautomode ${EXTNET[i]} AUTO $datanet || true
i=$[$i+1]
done
fi
if [ "$stornet" != "''" ]; then
EXTNET=(${enableiflist//,/ })
i="0"
while [ ! -z "${EXTNET[i]}" ];
do
enableautomode ${EXTNET[i]} AUTO $stornet || true
i=$[$i+1]
done
fi
if [ "$pubnet" != "''" ]; then
EXTNET=(${enableiflist//,/ })
i="0"
while [ ! -z "${EXTNET[i]}" ];
do
enableautomode ${EXTNET[i]} AUTO $pubnet || true
i=$[$i+1]
done
fi
fi
}
# Enable VLAN interfaces with MAAS.
case "$labname" in
'intelpod9' )
maas refresh
crvlanupdsubnet vlan902 1 "DataNetwork" 902 2 || true
crvlanupdsubnet vlan905 2 "PublicNetwork" 905 3 || true
crnodevlanint $vlan902 eth0 || true
crnodevlanint $vlan905 eth1 || true
enableautomodebyname eth0.902 AUTO "10.9.12.0/24" compute || true
enableautomodebyname eth1.905 AUTO "10.9.15.0/24" compute || true
enableautomodebyname eth0.902 AUTO "10.9.12.0/24" control || true
enableautomodebyname eth1.905 AUTO "10.9.15.0/24" control || true
;;
'juniperpod1' )
;;
'cengnlynxpod1' )
maas refresh
crvlanupdsubnet vlan1201 1 "DataNetwork" 1201 2 || true
crvlanupdsubnet vlan1202 2 "PublicNetwork" 1202 3 || true
crnodevlanint $vlan1201 eth1 || true
crnodevlanint $vlan1202 eth1 || true
enableautomode eth1.1201 AUTO "172.16.121.3/24" compute || true
enableautomode eth1.1201 AUTO "172.16.121.3/24" control || true
;;
esac
enable_if
echo " .... MAAS deployment finished successfully ...."