path: root/testcases/functest_utils.py
#!/usr/bin/env python
#
# jose.lausuch@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#

import json
import os
import re
import shutil
import subprocess
import sys
import urllib2


def check_credentials():
    """
    Check if the OpenStack credentials (openrc) are sourced
    """
    required_env = ('OS_AUTH_URL', 'OS_USERNAME',
                    'OS_PASSWORD', 'OS_TENANT_NAME')
    return all(key in os.environ for key in required_env)
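
# Example usage (illustrative sketch, not part of the original module): a test
# script would typically guard its setup with this check before creating any
# OpenStack clients.
#
#   if not check_credentials():
#       print "Source the openrc credentials file first."
#       sys.exit(-1)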


def get_credentials(service):
    """Returns a creds dictionary filled with the following keys:
    * username
    * password/api_key (depending on the service)
    * tenant_name/project_id (depending on the service)
    * auth_url
    :param service: a string indicating the name of the service
                    requesting the credentials.
    """
    #TODO: get credentials from the openrc file
    creds = {}
    # Unfortunately, each OpenStack client requests slightly different
    # entries in its credentials dict.
    if service.lower() in ("nova", "cinder"):
        password = "api_key"
        tenant = "project_id"
    else:
        password = "password"
        tenant = "tenant_name"

    # The most common way to pass this information to the script is through
    # environment variables; fall back to admin defaults if they are not set.
    creds.update({
        "username": os.environ.get("OS_USERNAME", "admin"),
        password: os.environ.get("OS_PASSWORD", "admin"),
        "auth_url": os.environ.get("OS_AUTH_URL",
                                   "http://192.168.20.71:5000/v2.0"),
        tenant: os.environ.get("OS_TENANT_NAME", "admin"),
    })

    return creds
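
# Example usage (illustrative only; assumes the classic python-novaclient and
# python-neutronclient 2.x constructors, which accept these keyword names):
#
#   from novaclient import client as nova_client_module
#   from neutronclient.v2_0 import client as neutron_client_module
#   nova_client = nova_client_module.Client('2', **get_credentials("nova"))
#   neutron_client = neutron_client_module.Client(**get_credentials("neutron"))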



def get_instance_status(nova_client, instance):
    try:
        instance = nova_client.servers.get(instance.id)
        return instance.status
    except:
        return None


def get_instance_by_name(nova_client, instance_name):
    try:
        instance = nova_client.servers.find(name=instance_name)
        return instance
    except:
        return None



def create_neutron_net(neutron_client, name):
    json_body = {'network': {'name': name,
                             'admin_state_up': True}}
    try:
        network = neutron_client.create_network(body=json_body)
        network_dict = network['network']
        return network_dict['id']
    except:
        print "Error:", sys.exc_info()[0]
        return False

def delete_neutron_net(neutron_client, network_id):
    try:
        neutron_client.delete_network(network_id)
        return True
    except:
        print "Error:", sys.exc_info()[0]
        return False

def create_neutron_subnet(neutron_client, name, cidr, net_id):
    json_body = {'subnets': [{'name': name, 'cidr': cidr,
                              'ip_version': 4, 'network_id': net_id}]}
    try:
        subnet = neutron_client.create_subnet(body=json_body)
        return subnet['subnets'][0]['id']
    except:
        print "Error:", sys.exc_info()[0]
        return False

def delete_neutron_subnet(neutron_client, subnet_id):
    try:
        neutron_client.delete_subnet(subnet_id)
        return True
    except:
        print "Error:", sys.exc_info()[0]
        return False

def create_neutron_router(neutron_client, name):
    json_body = {'router': {'name': name, 'admin_state_up': True}}
    try:
        router = neutron_client.create_router(json_body)
        return router['router']['id']
    except:
        print "Error:", sys.exc_info()[0]
        return False

def delete_neutron_router(neutron_client, router_id):
    try:
        neutron_client.delete_router(router=router_id)
        return True
    except:
        print "Error:", sys.exc_info()[0]
        return False


def add_interface_router(neutron_client, router_id, subnet_id):
    json_body = {"subnet_id": subnet_id}
    try:
        neutron_client.add_interface_router(router=router_id, body=json_body)
        return True
    except:
        print "Error:", sys.exc_info()[0]
        return False


def remove_interface_router(neutron_client, router_id, subnet_id):
    json_body = {"subnet_id": subnet_id}
    try:
        neutron_client.remove_interface_router(router=router_id, body=json_body)
        return True
    except:
        print "Error:", sys.exc_info()[0]
        return False

def create_neutron_port(neutron_client, name, network_id, ip):
    json_body = {'port': {
        'admin_state_up': True,
        'name': name,
        'network_id': network_id,
        'fixed_ips': [{"ip_address": ip}]
    }}
    try:
        port = neutron_client.create_port(body=json_body)
        return port['port']['id']
    except:
        print "Error:", sys.exc_info()[0]
        return False
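
# Example usage (illustrative sketch): the helpers above are typically chained
# to build a private network; all names and the CIDR below are placeholders.
#
#   net_id = create_neutron_net(neutron_client, "example-net")
#   subnet_id = create_neutron_subnet(neutron_client, "example-subnet",
#                                     "192.168.120.0/24", net_id)
#   router_id = create_neutron_router(neutron_client, "example-router")
#   add_interface_router(neutron_client, router_id, subnet_id)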


def get_network_id(neutron_client, network_name):
    networks = neutron_client.list_networks()['networks']
    id = ''
    for n in networks:
        if n['name'] == network_name:
            id = n['id']
            break
    return id

def check_neutron_net(neutron_client, net_name):
    # True only if the named network exists and has at least one subnet
    for network in neutron_client.list_networks()['networks']:
        if network['name'] == net_name and network['subnets']:
            return True
    return False

def get_image_id(glance_client, image_name):
    images = glance_client.images.list()
    id = ''
    for i in images:
        if i.name == image_name:
            id = i.id
            break
    return id

def create_glance_image(glance_client, image_name, file_path):
    try:
        with open(file_path) as fimage:
            image = glance_client.images.create(name=image_name,
                                                is_public=True,
                                                disk_format="qcow2",
                                                container_format="bare",
                                                data=fimage)
        return image.id
    except:
        return False
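
# Example usage (illustrative only; the image path is a placeholder):
#
#   image_id = create_glance_image(glance_client, "example-image",
#                                  "/tmp/cirros-disk.img")
#   if not image_id:
#       print "Image creation failed"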

def get_flavor_id(nova_client, flavor_name):
    flavors = nova_client.flavors.list(detailed=True)
    id = ''
    for f in flavors:
        if f.name == flavor_name:
            id = f.id
            break
    return id

def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
    flavors = nova_client.flavors.list(detailed=True)
    id = ''
    for f in flavors:
        if min_ram <= f.ram <= max_ram:
            id = f.id
            break
    return id


def get_tenant_id(keystone_client, tenant_name):
    tenants = keystone_client.tenants.list()
    id = ''
    for t in tenants:
        if t.name == tenant_name:
            id = t.id
            break
    return id

def get_role_id(keystone_client, role_name):
    roles = keystone_client.roles.list()
    id = ''
    for r in roles:
        if r.name == role_name:
            id = r.id
            break
    return id

def get_user_id(keystone_client, user_name):
    users = keystone_client.users.list()
    id = ''
    for u in users:
        if u.name == user_name:
            id = u.id
            break
    return id

def create_tenant(keystone_client, tenant_name, tenant_description):
    try:
        tenant = keystone_client.tenants.create(tenant_name, tenant_description, enabled=True)
        return tenant.id
    except:
        print "Error:", sys.exc_info()[0]
        return False

def delete_tenant(keystone_client, tenant_id):
    try:
        tenant = keystone_client.tenants.delete(tenant_id)
        return True
    except:
        print "Error:", sys.exc_info()[0]
        return False

def add_role_user(keystone_client, user_id, role_id, tenant_id):
    try:
        keystone_client.roles.add_user_role(user_id, role_id, tenant_id)
        return True
    except:
        print "Error:", sys.exc_info()[0]
        return False
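
# Example usage (illustrative sketch): creating a tenant and granting an
# existing user an existing role in it; the names below are placeholders.
#
#   tenant_id = create_tenant(keystone_client, "example-tenant", "Demo tenant")
#   user_id = get_user_id(keystone_client, "admin")
#   role_id = get_role_id(keystone_client, "admin")
#   add_role_user(keystone_client, user_id, role_id, tenant_id)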


def check_internet_connectivity(url='http://www.opnfv.org/'):
    """
    Check if there is access to the internet
    """
    try:
        urllib2.urlopen(url, timeout=5)
        return True
    except urllib2.URLError:
        return False


def download_url(url, dest_path):
    """
    Download a file to a destination path given a URL
    """
    name = url.rsplit('/')[-1]
    dest = os.path.join(dest_path, name)
    try:
        response = urllib2.urlopen(url)
    except (urllib2.HTTPError, urllib2.URLError):
        return False

    with open(dest, 'wb') as f:
        f.write(response.read())
    return True


def execute_command(cmd, logger=None):
    """
    Execute a shell command, logging its output if a logger is given.
    Returns True on success; exits the process on failure.
    """
    if logger:
        logger.debug('Executing command : {}'.format(cmd))
    output_file = "output.txt"
    with open(output_file, 'w+') as f:
        p = subprocess.call(cmd, shell=True, stdout=f,
                            stderr=subprocess.STDOUT)
    with open(output_file, 'r') as f:
        result = f.read()
    if result != "" and logger:
        logger.debug(result)
    if p == 0:
        return True
    else:
        if logger:
            logger.error("Error when executing command %s" % cmd)
        sys.exit(-1)
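
# Example usage (illustrative only): note that execute_command() returns True
# on success but terminates the whole process on failure.
#
#   import logging
#   logger = logging.getLogger(__name__)
#   if execute_command("ping -c 1 localhost", logger):
#       logger.debug("Ping succeeded")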
ss="s2"> \"which efibootmgr > /dev/null 2>&1 && \ efibootmgr | grep -oP '(?<=Boot)[0-9]+(?=.*ubuntu)' | \ xargs -I{} efibootmgr --delete-bootnum --bootnum {}; \ rm -rf /boot/efi/*\"" || true } function cleanup_vms { # clean up existing nodes for node in $(virsh list --name | grep -P '\w{3}\d{2}'); do virsh destroy "${node}" done for node in $(virsh list --name --all | grep -P '\w{3}\d{2}'); do virsh domblklist "${node}" | awk '/^.da/ {print $2}' | \ xargs --no-run-if-empty -I{} sudo rm -f {} virsh undefine "${node}" --remove-all-storage --nvram || \ virsh undefine "${node}" --remove-all-storage done } function prepare_vms { local base_image=$1; shift local image_dir=$1; shift local repos_pkgs_str=$1; shift # ^-sep list of repos, pkgs to install/rm local vnodes=("$@") local image=base_image_opnfv_fuel.img local vcp_image=${image%.*}_vcp.img local _o=${base_image/*\/} local _h=$(echo "${repos_pkgs_str}.$(md5sum "${image_dir}/${_o}")" | \ md5sum | cut -c -8) local _tmp cleanup_uefi cleanup_vms get_base_image "${base_image}" "${image_dir}" IFS='^' read -r -a repos_pkgs <<< "${repos_pkgs_str}" echo "[INFO] Lookup cache / build patched base image for fingerprint: ${_h}" _tmp="${image%.*}.${_h}.img" if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${image}" ]; then echo "[INFO] Patched base image found" else rm -f "${image_dir}/${image%.*}"* if [[ ! "${repos_pkgs_str}" =~ ^\^+$ ]]; then echo "[INFO] Patching base image ..." cp "${image_dir}/${_o}" "${image_dir}/${_tmp}" __kernel_modules "${image_dir}" mount_image "${_tmp}" "${image_dir}" apt_repos_pkgs_image "${repos_pkgs[@]:0:4}" cleanup_mounts else echo "[INFO] No patching required, using vanilla base image" ln -sf "${image_dir}/${_o}" "${image_dir}/${_tmp}" fi ln -sf "${image_dir}/${_tmp}" "${image_dir}/${image}" fi # Create config ISO and resize OS disk image for each foundation node VM for node in "${vnodes[@]}"; do if [[ "${node}" =~ ^(cfg01|mas01) ]]; then user_data='user-data.mcp.sh' else user_data='user-data.admin.sh' fi ./create-config-drive.sh -k "$(basename "${SSH_KEY}").pub" \ -u "${user_data}" -h "${node}" "${image_dir}/mcp_${node}.iso" cp "${image_dir}/${image}" "${image_dir}/mcp_${node}.qcow2" qemu-img resize "${image_dir}/mcp_${node}.qcow2" 100G # Prepare dedicated drive for cinder on cmp nodes if [[ "${node}" =~ ^(cmp) ]]; then qemu-img create "${image_dir}/mcp_${node}_storage.qcow2" 100G fi done # VCP VMs base image specific changes if [[ ! "${repos_pkgs_str}" =~ \^{3}$ ]] && [ -n "${repos_pkgs[*]:4}" ]; then echo "[INFO] Lookup cache / build patched VCP image for md5sum: ${_h}" _tmp="${vcp_image%.*}.${_h}.img" if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${vcp_image}" ]; then echo "[INFO] Patched VCP image found" else echo "[INFO] Patching VCP image ..." 
cp "${image_dir}/${image}" "${image_dir}/${_tmp}" __kernel_modules "${image_dir}" mount_image "${_tmp}" "${image_dir}" apt_repos_pkgs_image "${repos_pkgs[@]:4:4}" cleanup_mounts ln -sf "${image_dir}/${_tmp}" "${image_dir}/${vcp_image}" fi fi } function jumpserver_pkg_install { local req_type=$1 if [ -n "$(command -v apt-get)" ]; then pkg_type='deb'; pkg_cmd='sudo apt-get install -y' else pkg_type='rpm'; pkg_cmd='sudo yum install -y --skip-broken' fi eval "$(parse_yaml "./requirements_${pkg_type}.yaml")" for section in 'common' "$(uname -i)"; do section_var="${req_type}_${section}[*]" pkg_list+=" ${!section_var}" done # shellcheck disable=SC2086 ${pkg_cmd} ${pkg_list} } function jumpserver_check_requirements { # shellcheck disable=SC2178 local vnodes=$1; shift local br=("$@") local err_br_not_found='Linux bridge not found!' local err_br_virsh_net='is a virtual network, Linux bridge expected!' local warn_br_endpoint="Endpoints might be inaccessible from external hosts!" # MaaS requires a Linux bridge for PXE/admin if [[ "${vnodes}" =~ mas01 ]]; then if ! brctl showmacs "${br[0]}" >/dev/null 2>&1; then notify_e "[ERROR] PXE/admin (${br[0]}) ${err_br_not_found}" fi # Assume virsh network name matches bridge name (true if created by us) if virsh net-info "${br[0]}" >/dev/null 2>&1; then notify_e "[ERROR] ${br[0]} ${err_br_virsh_net}" fi fi # If virtual nodes are present, public should be a Linux bridge if [ "$(echo "${vnodes}" | wc -w)" -gt 2 ]; then if ! brctl showmacs "${br[3]}" >/dev/null 2>&1; then if [[ "${vnodes}" =~ mas01 ]]; then # Baremetal nodes *require* a proper public network notify_e "[ERROR] Public (${br[3]}) ${err_br_not_found}" else notify_n "[WARN] Public (${br[3]}) ${err_br_not_found}" 3 notify_n "[WARN] ${warn_br_endpoint}" 3 fi fi if virsh net-info "${br[3]}" >/dev/null 2>&1; then if [[ "${vnodes}" =~ mas01 ]]; then notify_e "[ERROR] ${br[3]} ${err_br_virsh_net}" else notify_n "[WARN] ${br[3]} ${err_br_virsh_net}" 3 notify_n "[WARN] ${warn_br_endpoint}" 3 fi fi fi } function create_networks { local all_vnode_networks=("mcpcontrol" "$@") # create required networks, including constant "mcpcontrol" for net in "${all_vnode_networks[@]}"; do if virsh net-info "${net}" >/dev/null 2>&1; then virsh net-destroy "${net}" || true virsh net-undefine "${net}" fi # in case of custom network, host should already have the bridge in place if [ -f "virsh_net/net_${net}.xml" ] && \ [ ! 
-d "/sys/class/net/${net}/bridge" ]; then virsh net-define "virsh_net/net_${net}.xml" virsh net-autostart "${net}" virsh net-start "${net}" fi done # create veth pairs for relevant networks (mcpcontrol, pxebr, mgmt) for i in $(seq 0 2 4); do sudo ip link del "veth_mcp$i" || true sudo ip link add "veth_mcp$i" type veth peer name "veth_mcp$((i+1))" sudo ip link set "veth_mcp$i" up mtu 9000 sudo ip link set "veth_mcp$((i+1))" up mtu 9000 sudo brctl addif "${all_vnode_networks[$((i/2))]}" "veth_mcp$i" done } function create_vms { local image_dir=$1; shift # vnode data should be serialized with the following format: # '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]' IFS='|' read -r -a vnodes <<< "$1"; shift # AArch64: prepare arch specific arguments local virt_extra_args="" if [ "$(uname -i)" = "aarch64" ]; then # No Cirrus VGA on AArch64, use virtio instead virt_extra_args="$virt_extra_args --video=virtio" fi # create vms with specified options for serialized_vnode_data in "${vnodes[@]}"; do if [ -z "${serialized_vnode_data}" ]; then continue; fi IFS=',' read -r -a vnode_data <<< "${serialized_vnode_data}" # prepare network args local vnode_networks=("$@") if [[ "${vnode_data[0]}" =~ ^(cfg01|mas01) ]]; then net_args=" --network network=mcpcontrol,model=virtio" # 3rd interface gets connected to PXE/Admin Bridge (cfg01, mas01) vnode_networks[2]="${vnode_networks[0]}" else net_args=" --network bridge=${vnode_networks[0]},model=virtio" fi for net in "${vnode_networks[@]:1}"; do net_args="${net_args} --network bridge=${net},model=virtio" done # dedicated storage drive for cinder on cmp nodes virt_extra_storage= if [[ "${vnode_data[0]}" =~ ^(cmp) ]]; then virt_extra_storage="--disk path=${image_dir}/mcp_${vnode_data[0]}_storage.qcow2,format=qcow2,bus=virtio,cache=none,io=native" fi # shellcheck disable=SC2086 virt-install --name "${vnode_data[0]}" \ --ram "${vnode_data[1]}" --vcpus "${vnode_data[2]}" \ --cpu host-passthrough --accelerate ${net_args} \ --disk path="${image_dir}/mcp_${vnode_data[0]}.qcow2",format=qcow2,bus=virtio,cache=none,io=native \ ${virt_extra_storage} \ --os-type linux --os-variant none \ --boot hd --vnc --console pty --autostart --noreboot \ --disk path="${image_dir}/mcp_${vnode_data[0]}.iso",device=cdrom \ --noautoconsole \ ${virt_extra_args} done } function update_mcpcontrol_network { # set static ip address for salt master node, MaaS node local amac=$(virsh domiflist mas01 2>&1| awk '/mcpcontrol/ {print $5; exit}') [ -z "${amac}" ] || virsh net-update "mcpcontrol" add ip-dhcp-host \ "<host mac='${amac}' name='mas01' ip='${MAAS_IP}'/>" --live --config } function reset_vms { local vnodes=("$@") local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}" # reset non-infrastructure vms, wait for them to come back online for node in "${vnodes[@]}"; do if [[ ! "${node}" =~ (cfg01|mas01) ]]; then virsh reset "${node}" fi done for node in "${vnodes[@]}"; do if [[ ! "${node}" =~ (cfg01|mas01) ]]; then wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all" fi done } function start_vms { local vnodes=("$@") # start vms for node in "${vnodes[@]}"; do virsh start "${node}" sleep $((RANDOM%5+1)) done } function prepare_containers { local image_dir=$1 [ -n "${image_dir}" ] || exit 1 [ -n "${MCP_REPO_ROOT_PATH}" ] || exit 1 docker-compose --version > /dev/null 2>&1 || COMPOSE_PREFIX="${image_dir}/" "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml down if [ ! 
"${MCP_DOCKER_TAG}" = 'verify' ]; then "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml pull fi sudo rm -rf "${image_dir}/"{salt,hosts,pki} "${image_dir}/nodes/"* mkdir -p "${image_dir}/salt/"{master.d,minion.d} touch "${image_dir}/hosts" } function start_containers { local image_dir=$1 [ -n "${image_dir}" ] || exit 1 docker-compose --version > /dev/null 2>&1 || COMPOSE_PREFIX="${image_dir}/" "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml up -d } function check_connection { local total_attempts=60 local sleep_time=5 set +e echo '[INFO] Attempting to get into Salt master ...' # wait until ssh on Salt master is available # shellcheck disable=SC2034 for attempt in $(seq "${total_attempts}"); do # shellcheck disable=SC2086 ssh ${SSH_OPTS} "ubuntu@${SALT_MASTER}" uptime case $? in 0) echo "${attempt}> Success"; break ;; *) echo "${attempt}/${total_attempts}> ssh server ain't ready yet, waiting for ${sleep_time} seconds ..." ;; esac sleep $sleep_time done set -e } function parse_yaml { local prefix=$2 local s local w local fs s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs="$(echo @|tr @ '\034')" sed -e 's|---||g' -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \ -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" | awk -F"$fs" '{ indent = length($1)/2; vname[indent] = $2; for (i in vname) {if (i > indent) {delete vname[i]}} if (length($3) > 0) { vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")} printf("%s%s%s=(\"%s\")\n", "'"$prefix"'",vn, $2, $3); } }' | sed 's/_=/+=/g' } function wait_for { # Execute in a subshell to prevent local variable override during recursion ( local total_attempts=$1; shift local cmdstr=$* local sleep_time=10 echo -e "\n[wait_for] Waiting for cmd to return success: ${cmdstr}" # shellcheck disable=SC2034 for attempt in $(seq "${total_attempts}"); do echo "[wait_for] Attempt ${attempt}/${total_attempts%.*} for: ${cmdstr}" if [ "${total_attempts%.*}" = "${total_attempts}" ]; then eval "${cmdstr}" && echo "[wait_for] OK: ${cmdstr}" && return 0 || true else ! 
(eval "${cmdstr}" || echo 'No response') |& tee /dev/stderr | \ grep -Eq '(Not connected|No response)' && \ echo "[wait_for] OK: ${cmdstr}" && return 0 || true fi sleep "${sleep_time}" done echo "[wait_for] ERROR: Failed after max attempts: ${cmdstr}" return 1 ) } function do_udev_cfg { local _conf='/etc/udev/rules.d/99-opnfv-fuel-vnet-mtu.rules' # http://linuxaleph.blogspot.com/2013/01/how-to-network-jumbo-frames-to-kvm-guest.html echo 'SUBSYSTEM=="net", ACTION=="add|change", KERNEL=="vnet*", RUN+="/bin/sh -c '"'/bin/sleep 1; /sbin/ip link set %k mtu 9000'\"" |& sudo tee "${_conf}" echo 'SUBSYSTEM=="net", ACTION=="add|change", KERNEL=="*-nic", RUN+="/bin/sh -c '"'/bin/sleep 1; /sbin/ip link set %k mtu 9000'\"" |& sudo tee -a "${_conf}" sudo udevadm control --reload sudo udevadm trigger } function do_sysctl_cfg { local _conf='/etc/sysctl.d/99-opnfv-fuel-bridge.conf' # https://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf if modprobe br_netfilter bridge; then echo 'net.bridge.bridge-nf-call-arptables = 0' |& sudo tee "${_conf}" echo 'net.bridge.bridge-nf-call-iptables = 0' |& sudo tee -a "${_conf}" echo 'net.bridge.bridge-nf-call-ip6tables = 0' |& sudo tee -a "${_conf}" # Some distros / sysadmins explicitly blacklist br_netfilter sudo sysctl -q -p "${_conf}" || true fi } function get_nova_compute_pillar_data { local value=$(salt -C 'I@nova:compute and *01*' pillar.get _param:"${1}" --out yaml | cut -d ' ' -f2) if [ "${value}" != "''" ]; then echo "${value}" fi } function docker_install { local image_dir=$1 # Mininum effort attempt at installing Docker if missing if ! docker --version; then curl -fsSL https://get.docker.com -o get-docker.sh sudo sh get-docker.sh rm get-docker.sh # On RHEL distros, the Docker service should be explicitly started sudo systemctl start docker else DOCKER_VER=$(docker version --format '{{.Server.Version}}') if [ "${DOCKER_VER%%.*}" -lt 2 ]; then notify_e "[ERROR] Docker version ${DOCKER_VER} is too old, please upgrade it." fi fi # Distro-provided docker-compose might be simply broken (Ubuntu 16.04, CentOS 7) if ! docker-compose --version > /dev/null 2>&1; then COMPOSE_BIN="${image_dir}/docker-compose" COMPOSE_VERSION='1.22.0' notify_n "[WARN] Using docker-compose ${COMPOSE_VERSION} in ${COMPOSE_BIN}" 3 if [ ! -e "${COMPOSE_BIN}" ]; then COMPOSE_URL="https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}" sudo curl -L "${COMPOSE_URL}/docker-compose-$(uname -s)-$(uname -m)" -o "${COMPOSE_BIN}" sudo chmod +x "${COMPOSE_BIN}" fi fi }