summaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorFeng Pan <fpan@redhat.com>2017-08-23 17:26:53 +0000
committerGerrit Code Review <gerrit@opnfv.org>2017-08-23 17:26:53 +0000
commitc6f04a5dee991a131a49c2fde9c5990fe2edac4e (patch)
tree6385096492e6526bf091bae4f3b956a1e865fbba /lib
parent52b4c2556b909a1e61b50f0ff75778bed962ba85 (diff)
parentf4d388ea508ba00771e43a219ac64e0d430b73bd (diff)
Merge "Migrates Apex to Python"
Diffstat (limited to 'lib')
-rw-r--r--lib/ansible/playbooks/build_dependencies.yml10
-rw-r--r--lib/ansible/playbooks/configure_undercloud.yml116
-rw-r--r--lib/ansible/playbooks/deploy_dependencies.yml66
-rw-r--r--lib/ansible/playbooks/deploy_overcloud.yml68
-rw-r--r--lib/ansible/playbooks/post_deploy_overcloud.yml45
-rw-r--r--lib/ansible/playbooks/post_deploy_undercloud.yml118
-rw-r--r--lib/ansible/playbooks/templates/external_vlan_ifcfg.yml.j29
-rw-r--r--lib/ansible/playbooks/templates/virsh_network_default.xml.j210
-rw-r--r--lib/ansible/playbooks/templates/virsh_network_ovs.xml.j26
-rw-r--r--lib/ansible/playbooks/templates/virsh_pool.xml.j26
-rw-r--r--lib/ansible/playbooks/undercloud_aarch64.yml49
-rw-r--r--lib/common-functions.sh308
-rwxr-xr-xlib/configure-deps-functions.sh173
-rwxr-xr-xlib/configure-vm203
-rw-r--r--lib/installer/domain.xml34
-rwxr-xr-xlib/overcloud-deploy-functions.sh503
-rwxr-xr-xlib/parse-functions.sh70
-rwxr-xr-xlib/post-install-functions.sh281
-rw-r--r--lib/python/apex/__init__.py15
-rw-r--r--lib/python/apex/clean.py39
-rw-r--r--lib/python/apex/common/__init__.py0
-rw-r--r--lib/python/apex/common/constants.py30
-rw-r--r--lib/python/apex/common/utils.py31
-rw-r--r--lib/python/apex/deploy_settings.py195
-rw-r--r--lib/python/apex/inventory.py98
-rw-r--r--lib/python/apex/ip_utils.py230
-rw-r--r--lib/python/apex/network_environment.py219
-rw-r--r--lib/python/apex/network_settings.py360
-rwxr-xr-xlib/python/apex_python_utils.py265
-rw-r--r--lib/python/build_utils.py108
-rwxr-xr-xlib/undercloud-functions.sh291
-rw-r--r--lib/utility-functions.sh85
-rwxr-xr-xlib/virtual-setup-functions.sh164
33 files changed, 499 insertions, 3706 deletions
diff --git a/lib/ansible/playbooks/build_dependencies.yml b/lib/ansible/playbooks/build_dependencies.yml
index dec8ab7c..afe12b74 100644
--- a/lib/ansible/playbooks/build_dependencies.yml
+++ b/lib/ansible/playbooks/build_dependencies.yml
@@ -11,15 +11,17 @@
libguestfs-tools,bsdtar,libvirt,yum-utils,
python2-oslo-config,python2-debtcollector,
make, python34-pip, python-virtualenv,libguestfs-tools-c,
- supermin,supermin5,perl-Sys-Guestfs,python-libguestfs
+ supermin,supermin5,perl-Sys-Guestfs,python-libguestfs,
+ libvirt-devel,python34-docutils,python-docutils
- name: Install Virtualization group
yum:
name: "@Virtualization Host"
- - name: Install python ipmi from OPNFV artifacts
- yum:
- name: 'http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm'
+ - pip:
+ name: python-ipmi
+ executable: pip3.4
- pip:
name: tox
+ executable: pip3.4
- pip:
name: gitpython
executable: pip3.4
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
new file mode 100644
index 00000000..7b236624
--- /dev/null
+++ b/lib/ansible/playbooks/configure_undercloud.yml
@@ -0,0 +1,116 @@
+---
+- hosts: all
+ tasks:
+ - name: Generate SSH key for stack if missing
+ shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+ - name: Fix ssh key for stack
+ shell: restorecon -r /home/stack
+ become: yes
+ - file:
+ path: /home/stack/nics
+ state: directory
+ owner: stack
+ group: stack
+ mode: 0775
+ - copy:
+ src: /root/.ssh/id_rsa.pub
+ dest: /home/stack/jumphost_id_rsa.pub
+ owner: stack
+ group: stack
+ mode: 0644
+ - copy:
+ src: "{{ apex_temp_dir }}/{{ item }}.yaml"
+ dest: "/home/stack/nics/{{ item }}.yaml"
+ owner: stack
+ group: stack
+ mode: 0644
+ with_items:
+ - controller
+ - compute
+ - lineinfile:
+ path: /etc/sudoers
+ regexp: 'Defaults\s*requiretty'
+ state: absent
+ become: yes
+ - name: openstack-configs undercloud
+ shell: openstack-config --set undercloud.conf DEFAULT {{ item }}
+ with_items: "{{ undercloud_config }}"
+ - name: openstack-configs ironic
+ shell: openstack-config --set /etc/ironic/ironic.conf {{ item }}
+ become: yes
+ with_items: "{{ ironic_config }}"
+ - name: openstack-configs undercloud aarch64
+ shell: openstack-config --set undercloud.conf DEFAULT ipxe_enabled false
+ when: aarch64
+ - lineinfile:
+ path: /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
+ regexp: '_link_ip_address_pxe_configs'
+ line: '_link_mac_pxe_configs(task)'
+ when: aarch64
+ - name: undercloud install
+ shell: openstack undercloud install &> apex-undercloud-install.log
+ become: yes
+ become_user: stack
+ - name: openstack-configs nova
+ shell: openstack-config --set /etc/nova/nova.conf DEFAULT {{ item }}
+ become: yes
+ with_items: "{{ nova_config }}"
+ - name: restart nova services
+ service:
+ name: "{{ item }}"
+ state: restarted
+ enabled: yes
+ with_items:
+ - openstack-nova-conductor
+ - openstack-nova-compute
+ - openstack-nova-api
+ - openstack-nova-scheduler
+ - name: openstack-configs neutron
+ shell: openstack-config --set /etc/neutron/neutron.conf DEFAULT {{ item }}
+ become: yes
+ with_items: "{{ neutron_config }}"
+ - name: restart neutron services
+ service:
+ name: "{{ item }}"
+ state: restarted
+ enabled: yes
+ with_items:
+ - neutron-server
+ - neutron-dhcp-agent
+ - name: configure external network vlan ifcfg
+ template:
+ src: external_vlan_ifcfg.yml.j2
+ dest: "/etc/sysconfig/network-scripts/ifcfg-vlan{{ external_network.vlan }}"
+ owner: root
+ group: root
+ mode: 0644
+ become: yes
+ when:
+ - external_network.vlan != "native"
+ - external_network.enabled
+ - name: bring up vlan ifcfg
+ shell: "ifup vlan{{ external_network.vlan }}"
+ become: yes
+ when:
+ - external_network.vlan != "native"
+ - external_network.enabled
+ - name: assign IP to native eth2
+ shell: ip a a {{ external_network.ip }}/{{ external_network.prefix }} dev eth2
+ become: yes
+ when:
+ - external_network.vlan == "native"
+ - external_network.enabled
+ - name: bring up eth2
+ shell: ip link set up dev eth2
+ when:
+ - external_network.vlan == "native"
+ - external_network.enabled
+ become: yes
+ - name: fetch storage environment file
+ fetch:
+ src: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+ dest: "{{ apex_temp_dir }}/"
+ flat: yes
+
+- include: undercloud_aarch64.yml
+ when: aarch64
diff --git a/lib/ansible/playbooks/deploy_dependencies.yml b/lib/ansible/playbooks/deploy_dependencies.yml
new file mode 100644
index 00000000..77231622
--- /dev/null
+++ b/lib/ansible/playbooks/deploy_dependencies.yml
@@ -0,0 +1,66 @@
+---
+- hosts: localhost
+ tasks:
+ - sysctl:
+ name: net.ipv4.ip_forward
+ state: present
+ value: 1
+ sysctl_set: yes
+ - systemd:
+ name: dhcpd
+ state: stopped
+ enabled: no
+ ignore_errors: yes
+ - systemd:
+ name: libvirtd
+ state: started
+ enabled: yes
+ - systemd:
+ name: openvswitch
+ state: started
+ enabled: yes
+ - virt_net:
+ command: define
+ name: default
+ xml: '{{ lookup("template", "virsh_network_default.xml.j2") }}'
+ state: active
+ autostart: yes
+ - openvswitch_bridge:
+ bridge: 'br-{{ item }}'
+ state: present
+ with_items: '{{ virsh_enabled_networks }}'
+ - virt_net:
+ command: define
+ name: '{{ item }}'
+ xml: '{{ lookup("template", "virsh_network_ovs.xml.j2") }}'
+ autostart: yes
+ with_items: '{{ virsh_enabled_networks }}'
+ - virt_net:
+ command: create
+ name: '{{ item }}'
+ with_items: '{{ virsh_enabled_networks }}'
+ - virt_pool:
+ name: default
+ command: define
+ autostart: yes
+ state: active
+ xml: '{{ lookup("template", "virsh_pool.xml.j2") }}'
+ - lineinfile:
+ path: /etc/modprobe.d/kvm_intel.conf
+ line: 'options kvm-intel nested=1'
+ create: yes
+ when: ansible_architecture == "x86_64"
+ - modprobe:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - kvm
+ - kvm_intel
+ when: ansible_architecture == "x86_64"
+ - name: Generate SSH key for root if missing
+ shell: test -e ~/.ssh/id_rsa || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+ - name: Manually patch vmbc to work with python3.x
+ lineinfile:
+ line: " conn.defineXML(ET.tostring(tree, encoding='unicode'))"
+ regexp: "tostring"
+ path: /usr/lib/python3.4/site-packages/virtualbmc/vbmc.py
diff --git a/lib/ansible/playbooks/deploy_overcloud.yml b/lib/ansible/playbooks/deploy_overcloud.yml
new file mode 100644
index 00000000..76bbbc67
--- /dev/null
+++ b/lib/ansible/playbooks/deploy_overcloud.yml
@@ -0,0 +1,68 @@
+---
+- hosts: all
+ tasks:
+ - name: Copy all files to undercloud
+ copy:
+ src: "{{ apex_temp_dir }}/{{ item }}"
+ dest: "/home/stack/{{ item }}"
+ owner: stack
+ group: stack
+ mode: 0644
+ with_items:
+ - network-environment.yaml
+ - instackenv.json
+ - opnfv-environment.yaml
+ - overcloud-full.qcow2
+ - deploy_command
+ - virtual-environment.yaml
+ - baremetal-environment.yaml
+ - copy:
+ src: "{{ apex_temp_dir }}/storage-environment.yaml"
+ dest: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+ owner: root
+ group: root
+ mode: 0664
+ - systemd:
+ name: openstack-swift-proxy
+ state: restarted
+ enabled: yes
+ become: yes
+ - name: Upload glance images
+ shell: "{{ stackrc }} && openstack overcloud image upload"
+ become: yes
+ become_user: stack
+ - name: Import inventory (baremetal)
+ shell: "{{ stackrc }} && {{ item }}"
+ with_items:
+ - openstack overcloud node import instackenv.json
+ - openstack overcloud node introspect --all-manageable --provide
+ when: not virtual
+ - name: Import inventory (virtual)
+ shell: "{{ stackrc }} && openstack overcloud node import --provide instackenv.json"
+ when: virtual
+ - name: Set flavors
+ shell: '{{ stackrc }} && openstack flavor set --property "cpu_arch"="x86_64" {{ item }}'
+ with_items:
+ - baremetal
+ - control
+ - compute
+ - name: Configure DNS server for ctlplane network
+ shell: "{{ stackrc }} && openstack subnet set ctlplane-subnet {{ dns_server_args }}"
+ - name: Execute Overcloud Deployment
+ shell: "{{ stackrc }} && bash deploy_command"
+ - name: Show Keystone output
+ shell: "{{ overcloudrc }} && {{ item }}"
+ when: debug
+ with_items:
+ - openstack endpoint list
+ - openstack service list
+ - name: Get overcloud nodes and IPs
+ shell: "{{ stackrc }} && openstack server list -f json"
+ register: nova_list
+ - name: Write nova list output to file
+ local_action: copy content="{{ nova_list.stdout }}" dest="{{ apex_temp_dir }}/nova_output"
+ - name: Fetch overcloudrc
+ fetch:
+ src: /home/stack/overcloudrc
+ dest: "{{ apex_temp_dir }}/"
+ flat: yes
diff --git a/lib/ansible/playbooks/post_deploy_overcloud.yml b/lib/ansible/playbooks/post_deploy_overcloud.yml
new file mode 100644
index 00000000..fdf70240
--- /dev/null
+++ b/lib/ansible/playbooks/post_deploy_overcloud.yml
@@ -0,0 +1,45 @@
+---
+- hosts: all
+ tasks:
+ - name: Bring up br-phy for OVS DPDK
+ shell: ifup br-phy
+ when:
+ - dataplane == 'ovs_dpdk'
+ - "'compute' in ansible_hostname"
+ become: yes
+ - name: Restart OVS Agent for DPDK
+ shell: systemctl restart neutron-openvswitch-agent
+ when:
+ - dataplane == 'ovs_dpdk'
+ - "'compute' in ansible_hostname"
+ - sdn == false
+ - name: SFC config workaround
+ file:
+ src: /etc/neutron/networking_sfc.conf
+ dest: /etc/neutron/conf.d/neutron-server/networking_sfc.conf
+ state: link
+ become: yes
+ when:
+ - sfc
+ - "'controller' in ansible_hostname"
+ - name: Ensure ZRPCD is up
+ systemd:
+ name: zrpcd
+ state: started
+ enabled: yes
+ become: yes
+ when:
+ - vpn
+ - "'controller-0' in ansible_hostname"
+ - name: VSPERF build base machine
+ shell: /build_base_machine.sh
+ args:
+ chdir: /var/opt/vsperf/systems/
+ become: yes
+ when:
+ - vsperf
+ - "'compute-0' in ansible_hostname"
+ - name: Fetch logs from node
+ fetch:
+ src: /var/log/messages
+ dest: "{{ apex_temp_dir }}"
diff --git a/lib/ansible/playbooks/post_deploy_undercloud.yml b/lib/ansible/playbooks/post_deploy_undercloud.yml
new file mode 100644
index 00000000..ba0746b2
--- /dev/null
+++ b/lib/ansible/playbooks/post_deploy_undercloud.yml
@@ -0,0 +1,118 @@
+---
+- hosts: all
+ tasks:
+ - name: Enable ssh to overcloud nodes from jumphost
+ shell: "cat /home/stack/jumphost_id_rsa.pub | ssh -T {{ SSH_OPTIONS }} heat-admin@{{ item.value }} 'cat >> ~/.ssh/authorized_keys'"
+ with_dict: "{{ overcloud_nodes }}"
+ become: yes
+ become_user: stack
+ - name: Configure external network
+ shell: "{{ overcloudrc }} && {{ item }}"
+ with_items: "{{ external_network_cmds }}"
+ - name: Configure gluon networks
+ shell: "{{ overcloudrc }} && {{ item }}"
+ when: gluon
+ with_items:
+ - openstack network create gluon-network --share --provider-network-type vxlan
+ - openstack subnet create gluon-subnet --no-gateway --no-dhcp --network GluonNetwork --subnet-range 0.0.0.0/1
+ - name: Find admin project id
+ shell: "{{ overcloudrc }} && openstack project list | grep admin | awk '{print $2}'"
+ register: os_project_id
+ - name: Inject OS_PROJECT_ID and OS_TENANT_NAME into overcloudrc
+ lineinfile:
+ line: "{{ item }}"
+ path: /home/stack/overcloudrc
+ with_items:
+ - "export OS_PROJECT_ID={{ os_project_id.stdout }}"
+ - "export OS_TENANT_NAME=admin"
+ - name: Install Docker
+ yum:
+ name: docker
+ state: present
+ when: yardstick or dovetail
+ become: yes
+ - systemd:
+ name: docker
+ state: started
+ enabled: yes
+ when: yardstick or dovetail
+ become: yes
+ - name: Pull yardstick docker image
+ docker_image:
+ name: opnfv/yardstick
+ when: yardstick
+ become: yes
+ - name: Pull dovetail docker image
+ docker_image:
+ name: opnfv/dovetail
+ when: dovetail
+ become: yes
+ - name: Register SDN VIP
+ shell: "{{ stackrc }} && neutron port-list | grep control_virtual_ip | grep -Eo '([0-9]+\\.){3}[0-9]+'"
+ register: sdn_vip
+ become: yes
+ become_user: stack
+ when: sdn != false
+ - name: Write SDN controller VIP to overcloudrc
+ lineinfile:
+ line: "export SDN_CONTROLLER_IP={{ sdn_vip.stdout }}"
+ regexp: 'SDN_CONTROLLER_IP'
+ path: "/home/stack/{{ item }}"
+ when: sdn != false
+ with_items:
+ - overcloudrc
+ - overcloudrc.v3
+ - name: Undercloud NAT - MASQUERADE interface
+ iptables:
+ table: nat
+ chain: POSTROUTING
+ out_interface: eth0
+ jump: MASQUERADE
+ when:
+ - virtual
+ - not external_network_ipv6
+ become: yes
+ - name: Undercloud NAT - MASQUERADE interface with subnet
+ iptables:
+ table: nat
+ chain: POSTROUTING
+ out_interface: eth0
+ jump: MASQUERADE
+ source: "{{ external_cidr }}"
+ when:
+ - virtual
+ - not external_network_ipv6
+ become: yes
+ - name: Undercloud NAT - Allow Forwarding
+ iptables:
+ chain: FORWARD
+ in_interface: eth2
+ jump: ACCEPT
+ when:
+ - virtual
+ - not external_network_ipv6
+ become: yes
+ - name: Undercloud NAT - Allow Stateful Forwarding
+ iptables:
+ chain: FORWARD
+ in_interface: eth2
+ jump: ACCEPT
+ source: "{{ external_cidr }}"
+ ctstate: ESTABLISHED,RELATED
+ when:
+ - virtual
+ - not external_network_ipv6
+ become: yes
+ - name: Undercloud NAT - Save iptables
+ shell: service iptables save
+ become: yes
+ when:
+ - virtual
+ - not external_network_ipv6
+ - name: Create congress datasources
+ shell: "{{ overcloudrc }} && openstack congress datasource create {{ item }}"
+ become: yes
+ become_user: stack
+ when: congress
+ with_items: "{{ congress_datasources }}"
+ ignore_errors: yes
diff --git a/lib/ansible/playbooks/templates/external_vlan_ifcfg.yml.j2 b/lib/ansible/playbooks/templates/external_vlan_ifcfg.yml.j2
new file mode 100644
index 00000000..c478a7d9
--- /dev/null
+++ b/lib/ansible/playbooks/templates/external_vlan_ifcfg.yml.j2
@@ -0,0 +1,9 @@
+DEVICE=vlan{{ external_network.vlan }}
+ONBOOT=yes
+DEVICETYPE=ovs
+TYPE=OVSIntPort
+BOOTPROTO=static
+IPADDR={{ external_network.ip }}
+PREFIX={{ external_network.prefix }}
+OVS_BRIDGE=br-ctlplane
+OVS_OPTIONS="tag={{ external_network.vlan }}"
diff --git a/lib/ansible/playbooks/templates/virsh_network_default.xml.j2 b/lib/ansible/playbooks/templates/virsh_network_default.xml.j2
new file mode 100644
index 00000000..d7241d0c
--- /dev/null
+++ b/lib/ansible/playbooks/templates/virsh_network_default.xml.j2
@@ -0,0 +1,10 @@
+<network>
+ <name>default</name>
+ <bridge name="virbr0"/>
+ <forward/>
+ <ip address="192.168.122.1" netmask="255.255.255.0">
+ <dhcp>
+ <range start="192.168.122.2" end="192.168.122.254"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/lib/ansible/playbooks/templates/virsh_network_ovs.xml.j2 b/lib/ansible/playbooks/templates/virsh_network_ovs.xml.j2
new file mode 100644
index 00000000..75a06eea
--- /dev/null
+++ b/lib/ansible/playbooks/templates/virsh_network_ovs.xml.j2
@@ -0,0 +1,6 @@
+<network ipv6='yes'>
+ <name>{{ item }}</name>
+ <forward mode='bridge'/>
+ <bridge name='br-{{ item }}'/>
+ <virtualport type='openvswitch'/>
+</network>
diff --git a/lib/ansible/playbooks/templates/virsh_pool.xml.j2 b/lib/ansible/playbooks/templates/virsh_pool.xml.j2
new file mode 100644
index 00000000..f6ea498a
--- /dev/null
+++ b/lib/ansible/playbooks/templates/virsh_pool.xml.j2
@@ -0,0 +1,6 @@
+<pool type='dir'>
+ <name>default</name>
+ <target>
+ <path>/var/lib/libvirt/images</path>
+ </target>
+</pool>
diff --git a/lib/ansible/playbooks/undercloud_aarch64.yml b/lib/ansible/playbooks/undercloud_aarch64.yml
new file mode 100644
index 00000000..5b607c3e
--- /dev/null
+++ b/lib/ansible/playbooks/undercloud_aarch64.yml
@@ -0,0 +1,49 @@
+---
+- hosts: all
+ tasks:
+ - name: aarch64 configuration
+ block:
+ - shell: yum -y reinstall grub2-efi shim
+ - copy:
+ src: /boot/efi/EFI/centos/grubaa64.efi
+ dest: /tftpboot/grubaa64.efi
+ remote_src: yes
+ - file:
+ path: /tftpboot/EFI/centos
+ state: directory
+ mode: 0755
+ - copy:
+ content: |
+ set default=master
+ set timeout=5
+ set hidden_timeout_quiet=false
+ menuentry "master" {
+ configfile /tftpboot/\\\$net_default_ip.conf
+ }
+ dest: /tftpboot/EFI/centos/grub.cfg
+ mode: 0644
+ - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template $pybasedir/drivers/modules/pxe_grub_config.template'
+ - shell: 'openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi'
+ - systemd:
+ name: openstack-ironic-conductor
+ state: restarted
+ enabled: yes
+ - replace:
+ path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
+ regexp: 'linuxefi'
+ replace: 'linux'
+ - replace:
+ path: /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
+ regexp: 'initrdefi'
+ replace: 'initrd'
+ - lineinfile:
+ path: /tftpboot/map-file
+ insertafter: EOF
+ state: present
+ line: ''
+ - shell: "echo 'r ^/EFI/centos/grub.cfg-(.*) /tftpboot/pxelinux.cfg/\\1' | sudo tee --append /tftpboot/map-file"
+ - systemd:
+ name: xinetd
+ state: restarted
+ enabled: yes
+ become: yes
diff --git a/lib/common-functions.sh b/lib/common-functions.sh
deleted file mode 100644
index 709dbf97..00000000
--- a/lib/common-functions.sh
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Common Functions used by OPNFV Apex
-# author: Tim Rozet (trozet@redhat.com)
-
-##converts subnet mask to prefix
-##params: subnet mask
-function prefix2mask {
- # Number of args to shift, 255..255, first non-255 byte, zeroes
- set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0
- [ $1 -gt 1 ] && shift $1 || shift
- echo ${1-0}.${2-0}.${3-0}.${4-0}
-}
-
-##find ip of interface
-##params: interface name, address family
-function find_ip {
- local af
- if [[ -z "$1" ]]; then
- return 1
- fi
- if [[ -z "$2" ]]; then
- af=4
- else
- af=$2
- fi
-
- python3 -B $LIB/python/apex_python_utils.py find-ip -i $1 -af $af
-}
-
-##attach interface to OVS and set the network config correctly
-##params: bride to attach to, interface to attach, network type (optional)
-##external indicates attaching to a external interface
-function attach_interface_to_ovs {
- local bridge interface
- local if_ip if_mask if_gw if_file ovs_file if_prefix
- local if_metric if_dns1 if_dns2
-
- if [[ -z "$1" || -z "$2" ]]; then
- return 1
- else
- bridge=$1
- interface=$2
- fi
-
- if ovs-vsctl list-ports ${bridge} | grep ${interface}; then
- return 0
- fi
-
- if_file=/etc/sysconfig/network-scripts/ifcfg-${interface}
- ovs_file=/etc/sysconfig/network-scripts/ifcfg-${bridge}
-
- if [ -e "$if_file" ]; then
- if_ip=$(sed -n 's/^IPADDR=\(.*\)$/\1/p' ${if_file})
- if_mask=$(sed -n 's/^NETMASK=\(.*\)$/\1/p' ${if_file})
- if_gw=$(sed -n 's/^GATEWAY=\(.*\)$/\1/p' ${if_file})
- if_metric=$(sed -n 's/^METRIC=\(.*\)$/\1/p' ${if_file})
- if_dns1=$(sed -n 's/^DNS1=\(.*\)$/\1/p' ${if_file})
- if_dns2=$(sed -n 's/^DNS2=\(.*\)$/\1/p' ${if_file})
- else
- echo "ERROR: ifcfg file missing for ${interface}"
- return 1
- fi
-
- if [ -z "$if_mask" ]; then
- # we can look for PREFIX here, then convert it to NETMASK
- if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${if_file})
- if_mask=$(prefix2mask ${if_prefix})
- fi
-
- if [[ -z "$if_ip" || -z "$if_mask" ]]; then
- echo "ERROR: IPADDR or NETMASK/PREFIX missing for ${interface}"
- return 1
- elif [[ -z "$if_gw" && "$3" == "external" ]]; then
- echo "ERROR: GATEWAY missing for ${interface}, which is external"
- return 1
- fi
-
- # move old config file to .orig
- mv -f ${if_file} ${if_file}.orig
- echo "DEVICE=${interface}
-DEVICETYPE=ovs
-TYPE=OVSPort
-PEERDNS=no
-BOOTPROTO=static
-NM_CONTROLLED=no
-ONBOOT=yes
-OVS_BRIDGE=${bridge}
-PROMISC=yes" > ${if_file}
-
-
- # create bridge cfg
- echo "DEVICE=${bridge}
-DEVICETYPE=ovs
-IPADDR=${if_ip}
-NETMASK=${if_mask}
-BOOTPROTO=static
-ONBOOT=yes
-TYPE=OVSBridge
-PROMISC=yes
-PEERDNS=no" > ${ovs_file}
-
- if [ -n "$if_gw" ]; then
- echo "GATEWAY=${if_gw}" >> ${ovs_file}
- fi
-
- if [ -n "$if_metric" ]; then
- echo "METRIC=${if_metric}" >> ${ovs_file}
- fi
-
- if [[ -n "$if_dns1" || -n "$if_dns2" ]]; then
- sed -i '/PEERDNS/c\PEERDNS=yes' ${ovs_file}
-
- if [ -n "$if_dns1" ]; then
- echo "DNS1=${if_dns1}" >> ${ovs_file}
- fi
-
- if [ -n "$if_dns2" ]; then
- echo "DNS2=${if_dns2}" >> ${ovs_file}
- fi
- fi
-
- sudo systemctl restart network
-}
-
-##detach interface from OVS and set the network config correctly
-##params: bridge to detach from
-##assumes only 1 real interface attached to OVS
-function detach_interface_from_ovs {
- local bridge
- local port_output ports_no_orig
- local net_path
- local if_ip if_mask if_gw if_prefix
- local if_metric if_dns1 if_dns2
-
- net_path=/etc/sysconfig/network-scripts/
- if [[ -z "$1" ]]; then
- return 1
- else
- bridge=$1
- fi
-
- # if no interfaces attached then return
- if ! ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*"; then
- return 0
- fi
-
- # look for .orig ifcfg files to use
- port_output=$(ovs-vsctl list-ports ${bridge} | grep -Ev "vnet[0-9]*")
- while read -r line; do
- if [ -z "$line" ]; then
- continue
- elif [ -e ${net_path}/ifcfg-${line}.orig ]; then
- mv -f ${net_path}/ifcfg-${line}.orig ${net_path}/ifcfg-${line}
- elif [ -e ${net_path}/ifcfg-${bridge} ]; then
- if_ip=$(sed -n 's/^IPADDR=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_mask=$(sed -n 's/^NETMASK=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_gw=$(sed -n 's/^GATEWAY=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_metric=$(sed -n 's/^METRIC=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_dns1=$(sed -n 's/^DNS1=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
- if_dns2=$(sed -n 's/^DNS2=\(.*\)$/\1/p' ${net_path}/ifcfg-${bridge})
-
- if [ -z "$if_mask" ]; then
- if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${net_path}/ifcfg-${bridge})
- if_mask=$(prefix2mask ${if_prefix})
- fi
-
- if [[ -z "$if_ip" || -z "$if_mask" ]]; then
- echo "ERROR: IPADDR or PREFIX/NETMASK missing for ${bridge} and no .orig file for interface ${line}"
- return 1
- fi
-
- # create if cfg
- echo "DEVICE=${line}
-IPADDR=${if_ip}
-NETMASK=${if_mask}
-BOOTPROTO=static
-ONBOOT=yes
-TYPE=Ethernet
-NM_CONTROLLED=no
-PEERDNS=no" > ${net_path}/ifcfg-${line}
-
- if [ -n "$if_gw" ]; then
- echo "GATEWAY=${if_gw}" >> ${net_path}/ifcfg-${line}
- fi
-
- if [ -n "$if_metric" ]; then
- echo "METRIC=${if_metric}" >> ${net_path}/ifcfg-${line}
- fi
-
- if [[ -n "$if_dns1" || -n "$if_dns2" ]]; then
- sed -i '/PEERDNS/c\PEERDNS=yes' ${net_path}/ifcfg-${line}
-
- if [ -n "$if_dns1" ]; then
- echo "DNS1=${if_dns1}" >> ${net_path}/ifcfg-${line}
- fi
-
- if [ -n "$if_dns2" ]; then
- echo "DNS2=${if_dns2}" >> ${net_path}/ifcfg-${line}
- fi
- fi
- break
- else
- echo "ERROR: Real interface ${line} attached to bridge, but no interface or ${bridge} ifcfg file exists"
- return 1
- fi
-
- done <<< "$port_output"
-
- # modify the bridge ifcfg file
- # to remove IP params
- sudo sed -i 's/IPADDR=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/NETMASK=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/GATEWAY=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/DNS1=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/DNS2=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/METRIC=.*//' ${net_path}/ifcfg-${bridge}
- sudo sed -i 's/PEERDNS=.*//' ${net_path}/ifcfg-${bridge}
-
- sudo systemctl restart network
-}
-
-# Update iptables rule for external network reach internet
-# for virtual deployments
-# params: external_cidr
-function configure_undercloud_nat {
- local external_cidr
- if [[ -z "$1" ]]; then
- return 1
- else
- external_cidr=$1
- fi
-
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
-iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
-iptables -t nat -A POSTROUTING -s ${external_cidr} -o eth0 -j MASQUERADE
-iptables -A FORWARD -i eth2 -j ACCEPT
-iptables -A FORWARD -s ${external_cidr} -m state --state ESTABLISHED,RELATED -j ACCEPT
-service iptables save
-EOI
-}
-
-# Interactive prompt handler
-# params: step stage, ex. deploy, undercloud install, etc
-function prompt_user {
- while [ 1 ]; do
- echo -n "Would you like to proceed with ${1}? (y/n) "
- read response
- if [ "$response" == 'y' ]; then
- return 0
- elif [ "$response" == 'n' ]; then
- return 1
- else
- continue
- fi
- done
-}
-
-##checks if prefix exists in string
-##params: string, prefix
-##usage: contains_prefix "deploy_setting_launcher=1" "deploy_setting"
-contains_prefix() {
- local mystr=$1
- local prefix=$2
- if echo $mystr | grep -E "^$prefix.*$" > /dev/null; then
- return 0
- else
- return 1
- fi
-}
-
-##verify internet connectivity
-#params: none
-function verify_internet {
- if ping -c 2 $ping_site > /dev/null; then
- if ping -c 2 $dnslookup_site > /dev/null; then
- echo "${blue}Internet connectivity detected${reset}"
- return 0
- else
- echo "${red}Internet connectivity detected, but DNS lookup failed${reset}"
- return 1
- fi
- else
- echo "${red}No internet connectivity detected${reset}"
- return 1
- fi
-}
-
-##tests if overcloud nodes have external connectivity
-#params:none
-function test_overcloud_connectivity {
- for node in $(undercloud_connect stack ". stackrc && nova list" | grep -Eo "controller-[0-9]+|compute-[0-9]+" | tr -d -) ; do
- if ! overcloud_connect $node "ping -c 2 $ping_site > /dev/null"; then
- echo "${blue}Node ${node} was unable to ping site ${ping_site}${reset}"
- return 1
- fi
- done
- echo "${blue}Overcloud external connectivity OK${reset}"
-}
-
diff --git a/lib/configure-deps-functions.sh b/lib/configure-deps-functions.sh
deleted file mode 100755
index 4c00fbf3..00000000
--- a/lib/configure-deps-functions.sh
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##download dependencies if missing and configure host
-#params: none
-function configure_deps {
- if ! verify_internet; then
- echo "${red}Will not download dependencies${reset}"
- internet=false
- fi
-
- # verify ip forwarding
- if sysctl net.ipv4.ip_forward | grep 0; then
- sudo sysctl -w net.ipv4.ip_forward=1
- sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
- fi
-
- # ensure no dhcp server is running on jumphost
- if ! sudo systemctl status dhcpd | grep dead; then
- echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
- sudo systemctl stop dhcpd
- sudo systemctl disable dhcpd
- fi
-
- # ensure networks are configured
- systemctl status libvirtd || systemctl start libvirtd
- systemctl status openvswitch || systemctl start openvswitch
-
- # For baremetal we only need to create/attach Undercloud to admin and external
- if [ "$virtual" == "FALSE" ]; then
- virsh_enabled_networks="admin external"
- else
- virsh_enabled_networks=$enabled_network_list
- fi
-
- # ensure default network is configured correctly
- libvirt_dir="/usr/share/libvirt/networks"
- virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml
- virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default
- virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
-
- if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
- for network in ${enabled_network_list}; do
- echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
- ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
- virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
-<network ipv6='yes'>
- <name>$network</name>
- <forward mode='bridge'/>
- <bridge name='${NET_MAP[$network]}'/>
- <virtualport type='openvswitch'/>
-</network>
-EOF
- if ! (virsh net-list --all | grep " $network " > /dev/null); then
- echo "${red}ERROR: unable to create network: ${network}${reset}"
- exit 1;
- fi
- rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
- virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
- virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
- done
-
- echo -e "${blue}INFO: Bridges set: ${reset}"
- ovs-vsctl list-br
-
- # bridge interfaces to correct OVS instances for baremetal deployment
- for network in ${enabled_network_list}; do
- if [[ "$network" != "admin" && "$network" != "external" ]]; then
- continue
- fi
- this_interface=$(eval echo \${${network}_installer_vm_members})
- # check if this a bridged interface for this network
- if [[ ! -z "$this_interface" || "$this_interface" != "none" ]]; then
- if ! attach_interface_to_ovs ${NET_MAP[$network]} ${this_interface} ${network}; then
- echo -e "${red}ERROR: Unable to bridge interface ${this_interface} to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
- exit 1
- else
- echo -e "${blue}INFO: Interface ${this_interface} bridged to bridge ${NET_MAP[$network]} for enabled network: ${network}${reset}"
- fi
- else
- echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
- exit 1
- fi
- done
- else
- # verify virtualbmc is installed for a virtual install
- if ! rpm -q python2-virtualbmc; then
- echo -e "${red}ERROR: Package python2-virtualbmc is required to do a virtual install.$reset"
- exit 1
- fi
- for network in ${OPNFV_NETWORK_TYPES}; do
- if ! ovs-vsctl --may-exist add-br ${NET_MAP[$network]}; then
- echo -e "${red}ERROR: Failed to create ovs bridge ${NET_MAP[$network]}${reset}"
- exit 1
- fi
- echo "${blue}INFO: Creating Virsh Network: $network${reset}"
- virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
-<network ipv6='yes'>
-<name>$network</name>
-<forward mode='bridge'/>
-<bridge name='${NET_MAP[$network]}'/>
-<virtualport type='openvswitch'/>
-</network>
-EOF
- if ! (virsh net-list --all | grep $network > /dev/null); then
- echo "${red}ERROR: unable to create network: ${network}${reset}"
- exit 1;
- fi
- rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null;
- virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network
- virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network
- done
-
- echo -e "${blue}INFO: Bridges set: ${reset}"
- ovs-vsctl list-br
- fi
-
- echo -e "${blue}INFO: virsh networks set: ${reset}"
- virsh net-list
-
- # ensure storage pool exists and is started
- virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images
- virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default)
-
- # Virt flag check is Arch dependent on x86
- if [ "$(uname -i)" == 'x86_64' ]; then
- if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then
- echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \
-Are you sure you have enabled vmx in your bios or hypervisor?${reset}"
- fi
-
- if ! lsmod | grep kvm > /dev/null; then modprobe kvm; fi
- if ! lsmod | grep kvm_intel > /dev/null; then modprobe kvm_intel; fi
-
- if ! lsmod | grep kvm > /dev/null; then
- echo "${red}kvm kernel modules not loaded!${reset}"
- return 1
- fi
-
- # try to enabled nested kvm
- if [ "$virtual" == "TRUE" ]; then
- nested_kvm=`cat /sys/module/kvm_intel/parameters/nested`
- if [ "$nested_kvm" != "Y" ]; then
- # try to enable nested kvm
- echo 'options kvm-intel nested=1' > /etc/modprobe.d/kvm_intel.conf
- if rmmod kvm_intel; then
- modprobe kvm_intel
- fi
- nested_kvm=`cat /sys/module/kvm_intel/parameters/nested`
- fi
- if [ "$nested_kvm" != "Y" ]; then
- echo "${red}Cannot enable nested kvm, falling back to qemu for deployment${reset}"
- DEPLOY_OPTIONS+=" --libvirt-type qemu"
- else
- echo "${blue}Nested kvm enabled, deploying with kvm acceleration${reset}"
- fi
- fi
- fi
-
- ##sshkeygen for root
- if [ ! -e ~/.ssh/id_rsa.pub ]; then
- ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
- fi
-
- echo "${blue}All dependencies installed and running${reset}"
-}
diff --git a/lib/configure-vm b/lib/configure-vm
deleted file mode 100755
index 5cb45218..00000000
--- a/lib/configure-vm
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import math
-import os
-import random
-
-import libvirt
-
-templatedir = os.getenv('LIB', '/var/opt/opnfv/lib') + '/installer/'
-
-MAX_NUM_MACS = math.trunc(0xff/2)
-
-
-def generate_baremetal_macs(count=1):
- """Generate an Ethernet MAC address suitable for baremetal testing."""
- # NOTE(dprince): We generate our own bare metal MAC address's here
- # instead of relying on libvirt so that we can ensure the
- # locally administered bit is set low. (The libvirt default is
- # to set the 2nd MSB high.) This effectively allows our
- # fake baremetal VMs to more accurately behave like real hardware
- # and fixes issues with bridge/DHCP configurations which rely
- # on the fact that bridges assume the MAC address of the lowest
- # attached NIC.
- # MACs generated for a given machine will also be in sequential
- # order, which matches how most BM machines are laid out as well.
- # Additionally we increment each MAC by two places.
- macs = []
-
- if count > MAX_NUM_MACS:
- raise ValueError("The MAX num of MACS supported is %i." % MAX_NUM_MACS)
-
- base_nums = [0x00,
- random.randint(0x00, 0xff),
- random.randint(0x00, 0xff),
- random.randint(0x00, 0xff),
- random.randint(0x00, 0xff)]
- base_mac = ':'.join(map(lambda x: "%02x" % x, base_nums))
-
- start = random.randint(0x00, 0xff)
- if (start + (count * 2)) > 0xff:
- # leave room to generate macs in sequence
- start = 0xff - count * 2
- for num in range(0, count*2, 2):
- mac = start + num
- macs.append(base_mac + ":" + ("%02x" % mac))
- return macs
-
-def main():
- parser = argparse.ArgumentParser(
- description="Configure a kvm virtual machine for the seed image.")
- parser.add_argument('--name', default='seed',
- help='the name to give the machine in libvirt.')
- parser.add_argument('--image',
- help='Use a custom image file (must be qcow2).')
- parser.add_argument('--diskbus', default='sata',
- help='Choose an alternate bus type for the disk')
- parser.add_argument('--baremetal-interface', nargs='+', default=['brbm'],
- help='The interface which bare metal nodes will be connected to.')
- parser.add_argument('--engine', default='kvm',
- help='The virtualization engine to use')
- parser.add_argument('--arch', default='i686',
- help='The architecture to use')
- parser.add_argument('--memory', default='2097152',
- help="Maximum memory for the VM in KB.")
- parser.add_argument('--cpus', default='1',
- help="CPU count for the VM.")
- parser.add_argument('--bootdev', default='hd',
- help="What boot device to use (hd/network).")
- parser.add_argument('--seed', default=False, action='store_true',
- help='Create a seed vm with two interfaces.')
- parser.add_argument('--ovsbridge', default="",
- help='Place the seed public interface on this ovs bridge.')
- parser.add_argument('--libvirt-nic-driver', default='virtio',
- help='The libvirt network driver to use')
- parser.add_argument('--enable-serial-console', action="store_true",
- help='Enable a serial console')
- parser.add_argument('--direct-boot',
- help='Enable directboot to <value>.{vmlinux & initrd}')
- parser.add_argument('--kernel-arg', action="append", dest='kernel_args',
- help='Kernel arguments, use multiple time for multiple args.')
- parser.add_argument('--uri', default='qemu:///system',
- help='The server uri with which to connect.')
- args = parser.parse_args()
- with file(templatedir + '/domain.xml', 'rb') as f:
- source_template = f.read()
- imagefile = '/var/lib/libvirt/images/seed.qcow2'
- if args.image:
- imagefile = args.image
- imagefile = os.path.realpath(imagefile)
- params = {
- 'name': args.name,
- 'imagefile': imagefile,
- 'engine': args.engine,
- 'arch': args.arch,
- 'memory': args.memory,
- 'cpus': args.cpus,
- 'bootdev': args.bootdev,
- 'network': '',
- 'enable_serial_console': '',
- 'direct_boot': '',
- 'kernel_args': '',
- 'user_interface': '',
- }
- if args.image is not None:
- params['imagefile'] = args.image
-
- # Configure the bus type for the target disk device
- params['diskbus'] = args.diskbus
- nicparams = {
- 'nicdriver': args.libvirt_nic_driver,
- 'ovsbridge': args.ovsbridge,
- }
- if args.seed:
- if args.ovsbridge:
- params['network'] = """
- <interface type='bridge'>
- <source bridge='%(ovsbridge)s'/>
- <virtualport type='openvswitch'/>
- <model type='%(nicdriver)s'/>
- </interface>""" % nicparams
- else:
- params['network'] = """
- <!-- regular natted network, for access to the vm -->
- <interface type='network'>
- <source network='default'/>
- <model type='%(nicdriver)s'/>
- </interface>""" % nicparams
-
- macs = generate_baremetal_macs(len(args.baremetal_interface))
-
- params['bm_network'] = ""
- for bm_interface, mac in zip(args.baremetal_interface, macs):
- bm_interface_params = {
- 'bminterface': bm_interface,
- 'bmmacaddress': mac,
- 'nicdriver': args.libvirt_nic_driver,
- }
- params['bm_network'] += """
- <!-- bridged 'bare metal' network on %(bminterface)s -->
- <interface type='network'>
- <mac address='%(bmmacaddress)s'/>
- <source network='%(bminterface)s'/>
- <model type='%(nicdriver)s'/>
- </interface>""" % bm_interface_params
-
- if args.enable_serial_console:
- params['enable_serial_console'] = """
- <serial type='pty'>
- <target port='0'/>
- </serial>
- <console type='pty'>
- <target type='serial' port='0'/>
- </console>
- """
- if args.direct_boot:
- params['direct_boot'] = """
- <kernel>/var/lib/libvirt/images/%(direct_boot)s.vmlinuz</kernel>
- <initrd>/var/lib/libvirt/images/%(direct_boot)s.initrd</initrd>
- """ % { 'direct_boot': args.direct_boot }
- if args.kernel_args:
- params['kernel_args'] = """
- <cmdline>%s</cmdline>
- """ % ' '.join(args.kernel_args)
-
- if args.arch == 'aarch64':
-
- params['direct_boot'] += """
- <loader readonly='yes' type='pflash'>/usr/share/AAVMF/AAVMF_CODE.fd</loader>
- <nvram>/var/lib/libvirt/qemu/nvram/centos7.0_VARS.fd</nvram>
- """
- params['user_interface'] = """
- <controller type='virtio-serial' index='0'>
- <address type='virtio-mmio'/>
- </controller>
- <serial type='pty'>
- <target port='0'/>
- </serial>
- <console type='pty'>
- <target type='serial' port='0'/>
- </console>
- <channel type='unix'>
- <target type='virtio' name='org.qemu.guest_agent.0'/>
- <address type='virtio-serial' controller='0' bus='0' port='1'/>
- </channel>
- """
- else:
- params['user_interface'] = """
- <input type='mouse' bus='ps2'/>
- <graphics type='vnc' port='-1' autoport='yes'/>
- <video>
- <model type='cirrus' vram='9216' heads='1'/>
- </video>
- """
-
-
- libvirt_template = source_template % params
- conn=libvirt.open(args.uri)
- a = conn.defineXML(libvirt_template)
- print ("Created machine %s with UUID %s" % (args.name, a.UUIDString()))
-
-if __name__ == '__main__':
- main()
diff --git a/lib/installer/domain.xml b/lib/installer/domain.xml
deleted file mode 100644
index 57a67d87..00000000
--- a/lib/installer/domain.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<domain type='%(engine)s'>
- <name>%(name)s</name>
- <memory unit='KiB'>%(memory)s</memory>
- <vcpu>%(cpus)s</vcpu>
- <cpu mode='host-passthrough'/>
- <os>
- <type arch='%(arch)s'>hvm</type>
- <boot dev='%(bootdev)s'/>
- <bootmenu enable='no'/>
- %(direct_boot)s
- %(kernel_args)s
- </os>
- <features>
- <acpi/>
- <apic/>
- <pae/>
- </features>
- <clock offset='utc'/>
- <on_poweroff>destroy</on_poweroff>
- <on_reboot>restart</on_reboot>
- <on_crash>restart</on_crash>
- <devices>
- <controller type='scsi' model='virtio-scsi' index='0'/>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' cache='unsafe'/>
- <source file='%(imagefile)s'/>
- <target dev='sda' bus='%(diskbus)s'/>
- </disk>
- %(network)s
- %(bm_network)s
- %(enable_serial_console)s
- %(user_interface)s
- </devices>
-</domain>
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
deleted file mode 100755
index b52d0c28..00000000
--- a/lib/overcloud-deploy-functions.sh
+++ /dev/null
@@ -1,503 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##preping it for deployment and launch the deploy
-##params: none
-function overcloud_deploy {
- local num_compute_nodes
- local num_control_nodes
- local dpdk_cores pmd_cores socket_mem ovs_dpdk_perf_flag ovs_option_heat_arr
- declare -A ovs_option_heat_arr
-
- ovs_option_heat_arr['dpdk_cores']=HostCpusList
- ovs_option_heat_arr['pmd_cores']=NeutronDpdkCoreList
- ovs_option_heat_arr['socket_memory']=NeutronDpdkSocketMemory
- ovs_option_heat_arr['memory_channels']=NeutronDpdkMemoryChannels
-
- # OPNFV Default Environment and Network settings
- DEPLOY_OPTIONS+=" -e ${ENV_FILE}"
- DEPLOY_OPTIONS+=" -e network-environment.yaml"
-
- # get number of nodes available in inventory
- num_control_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:control /home/stack/instackenv.json")
- num_compute_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:compute /home/stack/instackenv.json")
-
- # Custom Deploy Environment Templates
- if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
- if [ "${deploy_options_array['sfc']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-sfc-opendaylight.yaml"
- elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-bgpvpn-opendaylight.yaml"
- if [ "${deploy_options_array['gluon']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/services/gluon.yaml"
- fi
- elif [ "${deploy_options_array['vpp']}" == 'True' ]; then
- if [ "${deploy_options_array['odl_vpp_netvirt']}" == "True" ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-netvirt-vpp.yaml"
- elif [ "${deploy_options_array['odl_vpp_routing_node']}" == "dvr" ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-fdio-dvr.yaml"
- else
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight-honeycomb.yaml"
- fi
- else
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-opendaylight.yaml"
- fi
- SDN_IMAGE=opendaylight
- elif [ "${deploy_options_array['sdn_controller']}" == 'opendaylight-external' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml"
- SDN_IMAGE=opendaylight
- elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then
- if [ "${deploy_options_array['sfc']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-onos-sfc.yaml"
- else
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-onos.yaml"
- fi
- SDN_IMAGE=onos
- elif [ "${deploy_options_array['sdn_controller']}" == 'ovn' ]; then
- if [[ "$ha_enabled" == "True" ]]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-ovn-ha.yaml"
- echo "${red}OVN HA support is not not supported... exiting.${reset}"
- exit 1
- else
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-ovn.yaml"
- fi
- SDN_IMAGE=opendaylight
- elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then
- echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}"
- exit 1
- elif [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
- echo -e "${blue}INFO: SDN Controller disabled...will deploy nosdn scenario${reset}"
- if [ "${deploy_options_array['vpp']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ml2-vpp.yaml"
- elif [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ovs-dpdk.yaml"
- fi
- SDN_IMAGE=opendaylight
- else
- echo "${red}Invalid sdn_controller: ${deploy_options_array['sdn_controller']}${reset}"
- echo "${red}Valid choices are opendaylight, opendaylight-external, onos, opencontrail, False, or null${reset}"
- exit 1
- fi
-
- # Enable Tacker
- if [ "${deploy_options_array['tacker']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/enable_tacker.yaml"
- fi
-
- # Enable Congress
- if [ "${deploy_options_array['congress']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/enable_congress.yaml"
- fi
-
- # Enable Real Time Kernel (kvm4nfv)
- if [ "${deploy_options_array['rt_kvm']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /home/stack/enable_rt_kvm.yaml"
- fi
-
- # Enable Barometer service
- if [ "${deploy_options_array['barometer']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/enable_barometer.yaml"
- fi
-
-# Make sure the correct overcloud image is available
- if [ ! -f $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
- echo "${red} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
- echo "Please install the opnfv-apex package to provide this overcloud image for deployment.${reset}"
- exit 1
- fi
-
- echo "Copying overcloud image to Undercloud"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
- scp ${SSH_OPTIONS[@]} $IMAGES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
-
- # disable neutron openvswitch agent from starting
- if [[ -n "${deploy_options_array['sdn_controller']}" && "${deploy_options_array['sdn_controller']}" != 'False' ]]; then
- echo -e "${blue}INFO: Disabling neutron-openvswitch-agent from systemd${reset}"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -f /etc/systemd/system/multi-user.target.wants/neutron-openvswitch-agent.service" \
- --run-command "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent.service" \
- -a overcloud-full.qcow2
-EOI
- fi
-
- if [ "${deploy_options_array['vpn']}" == 'True' ]; then
- echo -e "${blue}INFO: Enabling ZRPC and Quagga${reset}"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize \
- --run-command "systemctl enable zrpcd" \
- -a overcloud-full.qcow2
-EOI
- fi
-
- # Install ovs-dpdk inside the overcloud image if it is enabled.
- if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' || "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
- # install dpdk packages before ovs
- echo -e "${blue}INFO: Enabling kernel modules for dpdk inside overcloud image${reset}"
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- cat << EOF > vfio_pci.modules
-#!/bin/bash
-exec /sbin/modprobe vfio_pci >/dev/null 2>&1
-EOF
-
- cat << EOF > uio_pci_generic.modules
-#!/bin/bash
-exec /sbin/modprobe uio_pci_generic >/dev/null 2>&1
-EOF
-
- LIBGUESTFS_BACKEND=direct virt-customize --upload vfio_pci.modules:/etc/sysconfig/modules/ \
- --upload uio_pci_generic.modules:/etc/sysconfig/modules/ \
- --run-command "chmod 0755 /etc/sysconfig/modules/vfio_pci.modules" \
- --run-command "chmod 0755 /etc/sysconfig/modules/uio_pci_generic.modules" \
- -a overcloud-full.qcow2
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- sed -i "/OS::TripleO::ComputeExtraConfigPre:/c\ OS::TripleO::ComputeExtraConfigPre: ./ovs-dpdk-preconfig.yaml" network-environment.yaml
- fi
-
-EOI
-
- elif [ "${deploy_options_array['dataplane']}" != 'ovs' ]; then
- echo "${red}${deploy_options_array['dataplane']} not supported${reset}"
- exit 1
- fi
-
- if [ "$debug" == 'TRUE' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full.qcow2 --root-password password:opnfvapex"
- fi
-
- # upgrade ovs into ovs ovs 2.6.1 with NSH function if SFC is enabled
- if [[ "${deploy_options_array['sfc']}" == 'True' && "${deploy_options_array['dataplane']}" == 'ovs' ]]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_kmod_rpm_name}" \
- --run-command "yum upgrade -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_rpm_name}" \
- -a overcloud-full.qcow2
-EOI
- fi
-
- # Patch neutron with using OVS external interface for router and add generic linux NS interface driver
- if [[ "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron-patch-NSDriver.patch" \
- -a overcloud-full.qcow2
-EOI
-
- # Configure routing node for odl-fdio
- if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
- if [[ "${deploy_options_array['odl_vpp_routing_node']}" == 'dvr' ]]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/OS::TripleO::Services::NeutronDhcpAgent/d" ${ENV_FILE}
- sed -i "/NeutronDhcpAgentsPerNetwork:/ c\ NeutronDhcpAgentsPerNetwork: $num_compute_nodes" ${ENV_FILE}
- sed -i "$ a\ - OS::TripleO::Services::NeutronDhcpAgent" ${ENV_FILE}
-# TODO: Update VPP version to 17.10 when specific version is known
-# LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum remove -y vpp-lib" \
-# --run-command "yum install -y /root/fdio_dvr/*.rpm" \
-# --run-command "rm -f /etc/sysctl.d/80-vpp.conf" \
-# -a overcloud-full.qcow2
-EOI
- else
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/opendaylight::vpp_routing_node:/c\ opendaylight::vpp_routing_node: ${deploy_options_array['odl_vpp_routing_node']}.${domain_name}" ${ENV_FILE}
-EOI
- fi
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/ControllerExtraConfig:/ c\ ControllerExtraConfig:\n tripleo::profile::base::neutron::agents::honeycomb::interface_role_mapping: ['${tenant_nic_mapping_controller_members}:tenant-interface']" ${ENV_FILE}
- sed -i "/NovaComputeExtraConfig:/ c\ NovaComputeExtraConfig:\n tripleo::profile::base::neutron::agents::honeycomb::interface_role_mapping: ['${tenant_nic_mapping_compute_members}:tenant-interface','${external_nic_mapping_compute_members}:public-interface']" ${ENV_FILE}
-EOI
-
- fi
- fi
-
- if [ -n "${deploy_options_array['performance']}" ]; then
- ovs_dpdk_perf_flag="False"
- for option in "${performance_options[@]}" ; do
- if [ "${arr[1]}" == "vpp" ]; then
- if [ "${arr[0]}" == "Compute" ]; then
- role='NovaCompute'
- else
- role=${arr[0]}
- fi
- if [ "${arr[2]}" == "main-core" ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/${role}ExtraConfig:/ c\ ${role}ExtraConfig:\n fdio::vpp_cpu_main_core: '${arr[3]}'" ${ENV_FILE}
-EOI
- elif [ "${arr[2]}" == "corelist-workers" ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/${role}ExtraConfig:/ c\ ${role}ExtraConfig:\n fdio::vpp_cpu_corelist_workers: '${arr[3]}'" ${ENV_FILE}
-EOI
- fi
- fi
- arr=($option)
- # use compute's kernel settings for all nodes for now.
- if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "kernel" ]; then
- kernel_args+=" ${arr[2]}=${arr[3]}"
- fi
- if [ "${arr[0]}" == "Compute" ] && [ "${arr[1]}" == "ovs" ]; then
- eval "${arr[2]}=${arr[3]}"
- ovs_dpdk_perf_flag="True"
- fi
- done
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/ComputeKernelArgs:/c\ ComputeKernelArgs: '$kernel_args'" ${ENV_FILE}
- sed -i "$ a\resource_registry:\n OS::TripleO::NodeUserData: first-boot.yaml" ${ENV_FILE}
- sed -i "/NovaSchedulerDefaultFilters:/c\ NovaSchedulerDefaultFilters: 'RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter'" ${ENV_FILE}
-EOI
-
- if [[ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' && "$ovs_dpdk_perf_flag" == "True" ]]; then
- for ovs_option in ${!ovs_option_heat_arr[@]}; do
- if [ -n "${!ovs_option}" ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/${ovs_option_heat_arr[$ovs_option]}:/c\ ${ovs_option_heat_arr[$ovs_option]}: '${!ovs_option}'" ${ENV_FILE}
-EOI
- fi
- done
- fi
- fi
-
- if [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
- if [ "${deploy_options_array['dataplane']}" == "fdio" ]; then
- if [ "$tenant_nic_mapping_controller_members" == "$tenant_nic_mapping_compute_members" ]; then
- echo -e "${blue}INFO: nosdn fdio deployment...installing correct vpp packages...${reset}"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- sed -i "/NeutronVPPAgentPhysnets:/c\ NeutronVPPAgentPhysnets: 'datacentre:${tenant_nic_mapping_controller_members}'" ${ENV_FILE}
-EOI
- else
- echo -e "${red}Compute and Controller must use the same tenant nic name, please modify network setting file.${reset}"
- exit 1
- fi
- fi
- fi
-
- # Set ODL version accordingly
- if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && -n "${deploy_options_array['odl_version']}" ]]; then
- case "${deploy_options_array['odl_version']}" in
- carbon) odl_version=''
- ;;
- nitrogen) odl_version='nitrogen'
- ;;
- *) echo -e "${red}Invalid ODL version ${deploy_options_array['odl_version']}. Please use 'carbon' or 'nitrogen'.${reset}"
- exit 1
- ;;
- esac
-
- if [[ -n "$odl_version" ]]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
- --run-command "yum -y install /root/${odl_version}/*" \
- --run-command "rm -rf /etc/puppet/modules/opendaylight" \
- --run-command "cd /etc/puppet/modules/ && tar xzf /root/puppet-opendaylight-master.tar.gz" \
- -a overcloud-full.qcow2
-EOI
- fi
- fi
-
- # Override ODL for fdio scenarios
- if [[ "${deploy_options_array['odl_vpp_netvirt']}" == 'True' && "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
- --run-command "yum -y install /root/opendaylight-7.0.0-0.1.20170531snap665.el7.noarch.rpm" \
- -a overcloud-full.qcow2
-EOI
-# elif [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['dataplane']}" == 'fdio' ]]; then
-# if [[ "${deploy_options_array['odl_vpp_routing_node']}" != 'dvr' ]]; then
-# ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-# LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -rf /opt/opendaylight/*" \
-# --run-command "tar zxvf /root/fdio_odl_carbon.tar.gz -C /opt/opendaylight/ --strip-components=1" \
-# --run-command "chown odl:odl -R /opt/opendaylight" \
-# -a overcloud-full.qcow2
-#EOI
-# fi
- fi
-
- # Override ODL if we enable dvr for fdio
-# TODO: Update ODL version when specific version is known.
-# if [[ "${deploy_options_array['odl_vpp_routing_node']}" == 'dvr' ]]; then
-# ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-# LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -rf /opt/opendaylight/*" \
-# --run-command "tar zxvf /root/fdio_odl_carbon.tar.gz -C /opt/opendaylight/ --strip-components=1" \
-# --run-command "chown odl:odl -R /opt/opendaylight" \
-# -a overcloud-full.qcow2
-#EOI
-# fi
-
-
-
- # check if ceph should be enabled
- if [ "${deploy_options_array['ceph']}" == 'True' ]; then
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
- fi
-
- if [ "${deploy_options_array['sdn_controller']}" == 'ovn' ]; then
- # The epoch in deloran's ovs is 1: and in leif's is 0:
- # so we have to execute a downgrade instead of an update
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
- LIBGUESTFS_BACKEND=direct virt-customize \
- --run-command "cd /root/ovs28 && yum update -y *openvswitch*" \
- --run-command "cd /root/ovs28 && yum downgrade -y *openvswitch*" \
- -a overcloud-full.qcow2
-EOI
- fi
-
- # check if HA is enabled
- if [[ "$ha_enabled" == "True" ]]; then
- if [ "$num_control_nodes" -lt 3 ]; then
- echo -e "${red}ERROR: Number of control nodes in inventory is less than 3 and HA is enabled: ${num_control_nodes}. Check your inventory file.${reset}"
- exit 1
- else
- DEPLOY_OPTIONS+=" --control-scale ${num_control_nodes}"
- DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
- echo -e "${blue}INFO: Number of control nodes set for deployment: ${num_control_nodes}${reset}"
- fi
- else
- if [ "$num_control_nodes" -lt 1 ]; then
- echo -e "${red}ERROR: Number of control nodes in inventory is less than 1: ${num_control_nodes}. Check your inventory file.${reset}"
- exit 1
- fi
- fi
-
- if [ "$num_compute_nodes" -le 0 ]; then
- echo -e "${red}ERROR: Invalid number of compute nodes: ${num_compute_nodes}. Check your inventory file.${reset}"
- exit 1
- else
- echo -e "${blue}INFO: Number of compute nodes set for deployment: ${num_compute_nodes}${reset}"
- DEPLOY_OPTIONS+=" --compute-scale ${num_compute_nodes}"
- fi
-
- DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
-
- DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
- if [[ "$virtual" == "TRUE" ]]; then
- DEPLOY_OPTIONS+=" -e virtual-environment.yaml"
- echo 'Ensuring Virtual BMC device status'
- for i in $(vbmc list | grep down | awk '{ print $2}'); do
- vbmc start $i
- sleep 5
- done
- vbmc list
- else
- DEPLOY_OPTIONS+=" -e baremetal-environment.yaml"
- fi
-
- echo -e "${blue}INFO: Deploy options set:\n${DEPLOY_OPTIONS}${reset}"
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-# Create a key for use by nova for live migration
-echo "Creating nova SSH key for nova resize support"
-ssh-keygen -f nova_id_rsa -b 1024 -P ""
-public_key=\'\$(cat nova_id_rsa.pub | cut -d ' ' -f 2)\'
-sed -i "s#replace_public_key:#key: \$public_key#g" ${ENV_FILE}
-python -c 'open("opnfv-environment-new.yaml", "w").write((open("${ENV_FILE}").read().replace("replace_private_key:", "key: \"" + "".join(open("nova_id_rsa").readlines()).replace("\\n","\\\n") + "\"")))'
-mv -f opnfv-environment-new.yaml ${ENV_FILE}
-
-source stackrc
-set -o errexit
-# Workaround for APEX-207 where sometimes swift proxy is down
-if ! sudo systemctl status openstack-swift-proxy > /dev/null; then
- sudo systemctl restart openstack-swift-proxy
-fi
-echo "Uploading overcloud glance images"
-openstack overcloud image upload
-
-echo "Configuring undercloud and discovering nodes"
-
-
-if [[ -z "$virtual" ]]; then
- openstack overcloud node import instackenv.json
- openstack overcloud node introspect --all-manageable --provide
- #if [[ -n "$root_disk_list" ]]; then
- # TODO: replace node configure boot with ironic node-update
- # TODO: configure boot is not used in ocata here anymore
- #openstack overcloud node configure boot --root-device=${root_disk_list}
- #https://github.com/openstack/tripleo-quickstart-extras/blob/master/roles/overcloud-prep-images/templates/overcloud-prep-images.sh.j2#L73-L130
- #ironic node-update $ironic_node add properties/root_device='{"{{ node['key'] }}": "{{ node['value'] }}"}'
- #fi
-else
- openstack overcloud node import --provide instackenv.json
-fi
-
-openstack flavor set --property "cpu_arch"="x86_64" baremetal
-openstack flavor set --property "cpu_arch"="x86_64" control
-openstack flavor set --property "cpu_arch"="x86_64" compute
-echo "Configuring nameserver on ctlplane network"
-dns_server_ext=''
-for dns_server in ${dns_servers}; do
- dns_server_ext="\${dns_server_ext} --dns-nameserver \${dns_server}"
-done
-openstack subnet set ctlplane-subnet \${dns_server_ext}
-sed -i '/CloudDomain:/c\ CloudDomain: '${domain_name} ${ENV_FILE}
-echo "Executing overcloud deployment, this could run for an extended period without output."
-sleep 60 #wait for Hypervisor stats to check-in to nova
-# save deploy command so it can be used for debugging
-cat > deploy_command << EOF
-openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
-EOF
-EOI
-
- if [ "$interactive" == "TRUE" ]; then
- if ! prompt_user "Overcloud Deployment"; then
- echo -e "${blue}INFO: User requests exit${reset}"
- exit 0
- fi
- fi
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source stackrc
-openstack overcloud deploy --templates $DEPLOY_OPTIONS --timeout 90
-if ! openstack stack list | grep CREATE_COMPLETE 1>/dev/null; then
- $(typeset -f debug_stack)
- debug_stack
- exit 1
-fi
-EOI
-
- # Configure DPDK and restart ovs agent after bringing up br-phy
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "DPDK config failed, exiting..."; exit 1)
-source stackrc
-set -o errexit
-for node in \$(nova list | grep novacompute | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-echo "Checking DPDK status and bringing up br-phy on \$node"
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-set -o errexit
-sudo dpdk-devbind -s
-sudo ifup br-phy
-if [[ -z "${deploy_options_array['sdn_controller']}" || "${deploy_options_array['sdn_controller']}" == 'False' ]]; then
- echo "Restarting openvswitch agent to pick up VXLAN tunneling"
- sudo systemctl restart neutron-openvswitch-agent
-fi
-EOF
-done
-EOI
- elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI || (echo "SFC config failed, exiting..."; exit 1)
-source stackrc
-set -o errexit
-for node in \$(nova list | grep controller | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-echo "Configuring networking_sfc.conf on \$node"
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-set -o errexit
-sudo ln -s /etc/neutron/networking_sfc.conf /etc/neutron/conf.d/neutron-server/networking_sfc.conf
-sudo systemctl restart neutron-server
-EOF
-done
-EOI
- fi
-
- if [ "$debug" == 'TRUE' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source overcloudrc
-echo "Keystone Endpoint List:"
-openstack endpoint list
-echo "Keystone Service List"
-openstack service list
-EOI
- fi
-}
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
deleted file mode 100755
index 2114c0b7..00000000
--- a/lib/parse-functions.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Parser functions used by OPNFV Apex
-
-##parses network settings yaml into globals
-parse_network_settings() {
- local output
-
- if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $BASE/network-environment.yaml); then
- echo -e "${blue}${output}${reset}"
- eval "$output"
- else
- echo -e "${red}ERROR: Failed to parse network settings file $NETSETS ${reset}"
- exit 1
- fi
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- if [[ ! $enabled_network_list =~ "tenant" ]]; then
- echo -e "${red}ERROR: tenant network is not enabled for ovs-dpdk ${reset}"
- exit 1
- fi
- fi
-}
-
-##parses deploy settings yaml into globals
-parse_deploy_settings() {
- local output
- if output=$(python3 -B $LIB/python/apex_python_utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
- echo -e "${blue}${output}${reset}"
- eval "$output"
- else
- echo -e "${red}ERROR: Failed to parse deploy settings file $DEPLOY_SETTINGS_FILE ${reset}"
- exit 1
- fi
-
-}
-
-##parses baremetal yaml settings into compatible json
-##writes the json to undercloud:instackenv.json
-##params: none
-##usage: parse_inventory_file
-parse_inventory_file() {
- local output
- if [ "$virtual" == "TRUE" ]; then inv_virt="--virtual"; fi
- if [[ "$ha_enabled" == "True" ]]; then inv_ha="--ha"; fi
- instackenv_output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha)
- #Copy instackenv.json to undercloud
- echo -e "${blue}Parsed instackenv JSON:\n${instackenv_output}${reset}"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-cat > instackenv.json << EOF
-$instackenv_output
-EOF
-EOI
- if output=$(python3 -B $LIB/python/apex_python_utils.py parse-inventory -f $INVENTORY_FILE $inv_virt $inv_ha --export-bash); then
- echo -e "${blue}${output}${reset}"
- eval "$output"
- else
- echo -e "${red}ERROR: Failed to parse inventory bash settings file ${INVENTORY_FILE}${reset}"
- exit 1
- fi
-
-}
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
deleted file mode 100755
index 7678b0d3..00000000
--- a/lib/post-install-functions.sh
+++ /dev/null
@@ -1,281 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##Post configuration after install
-##params: none
-function configure_post_install {
- local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af external_network_ipv6
- external_network_ipv6=False
- opnfv_attach_networks="admin"
- if [[ $enabled_network_list =~ "external" ]]; then
- opnfv_attach_networks+=' external'
- fi
-
- echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
-
- echo -e "${blue}INFO: Configuring ssh for root to overcloud nodes...${reset}"
- # copy host key to instack
- scp ${SSH_OPTIONS[@]} /root/.ssh/id_rsa.pub "stack@$UNDERCLOUD":jumphost_id_rsa.pub
-
- # add host key to overcloud nodes authorized keys
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-source stackrc
-nodes=\$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-for node in \$nodes; do
-cat ~/jumphost_id_rsa.pub | ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" 'cat >> ~/.ssh/authorized_keys'
-done
-EOI
-
- echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
- for network in ${opnfv_attach_networks}; do
- ovs_ip=$(find_ip ${NET_MAP[$network]})
- tmp_ip=''
- if [ -n "$ovs_ip" ]; then
- echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
- else
- echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
- # use last IP of allocation pool
- eval "ip_range=\${${network}_overcloud_ip_range}"
- ovs_ip=${ip_range##*,}
- eval "net_cidr=\${${network}_cidr}"
- if [[ $ovs_ip =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
- af=4
- else
- af=6
- if [ "$network" == "external" ]; then
- external_network_ipv6=True
- fi
- #enable ipv6 on bridge interface
- echo 0 > /proc/sys/net/ipv6/conf/${NET_MAP[$network]}/disable_ipv6
- fi
- sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
- sudo ip link set up ${NET_MAP[$network]}
- tmp_ip=$(find_ip ${NET_MAP[$network]} $af)
- if [ -n "$tmp_ip" ]; then
- echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
- continue
- else
- echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
- return 1
- fi
- fi
- done
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- echo -e "${blue}INFO: Bringing up br-phy and ovs-agent for dpdk compute nodes...${reset}"
- compute_nodes=$(undercloud_connect stack "source stackrc; nova list | grep compute | wc -l")
- i=0
- while [ "$i" -lt "$compute_nodes" ]; do
- overcloud_connect compute${i} "sudo ifup br-phy; sudo systemctl restart neutron-openvswitch-agent"
- i=$((i + 1))
- done
- fi
-
- # TODO fix this when HA SDN controllers are supported
- if [ "${deploy_options_array['sdn_controller']}" != 'False' ]; then
- echo -e "${blue}INFO: Finding SDN Controller IP for overcloudrc...${reset}"
- sdn_controller_ip=$(undercloud_connect stack "source stackrc;nova list | grep controller-0 | cut -d '|' -f 7 | grep -Eo [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
- echo -e "${blue}INFO: SDN Controller IP is ${sdn_controller_ip} ${reset}"
- undercloud_connect stack "echo 'export SDN_CONTROLLER_IP=${sdn_controller_ip}' >> /home/stack/overcloudrc"
- fi
-
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source overcloudrc
-set -o errexit
-echo "Configuring Neutron external network"
-if [[ -n "$external_nic_mapping_compute_vlan" && "$external_nic_mapping_compute_vlan" != 'native' ]]; then
- openstack network create external --project service --external --provider-network-type vlan --provider-segment $external_nic_mapping_compute_vlan --provider-physical-network datacentre
-else
- openstack network create external --project service --external --provider-network-type flat --provider-physical-network datacentre
-fi
-if [ "$external_network_ipv6" == "True" ]; then
- openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $external_gateway --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} --subnet-range $external_cidr --ip-version 6 --ipv6-ra-mode slaac --ipv6-address-mode slaac
-elif [[ "$enabled_network_list" =~ "external" ]]; then
- openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $external_gateway --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} --subnet-range $external_cidr
-else
- # we re-use the introspection range for floating ips with single admin network
- openstack subnet create external-subnet --project service --network external --no-dhcp --gateway $admin_gateway --allocation-pool start=${admin_introspection_range%%,*},end=${admin_introspection_range##*,} --subnet-range $admin_cidr
-fi
-
-if [ "${deploy_options_array['gluon']}" == 'True' ]; then
- echo "Creating Gluon dummy network and subnet"
- openstack network create gluon-network --share --provider-network-type vxlan
- openstack subnet create gluon-subnet --no-gateway --no-dhcp --network gluon-network --subnet-range 0.0.0.0/1
-fi
-
-# Fix project_id and os_tenant_name not in overcloudrc
-# Deprecated openstack client does not need project_id
-# and os_tenant_name anymore but glance client and
-# Rally in general does need it.
-# REMOVE when not needed in Rally/glance-client anymore.
-if ! grep -q "OS_PROJECT_ID" ./overcloudrc;then
- project_id=\$(openstack project list |grep admin|awk '{print \$2}')
- echo "export OS_PROJECT_ID=\$project_id" >> ./overcloudrc
-fi
-if ! grep -q "OS_TENANT_NAME" ./overcloudrc;then
- echo "export OS_TENANT_NAME=admin" >> ./overcloudrc
-fi
-
-if [ "${deploy_options_array['dataplane']}" == 'fdio' ] || [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- for flavor in \$(openstack flavor list -c Name -f value); do
- echo "INFO: Configuring \$flavor to use hugepage"
- nova flavor-key \$flavor set hw:mem_page_size=large
- done
-fi
-
-if [ "${deploy_options_array['congress']}" == 'True' ]; then
- ds_configs="--config username=\$OS_USERNAME
- --config tenant_name=\$OS_PROJECT_NAME
- --config password=\$OS_PASSWORD
- --config auth_url=\$OS_AUTH_URL"
- for s in nova neutronv2 cinder glancev2 keystone; do
- ds_extra_configs=""
- if [ "\$s" == "nova" ]; then
- # nova's latest version is 2.38 but congress relies on nova to do
- # floating ip operation instead of neutron. fip support in nova
- # was deprecated as of 2.35. Hard coding 2.34 for danube.
- # Carlos.Goncalves working on fixes for upstream congress that
- # should be ready for ocata.
- nova_micro_version="2.34"
- #nova_micro_version=\$(nova version-list | grep CURRENT | awk '{print \$10}')
- ds_extra_configs+="--config api_version=\$nova_micro_version"
- fi
- if openstack congress datasource create \$s "\$s" \$ds_configs \$ds_extra_configs; then
- echo "INFO: Datasource: \$s created"
- else
- echo "WARN: Datasource: \$s could NOT be created"
- fi
- done
- if openstack congress datasource create doctor "doctor"; then
- echo "INFO: Datasource: doctor created"
- else
- echo "WARN: Datasource: doctor could NOT be created"
- fi
-fi
-
-
-EOI
-
- # we need to restart neutron-server in Gluon deployments to allow the Gluon core
- # plugin to correctly register itself with Neutron
- if [ "${deploy_options_array['gluon']}" == 'True' ]; then
- echo "Restarting neutron-server to finalize Gluon installation"
- overcloud_connect "controller0" "sudo systemctl restart neutron-server"
- fi
-
- # for virtual, we NAT external network through Undercloud
- # same goes for baremetal if only jumphost has external connectivity
- if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$external_network_ipv6" != "True" ]; then
- if [[ "$enabled_network_list" =~ "external" ]]; then
- nat_cidr=${external_cidr}
- else
- nat_cidr=${admin_cidr}
- fi
- if ! configure_undercloud_nat ${nat_cidr}; then
- echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${nat_cidr}${reset}"
- exit 1
- else
- echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud external network${reset}"
- fi
- fi
-
- # for sfc deployments we need the vxlan workaround
- if [ "${deploy_options_array['sfc']}" == 'True' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-source stackrc
-set -o errexit
-for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
-ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-sudo ifconfig br-int up
-sudo ip route add 123.123.123.0/24 dev br-int
-EOF
-done
-EOI
- fi
-
- ### VSPERF ###
- if [[ "${deploy_options_array['vsperf']}" == 'True' ]]; then
- echo "${blue}\nVSPERF enabled, running build_base_machine.sh\n${reset}"
- overcloud_connect "compute0" "sudo sh -c 'cd /var/opt/vsperf/systems/ && ./build_base_machine.sh 2>&1 > /var/log/vsperf.log'"
- fi
-
- # install docker
- if [ "${deploy_options_array['yardstick']}" == 'True' ] || [ "${deploy_options_array['dovetail']}" == 'True' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sudo yum install docker -y
-sudo systemctl start docker
-sudo systemctl enable docker
-EOI
- fi
-
- # pull yardstick image
- if [ "${deploy_options_array['yardstick']}" == 'True' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sudo docker pull opnfv/yardstick
-EOI
- fi
-
- # pull dovetail image
- if [ "${deploy_options_array['dovetail']}" == 'True' ]; then
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sudo docker pull opnfv/dovetail
-EOI
- fi
-
- # Collect deployment logs
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-mkdir -p ~/deploy_logs
-rm -rf deploy_logs/*
-source stackrc
-set -o errexit
-for node in \$(nova list | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"); do
- ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
- sudo cp /var/log/messages /home/heat-admin/messages.log
- sudo chown heat-admin /home/heat-admin/messages.log
-EOF
-scp ${SSH_OPTIONS[@]} heat-admin@\$node:/home/heat-admin/messages.log ~/deploy_logs/\$node.messages.log
-if [ "$debug" == "TRUE" ]; then
- nova list --ip \$node
- echo "---------------------------"
- echo "-----/var/log/messages-----"
- echo "---------------------------"
- cat ~/deploy_logs/\$node.messages.log
- echo "---------------------------"
- echo "----------END LOG----------"
- echo "---------------------------"
-
- ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
-echo "$node"
-sudo openstack-status
-EOF
-fi
- ssh -T ${SSH_OPTIONS[@]} "heat-admin@\$node" <<EOF
- sudo rm -f /home/heat-admin/messages.log
-EOF
-done
-
-# Print out the undercloud IP and dashboard URL
-source stackrc
-echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
-echo "Overcloud dashboard available at http://\$(openstack stack output show overcloud PublicVip -f json | jq -r .output_value)/dashboard"
-EOI
-
-if [[ "$ha_enabled" == 'True' ]]; then
- if [ "$debug" == "TRUE" ]; then
- echo "${blue}\nChecking pacemaker service status\n${reset}"
- fi
- overcloud_connect "controller0" "for i in \$(sudo pcs status | grep '^* ' | cut -d ' ' -f 2 | cut -d '_' -f 1 | uniq); do echo \"WARNING: Service: \$i not running\"; done"
-fi
-
-if [ "${deploy_options_array['vpn']}" == 'True' ]; then
- # Check zrpcd is started
- overcloud_connect "controller0" "sudo systemctl status zrpcd > /dev/null || echo 'WARNING: zrpcd is not running on controller0'"
-fi
-}
diff --git a/lib/python/apex/__init__.py b/lib/python/apex/__init__.py
deleted file mode 100644
index b2a45f7d..00000000
--- a/lib/python/apex/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-from .network_settings import NetworkSettings
-from .deploy_settings import DeploySettings
-from .network_environment import NetworkEnvironment
-from .clean import clean_nodes
-from .inventory import Inventory
diff --git a/lib/python/apex/clean.py b/lib/python/apex/clean.py
deleted file mode 100644
index 184b5ec9..00000000
--- a/lib/python/apex/clean.py
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Clean will eventually be migrated to this file
-
-import logging
-import pyipmi
-import pyipmi.interfaces
-import sys
-
-from .common import utils
-
-
-def clean_nodes(inventory):
- inv_dict = utils.parse_yaml(inventory)
- if inv_dict is None or 'nodes' not in inv_dict:
- logging.error("Inventory file is empty or missing nodes definition")
- sys.exit(1)
- for node, node_info in inv_dict['nodes'].items():
- logging.info("Cleaning node: {}".format(node))
- try:
- interface = pyipmi.interfaces.create_interface(
- 'ipmitool', interface_type='lanplus')
- connection = pyipmi.create_connection(interface)
- connection.session.set_session_type_rmcp(node_info['ipmi_ip'])
- connection.target = pyipmi.Target(0x20)
- connection.session.set_auth_type_user(node_info['ipmi_user'],
- node_info['ipmi_pass'])
- connection.session.establish()
- connection.chassis_control_power_down()
- except Exception as e:
- logging.error("Failure while shutting down node {}".format(e))
- sys.exit(1)
diff --git a/lib/python/apex/common/__init__.py b/lib/python/apex/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/lib/python/apex/common/__init__.py
+++ /dev/null
diff --git a/lib/python/apex/common/constants.py b/lib/python/apex/common/constants.py
deleted file mode 100644
index 3aa28eab..00000000
--- a/lib/python/apex/common/constants.py
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-ADMIN_NETWORK = 'admin'
-TENANT_NETWORK = 'tenant'
-EXTERNAL_NETWORK = 'external'
-STORAGE_NETWORK = 'storage'
-API_NETWORK = 'api'
-CONTROLLER = 'controller'
-COMPUTE = 'compute'
-
-OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, TENANT_NETWORK, EXTERNAL_NETWORK,
- STORAGE_NETWORK, API_NETWORK]
-DNS_SERVERS = ["8.8.8.8", "8.8.4.4"]
-NTP_SERVER = ["pool.ntp.org"]
-COMPUTE = 'compute'
-CONTROLLER = 'controller'
-ROLES = [COMPUTE, CONTROLLER]
-DOMAIN_NAME = 'localdomain.com'
-COMPUTE_PRE = "OS::TripleO::ComputeExtraConfigPre"
-CONTROLLER_PRE = "OS::TripleO::ControllerExtraConfigPre"
-PRE_CONFIG_DIR = "/usr/share/openstack-tripleo-heat-templates/puppet/" \
- "extraconfig/pre_deploy/"
-DEFAULT_ROOT_DEV = 'sda'
diff --git a/lib/python/apex/common/utils.py b/lib/python/apex/common/utils.py
deleted file mode 100644
index 8e6896fa..00000000
--- a/lib/python/apex/common/utils.py
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-
-
-def str2bool(var):
- if isinstance(var, bool):
- return var
- else:
- return var.lower() in ("true", "yes")
-
-
-def parse_yaml(yaml_file):
- with open(yaml_file) as f:
- parsed_dict = yaml.safe_load(f)
- return parsed_dict
-
-
-def write_str(bash_str, path=None):
- if path:
- with open(path, 'w') as file:
- file.write(bash_str)
- else:
- print(bash_str)
diff --git a/lib/python/apex/deploy_settings.py b/lib/python/apex/deploy_settings.py
deleted file mode 100644
index 06185941..00000000
--- a/lib/python/apex/deploy_settings.py
+++ /dev/null
@@ -1,195 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Michael Chapman (michapma@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import yaml
-import logging
-
-from .common import utils
-
-REQ_DEPLOY_SETTINGS = ['sdn_controller',
- 'odl_version',
- 'tacker',
- 'congress',
- 'dataplane',
- 'sfc',
- 'vpn',
- 'vpp',
- 'ceph',
- 'gluon',
- 'rt_kvm']
-
-OPT_DEPLOY_SETTINGS = ['performance',
- 'vsperf',
- 'ceph_device',
- 'yardstick',
- 'dovetail',
- 'odl_vpp_routing_node',
- 'odl_vpp_netvirt',
- 'barometer']
-
-VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
-VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
-VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
-
-
-class DeploySettings(dict):
- """
- This class parses a APEX deploy settings yaml file into an object
-
- Currently the parsed object is dumped into a bash global definition file
- for deploy.sh consumption. This object will later be used directly as
- deployment script move to python.
- """
- def __init__(self, filename):
- init_dict = {}
- if isinstance(filename, str):
- with open(filename, 'r') as deploy_settings_file:
- init_dict = yaml.safe_load(deploy_settings_file)
- else:
- # assume input is a dict to build from
- init_dict = filename
-
- super().__init__(init_dict)
- self._validate_settings()
-
- def _validate_settings(self):
- """
- Validates the deploy settings file provided
-
- DeploySettingsException will be raised if validation fails.
- """
-
- if 'deploy_options' not in self:
- raise DeploySettingsException("No deploy options provided in"
- " deploy settings file")
- if 'global_params' not in self:
- raise DeploySettingsException("No global options provided in"
- " deploy settings file")
-
- deploy_options = self['deploy_options']
- if not isinstance(deploy_options, dict):
- raise DeploySettingsException("deploy_options should be a dictionary")
-
- if ('gluon' in self['deploy_options'] and
- 'vpn' in self['deploy_options']):
- if (self['deploy_options']['gluon'] is True and
- self['deploy_options']['vpn'] is False):
- raise DeploySettingsException(
- "Invalid deployment configuration: "
- "If gluon is enabled, "
- "vpn also needs to be enabled")
-
- for setting, value in deploy_options.items():
- if setting not in REQ_DEPLOY_SETTINGS + OPT_DEPLOY_SETTINGS:
- raise DeploySettingsException("Invalid deploy_option {} "
- "specified".format(setting))
- if setting == 'dataplane':
- if value not in VALID_DATAPLANES:
- planes = ' '.join(VALID_DATAPLANES)
- raise DeploySettingsException(
- "Invalid dataplane {} specified. Valid dataplanes:"
- " {}".format(value, planes))
-
- for req_set in REQ_DEPLOY_SETTINGS:
- if req_set not in deploy_options:
- if req_set == 'dataplane':
- self['deploy_options'][req_set] = 'ovs'
- elif req_set == 'ceph':
- self['deploy_options'][req_set] = True
- else:
- self['deploy_options'][req_set] = False
-
- if 'performance' in deploy_options:
- if not isinstance(deploy_options['performance'], dict):
- raise DeploySettingsException("Performance deploy_option "
- "must be a dictionary.")
- for role, role_perf_sets in deploy_options['performance'].items():
- if role not in VALID_ROLES:
- raise DeploySettingsException("Performance role {} "
- "is not valid, choose "
- "from {}".format(
- role,
- " ".join(VALID_ROLES)
- ))
-
- for key in role_perf_sets:
- if key not in VALID_PERF_OPTS:
- raise DeploySettingsException("Performance option {} "
- "is not valid, choose "
- "from {}".format(
- key,
- " ".join(
- VALID_PERF_OPTS)
- ))
-
- def _dump_performance(self):
- """
- Creates performance settings string for bash consumption.
-
- Output will be in the form of a list that can be iterated over in
- bash, with each string being the direct input to the performance
- setting script in the form <role> <category> <key> <value> to
- facilitate modification of the correct image.
- """
- bash_str = 'performance_options=(\n'
- deploy_options = self['deploy_options']
- for role, settings in deploy_options['performance'].items():
- for category, options in settings.items():
- for key, value in options.items():
- bash_str += "\"{} {} {} {}\"\n".format(role,
- category,
- key,
- value)
- bash_str += ')\n'
- bash_str += '\n'
- bash_str += 'performance_roles=(\n'
- for role in self['deploy_options']['performance']:
- bash_str += role + '\n'
- bash_str += ')\n'
- bash_str += '\n'
-
- return bash_str
-
- def _dump_deploy_options_array(self):
- """
- Creates deploy settings array in bash syntax.
- """
- bash_str = ''
- for key, value in self['deploy_options'].items():
- if not isinstance(value, bool):
- bash_str += "deploy_options_array[{}]=\"{}\"\n".format(key,
- value)
- else:
- bash_str += "deploy_options_array[{}]={}\n".format(key,
- value)
- return bash_str
-
- def dump_bash(self, path=None):
- """
- Prints settings for bash consumption.
-
- If optional path is provided, bash string will be written to the file
- instead of stdout.
- """
- bash_str = ''
- for key, value in self['global_params'].items():
- bash_str += "{}={}\n".format(key, value)
- if 'performance' in self['deploy_options']:
- bash_str += self._dump_performance()
- bash_str += self._dump_deploy_options_array()
- utils.write_str(bash_str, path)
-
-
-class DeploySettingsException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
deleted file mode 100644
index 64f47b49..00000000
--- a/lib/python/apex/inventory.py
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Dan Radez (dradez@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import json
-import platform
-
-from .common import constants
-from .common import utils
-
-
-class Inventory(dict):
- """
- This class parses an APEX inventory yaml file into an object. It
- generates or detects all missing fields for deployment.
-
- It then collapses one level of identification from the object to
- convert it to a structure that can be dumped into a json file formatted
- such that Triple-O can read the resulting json as an instackenv.json file.
- """
- def __init__(self, source, ha=True, virtual=False):
- init_dict = {}
- self.root_device = constants.DEFAULT_ROOT_DEV
- if isinstance(source, str):
- with open(source, 'r') as inventory_file:
- yaml_dict = yaml.safe_load(inventory_file)
- # collapse node identifiers from the structure
- init_dict['nodes'] = list(map(lambda n: n[1],
- yaml_dict['nodes'].items()))
- else:
- # assume input is a dict to build from
- init_dict = source
-
- # move ipmi_* to pm_*
- # make mac a list
- def munge_nodes(node):
- node['pm_addr'] = node['ipmi_ip']
- node['pm_password'] = node['ipmi_pass']
- node['pm_user'] = node['ipmi_user']
- node['mac'] = [node['mac_address']]
- if 'cpus' in node:
- node['cpu'] = node['cpus']
-
- for i in ('ipmi_ip', 'ipmi_pass', 'ipmi_user', 'mac_address',
- 'disk_device'):
- if i == 'disk_device' and 'disk_device' in node.keys():
- self.root_device = node[i]
- else:
- continue
- del node[i]
-
- return node
-
- super().__init__({'nodes': list(map(munge_nodes, init_dict['nodes']))})
-
- # verify number of nodes
- if ha and len(self['nodes']) < 5:
- raise InventoryException('You must provide at least 5 '
- 'nodes for HA baremetal deployment')
- elif len(self['nodes']) < 2:
- raise InventoryException('You must provide at least 2 nodes '
- 'for non-HA baremetal deployment')
-
- if virtual:
- self['arch'] = platform.machine()
- self['host-ip'] = '192.168.122.1'
- self['power_manager'] = \
- 'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager'
- self['seed-ip'] = ''
- self['ssh-key'] = 'INSERT_STACK_USER_PRIV_KEY'
- self['ssh-user'] = 'root'
-
- def dump_instackenv_json(self):
- print(json.dumps(dict(self), sort_keys=True, indent=4))
-
- def dump_bash(self, path=None):
- """
- Prints settings for bash consumption.
-
- If optional path is provided, bash string will be written to the file
- instead of stdout.
- """
- bash_str = "{}={}\n".format('root_disk_list', str(self.root_device))
- utils.write_str(bash_str, path)
-
-
-class InventoryException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
diff --git a/lib/python/apex/ip_utils.py b/lib/python/apex/ip_utils.py
deleted file mode 100644
index ae60b705..00000000
--- a/lib/python/apex/ip_utils.py
+++ /dev/null
@@ -1,230 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-import ipaddress
-import subprocess
-import re
-import logging
-
-
-def get_ip_range(start_offset=None, count=None, end_offset=None,
- cidr=None, interface=None):
- """
- Generate IP range for a network (cidr) or an interface.
-
- If CIDR is provided, it will take precedence over interface. In this case,
- The entire CIDR IP address space is considered usable. start_offset will be
- calculated from the network address, and end_offset will be calculated from
- the last address in subnet.
-
- If interface is provided, the interface IP will be used to calculate
- offsets:
- - If the interface IP is in the first half of the address space,
- start_offset will be calculated from the interface IP, and end_offset
- will be calculated from end of address space.
- - If the interface IP is in the second half of the address space,
- start_offset will be calculated from the network address in the address
- space, and end_offset will be calculated from the interface IP.
-
- 2 of start_offset, end_offset and count options must be provided:
- - If start_offset and end_offset are provided, a range from
- start_offset to end_offset will be returned.
- - If count is provided, a range from either start_offset to
- (start_offset+count) or (end_offset-count) to end_offset will be
- returned. The IP range returned will be of size <count>.
- Both start_offset and end_offset must be greater than 0.
-
- Returns IP range in the format of "first_addr,second_addr" or exception
- is raised.
- """
- if cidr:
- if count and start_offset and not end_offset:
- start_index = start_offset
- end_index = start_offset + count - 1
- elif count and end_offset and not start_offset:
- end_index = -1 - end_offset
- start_index = -1 - end_index - count + 1
- elif start_offset and end_offset and not count:
- start_index = start_offset
- end_index = -1 - end_offset
- else:
- raise IPUtilsException("Argument error: must pass in exactly 2 of"
- " start_offset, end_offset and count")
-
- start_ip = cidr[start_index]
- end_ip = cidr[end_index]
- network = cidr
- elif interface:
- network = interface.network
- number_of_addr = network.num_addresses
- if interface.ip < network[int(number_of_addr / 2)]:
- if count and start_offset and not end_offset:
- start_ip = interface.ip + start_offset
- end_ip = start_ip + count - 1
- elif count and end_offset and not start_offset:
- end_ip = network[-1 - end_offset]
- start_ip = end_ip - count + 1
- elif start_offset and end_offset and not count:
- start_ip = interface.ip + start_offset
- end_ip = network[-1 - end_offset]
- else:
- raise IPUtilsException(
- "Argument error: must pass in exactly 2 of"
- " start_offset, end_offset and count")
- else:
- if count and start_offset and not end_offset:
- start_ip = network[start_offset]
- end_ip = start_ip + count - 1
- elif count and end_offset and not start_offset:
- end_ip = interface.ip - end_offset
- start_ip = end_ip - count + 1
- elif start_offset and end_offset and not count:
- start_ip = network[start_offset]
- end_ip = interface.ip - end_offset
- else:
- raise IPUtilsException(
- "Argument error: must pass in exactly 2 of"
- " start_offset, end_offset and count")
-
- else:
- raise IPUtilsException("Must pass in cidr or interface to generate"
- "ip range")
-
- range_result = _validate_ip_range(start_ip, end_ip, network)
- if range_result:
- ip_range = "{},{}".format(start_ip, end_ip)
- return ip_range
- else:
- raise IPUtilsException("Invalid IP range: {},{} for network {}"
- .format(start_ip, end_ip, network))
-
-
-def get_ip(offset, cidr=None, interface=None):
- """
- Returns an IP in a network given an offset.
-
- Either cidr or interface must be provided, cidr takes precedence.
-
- If cidr is provided, offset is calculated from network address.
- If interface is provided, offset is calculated from interface IP.
-
- offset can be positive or negative, but the resulting IP address must also
- be contained in the same subnet, otherwise an exception will be raised.
-
- returns a IP address object.
- """
- if cidr:
- ip = cidr[0 + offset]
- network = cidr
- elif interface:
- ip = interface.ip + offset
- network = interface.network
- else:
- raise IPUtilsException("Must pass in cidr or interface to generate IP")
-
- if ip not in network:
- raise IPUtilsException("IP {} not in network {}".format(ip, network))
- else:
- return str(ip)
-
-
-def get_interface(nic, address_family=4):
- """
- Returns interface object for a given NIC name in the system
-
- Only global address will be returned at the moment.
-
- Returns interface object if an address is found for the given nic,
- otherwise returns None.
- """
- if not nic.strip():
- logging.error("empty nic name specified")
- return None
- output = subprocess.getoutput("/usr/sbin/ip -{} addr show {} scope global"
- .format(address_family, nic))
- if address_family == 4:
- pattern = re.compile("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2}")
- elif address_family == 6:
- pattern = re.compile("([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}")
- else:
- raise IPUtilsException("Invalid address family: {}"
- .format(address_family))
- match = re.search(pattern, output)
- if match:
- logging.info("found interface {} ip: {}".format(nic, match.group()))
- return ipaddress.ip_interface(match.group())
- else:
- logging.info("interface ip not found! ip address output:\n{}"
- .format(output))
- return None
-
-
-def find_gateway(interface):
- """
- Validate gateway on the system
-
- Ensures that the provided interface object is in fact configured as default
- route on the system.
-
- Returns gateway IP (reachable from interface) if default route is found,
- otherwise returns None.
- """
-
- address_family = interface.version
- output = subprocess.getoutput("/usr/sbin/ip -{} route".format(
- address_family))
-
- pattern = re.compile("default\s+via\s+(\S+)\s+")
- match = re.search(pattern, output)
-
- if match:
- gateway_ip = match.group(1)
- reverse_route_output = subprocess.getoutput("/usr/sbin/ip route get {}"
- .format(gateway_ip))
- pattern = re.compile("{}.+src\s+{}".format(gateway_ip, interface.ip))
- if not re.search(pattern, reverse_route_output):
- logging.warning("Default route doesn't match interface specified: "
- "{}".format(reverse_route_output))
- return None
- else:
- return gateway_ip
- else:
- logging.warning("Can't find gateway address on system")
- return None
-
-
-def _validate_ip_range(start_ip, end_ip, cidr):
- """
- Validates an IP range is in good order and the range is part of cidr.
-
- Returns True if validation succeeds, False otherwise.
- """
- ip_range = "{},{}".format(start_ip, end_ip)
- if end_ip <= start_ip:
- logging.warning("IP range {} is invalid: end_ip should be greater "
- "than starting ip".format(ip_range))
- return False
- if start_ip not in ipaddress.ip_network(cidr):
- logging.warning('start_ip {} is not in network {}'
- .format(start_ip, cidr))
- return False
- if end_ip not in ipaddress.ip_network(cidr):
- logging.warning('end_ip {} is not in network {}'.format(end_ip, cidr))
- return False
-
- return True
-
-
-class IPUtilsException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
diff --git a/lib/python/apex/network_environment.py b/lib/python/apex/network_environment.py
deleted file mode 100644
index dd9530b8..00000000
--- a/lib/python/apex/network_environment.py
+++ /dev/null
@@ -1,219 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Tim Rozet (trozet@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import re
-from .common.constants import (
- CONTROLLER,
- COMPUTE,
- ADMIN_NETWORK,
- TENANT_NETWORK,
- STORAGE_NETWORK,
- EXTERNAL_NETWORK,
- API_NETWORK,
- CONTROLLER_PRE,
- COMPUTE_PRE,
- PRE_CONFIG_DIR
-)
-from .network_settings import NetworkSettings
-
-HEAT_NONE = 'OS::Heat::None'
-PORTS = '/ports'
-# Resources defined by <resource name>: <prefix>
-EXTERNAL_RESOURCES = {'OS::TripleO::Network::External': None,
- 'OS::TripleO::Network::Ports::ExternalVipPort': PORTS,
- 'OS::TripleO::Controller::Ports::ExternalPort': PORTS,
- 'OS::TripleO::Compute::Ports::ExternalPort': PORTS}
-TENANT_RESOURCES = {'OS::TripleO::Network::Tenant': None,
- 'OS::TripleO::Controller::Ports::TenantPort': PORTS,
- 'OS::TripleO::Compute::Ports::TenantPort': PORTS}
-STORAGE_RESOURCES = {'OS::TripleO::Network::Storage': None,
- 'OS::TripleO::Network::Ports::StorageVipPort': PORTS,
- 'OS::TripleO::Controller::Ports::StoragePort': PORTS,
- 'OS::TripleO::Compute::Ports::StoragePort': PORTS}
-API_RESOURCES = {'OS::TripleO::Network::InternalApi': None,
- 'OS::TripleO::Network::Ports::InternalApiVipPort': PORTS,
- 'OS::TripleO::Controller::Ports::InternalApiPort': PORTS,
- 'OS::TripleO::Compute::Ports::InternalApiPort': PORTS}
-
-# A list of flags that will be set to true when IPv6 is enabled
-IPV6_FLAGS = ["NovaIPv6", "MongoDbIPv6", "CorosyncIPv6", "CephIPv6",
- "RabbitIPv6", "MemcachedIPv6"]
-
-reg = 'resource_registry'
-param_def = 'parameter_defaults'
-
-
-class NetworkEnvironment(dict):
- """
- This class creates a Network Environment to be used in TripleO Heat
- Templates.
-
- The class builds upon an existing network-environment file and modifies
- based on a NetworkSettings object.
- """
- def __init__(self, net_settings, filename, compute_pre_config=False,
- controller_pre_config=False):
- """
- Create Network Environment according to Network Settings
- """
- init_dict = {}
- if isinstance(filename, str):
- with open(filename, 'r') as net_env_fh:
- init_dict = yaml.safe_load(net_env_fh)
-
- super().__init__(init_dict)
- if not isinstance(net_settings, NetworkSettings):
- raise NetworkEnvException('Invalid Network Settings object')
-
- self._set_tht_dir()
-
- nets = net_settings['networks']
-
- admin_cidr = nets[ADMIN_NETWORK]['cidr']
- admin_prefix = str(admin_cidr.prefixlen)
- self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
- self[param_def]['ControlPlaneDefaultRoute'] = \
- nets[ADMIN_NETWORK]['installer_vm']['ip']
- self[param_def]['EC2MetadataIp'] = \
- nets[ADMIN_NETWORK]['installer_vm']['ip']
- self[param_def]['DnsServers'] = net_settings['dns_servers']
-
- if EXTERNAL_NETWORK in net_settings.enabled_network_list:
- external_cidr = net_settings.get_network(EXTERNAL_NETWORK)['cidr']
- self[param_def]['ExternalNetCidr'] = str(external_cidr)
- external_vlan = self._get_vlan(net_settings.get_network(
- EXTERNAL_NETWORK))
- if isinstance(external_vlan, int):
- self[param_def]['NeutronExternalNetworkBridge'] = '""'
- self[param_def]['ExternalNetworkVlanID'] = external_vlan
- external_range = net_settings.get_network(EXTERNAL_NETWORK)[
- 'overcloud_ip_range']
- self[param_def]['ExternalAllocationPools'] = \
- [{'start': str(external_range[0]),
- 'end': str(external_range[1])}]
- self[param_def]['ExternalInterfaceDefaultRoute'] = \
- net_settings.get_network(EXTERNAL_NETWORK)['gateway']
-
- if external_cidr.version == 6:
- postfix = '/external_v6.yaml'
- else:
- postfix = '/external.yaml'
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for EXTERNAL_RESOURCES
- self._config_resource_reg(EXTERNAL_RESOURCES, postfix)
-
- if TENANT_NETWORK in net_settings.enabled_network_list:
- tenant_range = nets[TENANT_NETWORK]['overcloud_ip_range']
- self[param_def]['TenantAllocationPools'] = \
- [{'start': str(tenant_range[0]),
- 'end': str(tenant_range[1])}]
- tenant_cidr = nets[TENANT_NETWORK]['cidr']
- self[param_def]['TenantNetCidr'] = str(tenant_cidr)
- if tenant_cidr.version == 6:
- postfix = '/tenant_v6.yaml'
- # set overlay_ip_version option in Neutron ML2 config
- self[param_def]['NeutronOverlayIPVersion'] = "6"
- else:
- postfix = '/tenant.yaml'
-
- tenant_vlan = self._get_vlan(nets[TENANT_NETWORK])
- if isinstance(tenant_vlan, int):
- self[param_def]['TenantNetworkVlanID'] = tenant_vlan
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for TENANT_RESOURCES
- self._config_resource_reg(TENANT_RESOURCES, postfix)
-
- if STORAGE_NETWORK in net_settings.enabled_network_list:
- storage_range = nets[STORAGE_NETWORK]['overcloud_ip_range']
- self[param_def]['StorageAllocationPools'] = \
- [{'start': str(storage_range[0]),
- 'end': str(storage_range[1])}]
- storage_cidr = nets[STORAGE_NETWORK]['cidr']
- self[param_def]['StorageNetCidr'] = str(storage_cidr)
- if storage_cidr.version == 6:
- postfix = '/storage_v6.yaml'
- else:
- postfix = '/storage.yaml'
- storage_vlan = self._get_vlan(nets[STORAGE_NETWORK])
- if isinstance(storage_vlan, int):
- self[param_def]['StorageNetworkVlanID'] = storage_vlan
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for STORAGE_RESOURCES
- self._config_resource_reg(STORAGE_RESOURCES, postfix)
-
- if API_NETWORK in net_settings.enabled_network_list:
- api_range = nets[API_NETWORK]['overcloud_ip_range']
- self[param_def]['InternalApiAllocationPools'] = \
- [{'start': str(api_range[0]),
- 'end': str(api_range[1])}]
- api_cidr = nets[API_NETWORK]['cidr']
- self[param_def]['InternalApiNetCidr'] = str(api_cidr)
- if api_cidr.version == 6:
- postfix = '/internal_api_v6.yaml'
- else:
- postfix = '/internal_api.yaml'
- api_vlan = self._get_vlan(nets[API_NETWORK])
- if isinstance(api_vlan, int):
- self[param_def]['InternalApiNetworkVlanID'] = api_vlan
- else:
- postfix = '/noop.yaml'
-
- # apply resource registry update for API_RESOURCES
- self._config_resource_reg(API_RESOURCES, postfix)
-
- # Set IPv6 related flags to True. Not that we do not set those to False
- # when IPv4 is configured, we'll use the default or whatever the user
- # may have set.
- if net_settings.get_ip_addr_family() == 6:
- for flag in IPV6_FLAGS:
- self[param_def][flag] = True
-
- def _get_vlan(self, network):
- if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
- return network['nic_mapping'][CONTROLLER]['vlan']
- elif isinstance(network['nic_mapping'][COMPUTE]['vlan'], int):
- return network['nic_mapping'][COMPUTE]['vlan']
- else:
- return 'native'
-
- def _set_tht_dir(self):
- self.tht_dir = None
- for key, prefix in TENANT_RESOURCES.items():
- if prefix is None:
- prefix = ''
- m = re.split('%s/\w+\.yaml' % prefix, self[reg][key])
- if m is not None and len(m) > 1:
- self.tht_dir = m[0]
- break
- if not self.tht_dir:
- raise NetworkEnvException('Unable to parse THT Directory')
-
- def _config_resource_reg(self, resources, postfix):
- for key, prefix in resources.items():
- if prefix is None:
- if postfix == '/noop.yaml':
- self[reg][key] = HEAT_NONE
- continue
- prefix = ''
- self[reg][key] = self.tht_dir + prefix + postfix
-
-
-class NetworkEnvException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
diff --git a/lib/python/apex/network_settings.py b/lib/python/apex/network_settings.py
deleted file mode 100644
index 79b0a9d1..00000000
--- a/lib/python/apex/network_settings.py
+++ /dev/null
@@ -1,360 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import logging
-import ipaddress
-
-from copy import copy
-from .common import utils
-from . import ip_utils
-from .common.constants import (
- CONTROLLER,
- COMPUTE,
- ROLES,
- DOMAIN_NAME,
- DNS_SERVERS,
- NTP_SERVER,
- ADMIN_NETWORK,
- EXTERNAL_NETWORK,
- OPNFV_NETWORK_TYPES,
-)
-
-
-class NetworkSettings(dict):
- """
- This class parses APEX network settings yaml file into an object. It
- generates or detects all missing fields for deployment.
-
- The resulting object will be used later to generate network environment
- file as well as configuring post deployment networks.
-
- Currently the parsed object is dumped into a bash global definition file
- for deploy.sh consumption. This object will later be used directly as
- deployment script move to python.
- """
- def __init__(self, filename):
- init_dict = {}
- if isinstance(filename, str):
- with open(filename, 'r') as network_settings_file:
- init_dict = yaml.safe_load(network_settings_file)
- else:
- # assume input is a dict to build from
- init_dict = filename
- super().__init__(init_dict)
-
- if 'apex' in self:
- # merge two dics Nondestructively
- def merge(pri, sec):
- for key, val in sec.items():
- if key in pri:
- if isinstance(val, dict):
- merge(pri[key], val)
- # else
- # do not overwrite what's already there
- else:
- pri[key] = val
- # merge the apex specific config into the first class settings
- merge(self, copy(self['apex']))
-
- self.enabled_network_list = []
- self.nics = {COMPUTE: {}, CONTROLLER: {}}
- self.nics_specified = {COMPUTE: False, CONTROLLER: False}
- self._validate_input()
-
- def get_network(self, network):
- if network == EXTERNAL_NETWORK and self['networks'][network]:
- for net in self['networks'][network]:
- if 'public' in net:
- return net
-
- raise NetworkSettingsException("The external network, "
- "'public', should be defined "
- "when external networks are "
- "enabled")
- else:
- return self['networks'][network]
-
- def _validate_input(self):
- """
- Validates the network settings file and populates all fields.
-
- NetworkSettingsException will be raised if validation fails.
- """
- if not self['networks'].get(ADMIN_NETWORK, {}).get('enabled', False):
- raise NetworkSettingsException("You must enable admin network "
- "and configure it explicitly or "
- "use auto-detection")
-
- for network in OPNFV_NETWORK_TYPES:
- if network in self['networks']:
- _network = self.get_network(network)
- if _network.get('enabled', True):
- logging.info("{} enabled".format(network))
- self._config_required_settings(network)
- nicmap = _network['nic_mapping']
- self._validate_overcloud_nic_order(network)
- iface = nicmap[CONTROLLER]['members'][0]
- self._config_ip_range(network=network,
- interface=iface,
- ip_range='overcloud_ip_range',
- start_offset=21, end_offset=21)
- self.enabled_network_list.append(network)
- # TODO self._config_optional_settings(network)
- else:
- logging.info("{} disabled, will collapse with "
- "admin network".format(network))
- else:
- logging.info("{} is not in specified, will collapse with "
- "admin network".format(network))
-
- if 'dns-domain' not in self:
- self['domain_name'] = DOMAIN_NAME
- self['dns_servers'] = self.get('dns_nameservers', DNS_SERVERS)
- self['ntp_servers'] = self.get('ntp', NTP_SERVER)
-
- def _validate_overcloud_nic_order(self, network):
- """
- Detects if nic order is specified per profile (compute/controller)
- for network
-
- If nic order is specified in a network for a profile, it should be
- specified for every network with that profile other than admin network
-
- Duplicate nic names are also not allowed across different networks
-
- :param network: network to detect if nic order present
- :return: None
- """
- for role in ROLES:
- _network = self.get_network(network)
- _nicmap = _network.get('nic_mapping', {})
- _role = _nicmap.get(role, {})
- interfaces = _role.get('members', [])
-
- if interfaces:
- interface = interfaces[0]
- if not isinstance(_role.get('vlan', 'native'), int) and \
- any(y == interface for x, y in self.nics[role].items()):
- raise NetworkSettingsException(
- "Duplicate {} already specified for "
- "another network".format(interface))
- self.nics[role][network] = interface
- self.nics_specified[role] = True
- logging.info("{} nic order specified for network {"
- "}".format(role, network))
- else:
- raise NetworkSettingsException(
- "Interface members are not supplied for {} network "
- "for the {} role. Please add nic assignments"
- "".format(network, role))
-
- def _config_required_settings(self, network):
- """
- Configures either CIDR or bridged_interface setting
-
- cidr takes precedence if both cidr and bridged_interface are specified
- for a given network.
-
- When using bridged_interface, we will detect network setting on the
- given NIC in the system. The resulting config in settings object will
- be an ipaddress.network object, replacing the NIC name.
- """
- _network = self.get_network(network)
- # if vlan not defined then default it to native
- if network is not ADMIN_NETWORK:
- for role in ROLES:
- if 'vlan' not in _network['nic_mapping'][role]:
- _network['nic_mapping'][role]['vlan'] = 'native'
-
- cidr = _network.get('cidr')
-
- if cidr:
- cidr = ipaddress.ip_network(_network['cidr'])
- _network['cidr'] = cidr
- logging.info("{}_cidr: {}".format(network, cidr))
- elif 'installer_vm' in _network:
- ucloud_if_list = _network['installer_vm']['members']
- # If cidr is not specified, we need to know if we should find
- # IPv6 or IPv4 address on the interface
- ip = ipaddress.ip_address(_network['installer_vm']['ip'])
- nic_if = ip_utils.get_interface(ucloud_if_list[0], ip.version)
- if nic_if:
- logging.info("{}_bridged_interface: {}".
- format(network, nic_if))
- else:
- raise NetworkSettingsException(
- "Auto detection failed for {}: Unable to find valid "
- "ip for interface {}".format(network, ucloud_if_list[0]))
-
- else:
- raise NetworkSettingsException(
- "Auto detection failed for {}: either installer_vm "
- "members or cidr must be specified".format(network))
-
- # undercloud settings
- if network == ADMIN_NETWORK:
- provisioner_ip = _network['installer_vm']['ip']
- iface = _network['installer_vm']['members'][0]
- if not provisioner_ip:
- _network['installer_vm']['ip'] = self._gen_ip(network, 1)
- self._config_ip_range(network=network, interface=iface,
- ip_range='dhcp_range',
- start_offset=2, count=9)
- self._config_ip_range(network=network, interface=iface,
- ip_range='introspection_range',
- start_offset=11, count=9)
- elif network == EXTERNAL_NETWORK:
- provisioner_ip = _network['installer_vm']['ip']
- iface = _network['installer_vm']['members'][0]
- if not provisioner_ip:
- _network['installer_vm']['ip'] = self._gen_ip(network, 1)
- self._config_ip_range(network=network, interface=iface,
- ip_range='floating_ip_range',
- end_offset=2, count=20)
-
- gateway = _network['gateway']
- interface = _network['installer_vm']['ip']
- self._config_gateway(network, gateway, interface)
-
- def _config_ip_range(self, network, ip_range, interface=None,
- start_offset=None, end_offset=None, count=None):
- """
- Configures IP range for a given setting.
- If the setting is already specified, no change will be made.
- The spec for start_offset, end_offset and count are identical to
- ip_utils.get_ip_range.
- """
- _network = self.get_network(network)
- if ip_range not in _network:
- cidr = _network.get('cidr')
- _ip_range = ip_utils.get_ip_range(start_offset=start_offset,
- end_offset=end_offset,
- count=count,
- cidr=cidr,
- interface=interface)
- _network[ip_range] = _ip_range.split(',')
-
- logging.info("Config IP Range: {} {}".format(network, ip_range))
-
- def _gen_ip(self, network, offset):
- """
- Generate and ip offset within the given network
- """
- _network = self.get_network(network)
- cidr = _network.get('cidr')
- ip = ip_utils.get_ip(offset, cidr)
- logging.info("Config IP: {} {}".format(network, ip))
- return ip
-
- def _config_optional_settings(self, network):
- """
- Configures optional settings:
- - admin_network:
- - provisioner_ip
- - dhcp_range
- - introspection_range
- - public_network:
- - provisioner_ip
- - floating_ip_range
- - gateway
- """
- if network == ADMIN_NETWORK:
- self._config_ip(network, None, 'provisioner_ip', 1)
- self._config_ip_range(network=network,
- ip_range='dhcp_range',
- start_offset=2, count=9)
- self._config_ip_range(network=network,
- ip_range='introspection_range',
- start_offset=11, count=9)
- elif network == EXTERNAL_NETWORK:
- self._config_ip(network, None, 'provisioner_ip', 1)
- self._config_ip_range(network=network,
- ip_range='floating_ip_range',
- end_offset=2, count=20)
- self._config_gateway(network)
-
- def _config_gateway(self, network, gateway, interface):
- """
- Configures gateway setting for a given network.
-
- If cidr is specified, we always use the first address in the address
- space for gateway. Otherwise, we detect the system gateway.
- """
- _network = self.get_network(network)
- if not gateway:
- cidr = _network.get('cidr')
- if cidr:
- _gateway = ip_utils.get_ip(1, cidr)
- else:
- _gateway = ip_utils.find_gateway(interface)
-
- if _gateway:
- _network['gateway'] = _gateway
- else:
- raise NetworkSettingsException("Failed to set gateway")
-
- logging.info("Config Gateway: {} {}".format(network, gateway))
-
- def dump_bash(self, path=None):
- """
- Prints settings for bash consumption.
-
- If optional path is provided, bash string will be written to the file
- instead of stdout.
- """
- def flatten(name, obj, delim=','):
- """
- flatten lists to delim separated strings
- flatten dics to underscored key names and string values
- """
- if isinstance(obj, list):
- return "{}=\'{}\'\n".format(name,
- delim.join(map(lambda x: str(x),
- obj)))
- elif isinstance(obj, dict):
- flat_str = ''
- for k in obj:
- flat_str += flatten("{}_{}".format(name, k), obj[k])
- return flat_str
- elif isinstance(obj, str):
- return "{}='{}'\n".format(name, obj)
- else:
- return "{}={}\n".format(name, str(obj))
-
- bash_str = ''
- for network in self.enabled_network_list:
- _network = self.get_network(network)
- bash_str += flatten(network, _network)
- bash_str += flatten('enabled_network_list',
- self.enabled_network_list, ' ')
- bash_str += flatten('ip_addr_family', self.get_ip_addr_family())
- bash_str += flatten('dns_servers', self['dns_servers'], ' ')
- bash_str += flatten('domain_name', self['dns-domain'], ' ')
- bash_str += flatten('ntp_server', self['ntp_servers'][0], ' ')
- utils.write_str(bash_str, path)
-
- def get_ip_addr_family(self,):
- """
- Returns IP address family for current deployment.
-
- If any enabled network has IPv6 CIDR, the deployment is classified as
- IPv6.
- """
- return max([
- ipaddress.ip_network(self.get_network(n)['cidr']).version
- for n in self.enabled_network_list])
-
-
-class NetworkSettingsException(Exception):
- def __init__(self, value):
- self.value = value
-
- def __str__(self):
- return self.value
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
deleted file mode 100755
index 70fc592d..00000000
--- a/lib/python/apex_python_utils.py
+++ /dev/null
@@ -1,265 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Feng Pan (fpan@redhat.com), Dan Radez (dradez@redhat.com)
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import apex
-import argparse
-import sys
-import logging
-import os
-import yaml
-
-from jinja2 import Environment
-from jinja2 import FileSystemLoader
-
-from apex import NetworkSettings
-from apex import NetworkEnvironment
-from apex import DeploySettings
-from apex import Inventory
-from apex import ip_utils
-
-
-def parse_net_settings(args):
- """
- Parse OPNFV Apex network_settings.yaml config file
- and dump bash syntax to set environment variables
-
- Args:
- - file: string
- file to network_settings.yaml file
- """
- settings = NetworkSettings(args.net_settings_file)
- net_env = NetworkEnvironment(settings, args.net_env_file,
- args.compute_pre_config,
- args.controller_pre_config)
- target = args.target_dir.split('/')
- target.append('network-environment.yaml')
- dump_yaml(dict(net_env), '/'.join(target))
- settings.dump_bash()
-
-
-def dump_yaml(data, file):
- """
- Dumps data to a file as yaml
- :param data: yaml to be written to file
- :param file: filename to write to
- :return:
- """
- with open(file, "w") as fh:
- yaml.dump(data, fh, default_flow_style=False)
-
-
-def parse_deploy_settings(args):
- settings = DeploySettings(args.file)
- settings.dump_bash()
-
-
-def run_clean(args):
- apex.clean_nodes(args.file)
-
-
-def parse_inventory(args):
- inventory = Inventory(args.file, ha=args.ha, virtual=args.virtual)
- if args.export_bash is True:
- inventory.dump_bash()
- else:
- inventory.dump_instackenv_json()
-
-
-def find_ip(args):
- """
- Get and print the IP from a specific interface
-
- Args:
- - interface: string
- network interface name
- - address_family: int
- 4 or 6, respective to ipv4 or ipv6
- """
- interface = ip_utils.get_interface(args.interface,
- args.address_family)
- if interface:
- print(interface.ip)
-
-
-def build_nic_template(args):
- """
- Build and print a Triple-O nic template from jinja template
-
- Args:
- - template: string
- path to jinja template to load
- - enabled_networks: comma delimited list
- list of networks defined in net_env.py
- - ext_net_type: string
- interface or br-ex, defines the external network configuration
- - address_family: string
- 4 or 6, respective to ipv4 or ipv6
- - ovs_dpdk_bridge: string
- bridge name to use as ovs_dpdk
- """
- template_dir, template = args.template.rsplit('/', 1)
-
- netsets = NetworkSettings(args.net_settings_file)
- nets = netsets.get('networks')
- ds = DeploySettings(args.deploy_settings_file).get('deploy_options')
- env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
- template = env.get_template(template)
-
- if ds['dataplane'] == 'fdio':
- nets['tenant']['nic_mapping'][args.role]['phys_type'] = 'vpp_interface'
- if ds['sdn_controller'] == 'opendaylight':
- nets['external'][0]['nic_mapping'][args.role]['phys_type'] =\
- 'vpp_interface'
- if ds.get('odl_vpp_routing_node') == 'dvr':
- nets['admin']['nic_mapping'][args.role]['phys_type'] =\
- 'linux_bridge'
- if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\
- .get('uio-driver'):
- nets['tenant']['nic_mapping'][args.role]['uio-driver'] =\
- ds['performance'][args.role.title()]['vpp']['uio-driver']
- if ds['sdn_controller'] == 'opendaylight':
- nets['external'][0]['nic_mapping'][args.role]['uio-driver'] =\
- ds['performance'][args.role.title()]['vpp']['uio-driver']
- if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\
- .get('interface-options'):
- nets['tenant']['nic_mapping'][args.role]['interface-options'] =\
- ds['performance'][args.role.title()]['vpp']['interface-options']
-
- print(template.render(nets=nets,
- role=args.role,
- external_net_af=netsets.get_ip_addr_family(),
- external_net_type=args.ext_net_type,
- ovs_dpdk_bridge=args.ovs_dpdk_bridge))
-
-
-def get_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument('--debug', action='store_true', default=False,
- help="Turn on debug messages")
- parser.add_argument('-l', '--log-file', default='/var/log/apex/apex.log',
- dest='log_file', help="Log file to log to")
- subparsers = parser.add_subparsers()
- # parse-net-settings
- net_settings = subparsers.add_parser('parse-net-settings',
- help='Parse network settings file')
- net_settings.add_argument('-s', '--net-settings-file',
- default='network-settings.yaml',
- dest='net_settings_file',
- help='path to network settings file')
- net_settings.add_argument('-e', '--net-env-file',
- default="network-environment.yaml",
- dest='net_env_file',
- help='path to network environment file')
- net_settings.add_argument('-td', '--target-dir',
- default="/tmp",
- dest='target_dir',
- help='directory to write the'
- 'network-environment.yaml file')
- net_settings.add_argument('--compute-pre-config',
- default=False,
- action='store_true',
- dest='compute_pre_config',
- help='Boolean to enable Compute Pre Config')
- net_settings.add_argument('--controller-pre-config',
- action='store_true',
- default=False,
- dest='controller_pre_config',
- help='Boolean to enable Controller Pre Config')
-
- net_settings.set_defaults(func=parse_net_settings)
- # find-ip
- get_int_ip = subparsers.add_parser('find-ip',
- help='Find interface ip')
- get_int_ip.add_argument('-i', '--interface', required=True,
- help='Interface name')
- get_int_ip.add_argument('-af', '--address-family', default=4, type=int,
- choices=[4, 6], dest='address_family',
- help='IP Address family')
- get_int_ip.set_defaults(func=find_ip)
- # nic-template
- nic_template = subparsers.add_parser('nic-template',
- help='Build NIC templates')
- nic_template.add_argument('-r', '--role', required=True,
- choices=['controller', 'compute'],
- help='Role template generated for')
- nic_template.add_argument('-t', '--template', required=True,
- dest='template',
- help='Template file to process')
- nic_template.add_argument('-s', '--net-settings-file',
- default='network-settings.yaml',
- dest='net_settings_file',
- help='path to network settings file')
- nic_template.add_argument('-e', '--ext-net-type', default='interface',
- dest='ext_net_type',
- choices=['interface', 'vpp_interface', 'br-ex'],
- help='External network type')
- nic_template.add_argument('-d', '--ovs-dpdk-bridge',
- default=None, dest='ovs_dpdk_bridge',
- help='OVS DPDK Bridge Name')
- nic_template.add_argument('--deploy-settings-file',
- help='path to deploy settings file')
-
- nic_template.set_defaults(func=build_nic_template)
- # parse-deploy-settings
- deploy_settings = subparsers.add_parser('parse-deploy-settings',
- help='Parse deploy settings file')
- deploy_settings.add_argument('-f', '--file',
- default='deploy_settings.yaml',
- help='path to deploy settings file')
- deploy_settings.set_defaults(func=parse_deploy_settings)
- # parse-inventory
- inventory = subparsers.add_parser('parse-inventory',
- help='Parse inventory file')
- inventory.add_argument('-f', '--file',
- default='deploy_settings.yaml',
- help='path to deploy settings file')
- inventory.add_argument('--ha',
- default=False,
- action='store_true',
- help='Indicate if deployment is HA or not')
- inventory.add_argument('--virtual',
- default=False,
- action='store_true',
- help='Indicate if deployment inventory is virtual')
- inventory.add_argument('--export-bash',
- default=False,
- dest='export_bash',
- action='store_true',
- help='Export bash variables from inventory')
- inventory.set_defaults(func=parse_inventory)
-
- clean = subparsers.add_parser('clean',
- help='Parse deploy settings file')
- clean.add_argument('-f', '--file',
- help='path to inventory file')
- clean.set_defaults(func=run_clean)
-
- return parser
-
-
-def main():
- parser = get_parser()
- args = parser.parse_args(sys.argv[1:])
- if args.debug:
- logging.basicConfig(level=logging.DEBUG)
- else:
- apex_log_filename = args.log_file
- os.makedirs(os.path.dirname(apex_log_filename), exist_ok=True)
- logging.basicConfig(filename=apex_log_filename,
- format='%(asctime)s %(levelname)s: %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p',
- level=logging.DEBUG)
- if hasattr(args, 'func'):
- args.func(args)
- else:
- parser.print_help()
- exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/lib/python/build_utils.py b/lib/python/build_utils.py
deleted file mode 100644
index 14327a90..00000000
--- a/lib/python/build_utils.py
+++ /dev/null
@@ -1,108 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Feng Pan (fpan@redhat.com) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import argparse
-import git
-import logging
-import os
-from pygerrit2.rest import GerritRestAPI
-import re
-import shutil
-import sys
-
-
-def clone_fork(args):
- ref = None
- logging.info("Cloning {}".format(args.repo))
-
- try:
- cm = git.Repo(search_parent_directories=True).commit().message
- except git.exc.InvalidGitRepositoryError:
- logging.debug('Current Apex directory is not a git repo: {}'
- .format(os.getcwd()))
- cm = ''
-
- logging.info("Current commit message: {}".format(cm))
- m = re.search('{}:\s*(\S+)'.format(args.repo), cm)
-
- if m:
- change_id = m.group(1)
- logging.info("Using change ID {} from {}".format(change_id, args.repo))
- rest = GerritRestAPI(url=args.url)
- change_str = "changes/{}?o=CURRENT_REVISION".format(change_id)
- change = rest.get(change_str)
- try:
- assert change['status'] not in 'ABANDONED' 'CLOSED',\
- 'Change {} is in {} state'.format(change_id, change['status'])
- if change['status'] == 'MERGED':
- logging.info('Change {} is merged, ignoring...'
- .format(change_id))
- else:
- current_revision = change['current_revision']
- ref = change['revisions'][current_revision]['ref']
- logging.info('setting ref to {}'.format(ref))
- except KeyError:
- logging.error('Failed to get valid change data structure from url '
- '{}/{}, data returned: \n{}'
- .format(change_id, change_str, change))
- raise
-
- # remove existing file or directory named repo
- if os.path.exists(args.repo):
- if os.path.isdir(args.repo):
- shutil.rmtree(args.repo)
- else:
- os.remove(args.repo)
-
- ws = git.Repo.clone_from("{}/{}".format(args.url, args.repo),
- args.repo, b=args.branch)
- if ref:
- git_cmd = ws.git
- git_cmd.fetch("{}/{}".format(args.url, args.repo), ref)
- git_cmd.checkout('FETCH_HEAD')
- logging.info('Checked out commit:\n{}'.format(ws.head.commit.message))
-
-
-def get_parser():
- parser = argparse.ArgumentParser()
- parser.add_argument('--debug', action='store_true', default=False,
- help="Turn on debug messages")
- subparsers = parser.add_subparsers()
- fork = subparsers.add_parser('clone-fork',
- help='Clone fork of dependent repo')
- fork.add_argument('-r', '--repo', required=True, help='Name of repository')
- fork.add_argument('-u', '--url',
- default='https://gerrit.opnfv.org/gerrit',
- help='Gerrit URL of repository')
- fork.add_argument('-b', '--branch',
- default='master',
- help='Branch to checkout')
- fork.set_defaults(func=clone_fork)
- return parser
-
-
-def main():
- parser = get_parser()
- args = parser.parse_args(sys.argv[1:])
- if args.debug:
- logging_level = logging.DEBUG
- else:
- logging_level = logging.INFO
-
- logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p',
- level=logging_level)
- if hasattr(args, 'func'):
- args.func(args)
- else:
- parser.print_help()
- exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
deleted file mode 100755
index 08e1b7cf..00000000
--- a/lib/undercloud-functions.sh
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##verify vm exists, an has a dhcp lease assigned to it
-##params: none
-function setup_undercloud_vm {
- local libvirt_imgs=/var/lib/libvirt/images
- if ! virsh list --all | grep undercloud > /dev/null; then
- undercloud_nets="default admin"
- if [[ $enabled_network_list =~ "external" ]]; then
- undercloud_nets+=" external"
- fi
- define_vm undercloud hd 30 "$undercloud_nets" 4 12288
-
- ### this doesn't work for some reason I was getting hangup events so using cp instead
- #virsh vol-upload --pool default --vol undercloud.qcow2 --file $BASE/stack/undercloud.qcow2
- #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem <http://bugs.centos.org>, 2015-11-03-13:56:46, worker1.bsys.centos.org)
- #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
- #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds
- #error: cannot close volume undercloud.qcow2
- #error: internal error: received hangup / error event on socket
- #error: Reconnected to the hypervisor
-
- cp -f $IMAGES/undercloud.qcow2 $libvirt_imgs/undercloud.qcow2
- cp -f $IMAGES/overcloud-full.vmlinuz $libvirt_imgs/overcloud-full.vmlinuz
- cp -f $IMAGES/overcloud-full.initrd $libvirt_imgs/overcloud-full.initrd
-
- # resize Undercloud machine
- echo "Checking if Undercloud needs to be resized..."
- undercloud_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $libvirt_imgs/undercloud.qcow2 |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
- if [ "$undercloud_size" -lt 30 ]; then
- qemu-img resize /var/lib/libvirt/images/undercloud.qcow2 +25G
- LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $IMAGES/undercloud.qcow2 $libvirt_imgs/undercloud.qcow2
- LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --run-command 'xfs_growfs -d /dev/sda1 || true'
- new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $libvirt_imgs/undercloud.qcow2 |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
- if [ "$new_size" -lt 30 ]; then
- echo "Error resizing Undercloud machine, disk size is ${new_size}"
- exit 1
- else
- echo "Undercloud successfully resized"
- fi
- else
- echo "Skipped Undercloud resize, upstream is large enough"
- fi
-
- else
- echo "Found existing Undercloud VM, exiting."
- exit 1
- fi
-
- # if the VM is not running update the authkeys and start it
- if ! virsh list | grep undercloud > /dev/null; then
- if [ "$debug" == 'TRUE' ]; then
- LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --root-password password:opnfvapex
- fi
-
- echo "Injecting ssh key to Undercloud VM"
- LIBGUESTFS_BACKEND=direct virt-customize -a $libvirt_imgs/undercloud.qcow2 --run-command "mkdir -p /root/.ssh/" \
- --upload ~/.ssh/id_rsa.pub:/root/.ssh/authorized_keys \
- --run-command "chmod 600 /root/.ssh/authorized_keys && restorecon /root/.ssh/authorized_keys" \
- --run-command "cp /root/.ssh/authorized_keys /home/stack/.ssh/" \
- --run-command "chown stack:stack /home/stack/.ssh/authorized_keys && chmod 600 /home/stack/.ssh/authorized_keys"
- virsh start undercloud
- virsh autostart undercloud
- fi
-
- sleep 10 # let undercloud get started up
-
- # get the undercloud VM IP
- CNT=10
- echo -n "${blue}Waiting for Undercloud's dhcp address${reset}"
- undercloud_mac=$(virsh domiflist undercloud | grep default | awk '{ print $5 }')
- while ! $(arp -en | grep ${undercloud_mac} > /dev/null) && [ $CNT -gt 0 ]; do
- echo -n "."
- sleep 10
- CNT=$((CNT-1))
- done
- UNDERCLOUD=$(arp -en | grep ${undercloud_mac} | awk {'print $1'})
-
- if [ -z "$UNDERCLOUD" ]; then
- echo "\n\nCan't get IP for Undercloud. Can Not Continue."
- exit 1
- else
- echo -e "${blue}\rUndercloud VM has IP $UNDERCLOUD${reset}"
- fi
-
- CNT=10
- echo -en "${blue}\rValidating Undercloud VM connectivity${reset}"
- while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do
- echo -n "."
- sleep 3
- CNT=$((CNT-1))
- done
- if [ "$CNT" -eq 0 ]; then
- echo "Failed to contact Undercloud. Can Not Continue"
- exit 1
- fi
- CNT=10
- while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do
- echo -n "."
- sleep 3
- CNT=$((CNT-1))
- done
- if [ "$CNT" -eq 0 ]; then
- echo "Failed to connect to Undercloud. Can Not Continue"
- exit 1
- fi
-
- # extra space to overwrite the previous connectivity output
- echo -e "${blue}\r ${reset}"
- sleep 1
-
- # ensure stack user on Undercloud machine has an ssh key
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
-
- # ssh key fix for stack user
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
-}
-
-##Copy over the glance images and instackenv json file
-##params: none
-function configure_undercloud {
- local controller_nic_template compute_nic_template
- echo
- echo "Copying configuration files to Undercloud"
- echo -e "${blue}Network Environment set for Deployment: ${reset}"
- cat $APEX_TMP_DIR/network-environment.yaml
- scp ${SSH_OPTIONS[@]} $APEX_TMP_DIR/network-environment.yaml "stack@$UNDERCLOUD":
-
- # check for ODL L3/ONOS
- if [ "${deploy_options_array['dataplane']}" == 'fdio' ]; then
- ext_net_type=vpp_interface
- else
- ext_net_type=br-ex
- fi
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- ovs_dpdk_bridge='br-phy'
- else
- ovs_dpdk_bridge=''
- fi
-
- # for some reason putting IP on the bridge fails with pinging validation in OOO
- if [ "${deploy_options_array['sfc']}" == 'True' ]; then
- controller_external='interface'
- else
- controller_external='br-ex'
- fi
-
- if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $controller_external --deploy-settings-file $DEPLOY_SETTINGS_FILE); then
- echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
- exit 1
- fi
-
- if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $BASE/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge" --deploy-settings-file $DEPLOY_SETTINGS_FILE); then
- echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
- exit 1
- fi
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-mkdir nics/
-cat > nics/controller.yaml << EOF
-$controller_nic_template
-EOF
-cat > nics/compute.yaml << EOF
-$compute_nic_template
-EOF
-EOI
-
- # disable requiretty for sudo
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "sed -i 's/Defaults\s*requiretty//'" /etc/sudoers
-
- # configure undercloud on Undercloud VM
- echo "Running undercloud installation and configuration."
- echo "Logging undercloud installation to stack@undercloud:/home/stack/apex-undercloud-install.log"
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-set -e
-openstack-config --set undercloud.conf DEFAULT local_ip ${admin_installer_vm_ip}/${admin_cidr##*/}
-openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_installer_vm_ip}
-openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_cidr}
-openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_dhcp_range%%,*}
-openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_dhcp_range##*,}
-openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_introspection_range}
-openstack-config --set undercloud.conf DEFAULT undercloud_debug false
-openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
-openstack-config --set undercloud.conf DEFAULT enable_ui false
-openstack-config --set undercloud.conf DEFAULT undercloud_update_packages false
-sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
-sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
-
-if [[ -n "${deploy_options_array['ceph_device']}" ]]; then
- sed -i '/ExtraConfig/a\\ ceph::profile::params::osds: {\\x27${deploy_options_array['ceph_device']}\\x27: {}}' ${ENV_FILE}
-fi
-
-sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
-
-if [ "\$(uname -i)" == 'aarch64' ]; then
-
-# These two fixes are done in the base OOO image build right now
-# keeping them here to know that they are done and in case we need
-# to take care of them in the future.
-# # remove syslinux references for aarch64
-# sudo sh -xc 'cd /etc/puppet/modules/ironic/manifests && patch -p0 < puppet-ironic-manifests-pxe-pp-aarch64.patch'
-# sudo sed -i '/syslinux-extlinux/d' /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp
-#
-# # disable use_linkat in swift
-# sudo sed -i 's/o_tmpfile_supported()/False/' /usr/lib/python2.7/site-packages/swift/obj/diskfile.py
-
- openstack-config --set undercloud.conf DEFAULT ipxe_enabled false
- sudo sed -i '/ _link_ip_address_pxe_configs/a\\ _link_mac_pxe_configs(task)' /usr/lib/python2.7/site-packages/ironic/common/pxe_utils.py
-fi
-
-openstack undercloud install &> apex-undercloud-install.log || {
- # cat the undercloud install log incase it fails
- echo "ERROR: openstack undercloud install has failed. Dumping Log:"
- cat apex-undercloud-install.log
- exit 1
-}
-
-if [ "\$(uname -i)" == 'aarch64' ]; then
-sudo yum -y reinstall grub2-efi shim
-sudo cp /boot/efi/EFI/centos/grubaa64.efi /tftpboot/grubaa64.efi
-sudo mkdir -p /tftpboot/EFI/centos
-sudo tee /tftpboot/EFI/centos/grub.cfg > /dev/null << EOF
-set default=master
-set timeout=5
-set hidden_timeout_quiet=false
-
-menuentry "master" {
-configfile /tftpboot/\\\$net_default_ip.conf
-}
-EOF
-sudo chmod 644 /tftpboot/EFI/centos/grub.cfg
-sudo openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_config_template \\\$pybasedir/drivers/modules/pxe_grub_config.template
-sudo openstack-config --set /etc/ironic/ironic.conf pxe uefi_pxe_bootfile_name grubaa64.efi
-sudo service openstack-ironic-conductor restart
-sudo sed -i 's/linuxefi/linux/g' /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
-sudo sed -i 's/initrdefi/initrd/g' /usr/lib/python2.7/site-packages/ironic/drivers/modules/pxe_grub_config.template
-echo '' | sudo tee --append /tftpboot/map-file > /dev/null
-echo 'r ^/EFI/centos/grub.cfg-(.*) /tftpboot/pxelinux.cfg/\\1' | sudo tee --append /tftpboot/map-file > /dev/null
-sudo service xinetd restart
-fi
-
-# Set nova domain name
-sudo openstack-config --set /etc/nova/nova.conf DEFAULT dns_domain ${domain_name}
-sudo openstack-config --set /etc/nova/nova.conf DEFAULT dhcp_domain ${domain_name}
-sudo systemctl restart openstack-nova-conductor
-sudo systemctl restart openstack-nova-compute
-sudo systemctl restart openstack-nova-api
-sudo systemctl restart openstack-nova-scheduler
-
-# Set neutron domain name
-sudo openstack-config --set /etc/neutron/neutron.conf DEFAULT dns_domain ${domain_name}
-sudo systemctl restart neutron-server
-sudo systemctl restart neutron-dhcp-agent
-EOI
-
-# configure external network
-if [[ "$enabled_network_list" =~ "external" ]]; then
- ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
-if [[ "$external_installer_vm_vlan" != "native" ]]; then
- cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${external_installer_vm_vlan}
-DEVICE=vlan${external_installer_vm_vlan}
-ONBOOT=yes
-DEVICETYPE=ovs
-TYPE=OVSIntPort
-BOOTPROTO=static
-IPADDR=${external_installer_vm_ip}
-PREFIX=${external_cidr##*/}
-OVS_BRIDGE=br-ctlplane
-OVS_OPTIONS="tag=${external_installer_vm_vlan}"
-EOF
- ifup vlan${external_installer_vm_vlan}
-else
- if ! ip a s eth2 | grep ${external_installer_vm_ip} > /dev/null; then
- ip a a ${external_installer_vm_ip}/${external_cidr##*/} dev eth2
- ip link set up dev eth2
- fi
-fi
-EOI
-fi
-
-}
diff --git a/lib/utility-functions.sh b/lib/utility-functions.sh
deleted file mode 100644
index c12619ae..00000000
--- a/lib/utility-functions.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env bash
-# Utility Functions used by OPNFV Apex
-# author: Tim Rozet (trozet@redhat.com)
-
-SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
-
-##connects to undercloud
-##params: user to login with, command to execute on undercloud (optional)
-function undercloud_connect {
- local user=$1
-
- if [ -z "$1" ]; then
- echo "Missing required argument: user to login as to undercloud"
- return 1
- fi
-
- if [ -z "$2" ]; then
- ssh ${SSH_OPTIONS[@]} ${user}@$(get_undercloud_ip)
- else
- ssh ${SSH_OPTIONS[@]} -T ${user}@$(get_undercloud_ip) "$2"
- fi
-}
-
-##outputs the Undercloud's IP address
-##params: none
-function get_undercloud_ip {
- echo $(arp -an | grep $(virsh domiflist undercloud | grep default |\
- awk '{print $5}') | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-}
-
-##connects to overcloud nodes
-##params: node to login to, command to execute on overcloud (optional)
-function overcloud_connect {
- local node
- local node_output
- local node_ip
-
- if [ -z "$1" ]; then
- echo "Missing required argument: overcloud node to login to"
- return 1
- elif ! echo "$1" | grep -E "(controller|compute)[0-9]+" > /dev/null; then
- echo "Invalid argument: overcloud node to login to must be in the format: \
-controller<number> or compute<number>"
- return 1
- fi
-
- node_output=$(undercloud_connect "stack" "source stackrc; nova list")
- node=$(echo "$1" | sed -E 's/([a-zA-Z]+)([0-9]+)/\1-\2/')
-
- node_ip=$(echo "$node_output" | grep "$node" | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-
- if [ "$node_ip" == "" ]; then
- echo -e "Unable to find IP for ${node} in \n${node_output}"
- return 1
- fi
-
- if [ -z "$2" ]; then
- ssh ${SSH_OPTIONS[@]} heat-admin@${node_ip}
- else
- ssh ${SSH_OPTIONS[@]} -T heat-admin@${node_ip} "$2"
- fi
-}
-
-##connects to opendaylight karaf console
-##params: None
-function opendaylight_connect {
- local opendaylight_ip
- opendaylight_ip=$(undercloud_connect "stack" "cat overcloudrc | grep SDN_CONTROLLER_IP | grep -Eo [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
-
- if [ "$opendaylight_ip" == "" ]; then
- echo -e "Unable to find IP for OpenDaylight in overcloudrc"
- return 1
- else
- echo -e "Connecting to ODL Karaf console. Default password is 'karaf'"
- fi
-
- ssh -p 8101 ${SSH_OPTIONS[@]} karaf@${opendaylight_ip}
-}
-
-##outputs heat stack deployment failures
-##params: none
-function debug_stack {
- source ~/stackrc
- openstack stack failures list overcloud --long
-}
diff --git a/lib/virtual-setup-functions.sh b/lib/virtual-setup-functions.sh
deleted file mode 100755
index 5f9e6ba5..00000000
--- a/lib/virtual-setup-functions.sh
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env bash
-##############################################################################
-# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##Create virtual nodes in virsh
-##params: vcpus, ramsize
-function setup_virtual_baremetal {
- local vcpus ramsize held_ramsize
- if [ -z "$1" ]; then
- vcpus=4
- ramsize=8192
- elif [ -z "$2" ]; then
- vcpus=$1
- ramsize=8192
- else
- vcpus=$1
- ramsize=$(($2*1024))
- fi
- #start by generating the opening yaml for the inventory-virt.yaml file
- cat > $APEX_TMP_DIR/inventory-virt.yaml << EOF
-nodes:
-EOF
-
- # next create the virtual machines and add their definitions to the file
- if [ "$ha_enabled" == "False" ]; then
- controller_index=0
- else
- controller_index=2
- # 3 controller + computes
- # zero based so add 2 to compute count
- if [ $VM_COMPUTES -lt 2 ]; then
- VM_COMPUTES=2
- fi
- fi
-
- # tmp var to hold ramsize in case modified during detection
- held_ramsize=${ramsize}
- for i in $(seq 0 $(($controller_index+$VM_COMPUTES))); do
- ramsize=${held_ramsize}
- if [ $i -gt $controller_index ]; then
- capability="profile:compute"
- if [ -n "$VM_COMPUTE_RAM" ]; then
- ramsize=$((${VM_COMPUTE_RAM}*1024))
- fi
- else
- capability="profile:control"
- if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "$ramsize" -lt 12288 ]]; then
- echo "WARN: RAM per controller too low. OpenDaylight specified in deployment requires at least 12GB"
- echo "INFO: Increasing RAM per controller to 12GB"
- ramsize=12288
- elif [[ "$ramsize" -lt 10240 ]]; then
- echo "WARN: RAM per controller too low. Deployment requires at least 10GB"
- echo "INFO: Increasing RAM per controller to 10GB"
- ramsize=10240
- fi
- fi
- if ! virsh list --all | grep baremetal${i} > /dev/null; then
- define_vm baremetal${i} network 41 'admin' $vcpus $ramsize
- for n in tenant external storage api; do
- if [[ $enabled_network_list =~ $n ]]; then
- echo -n "$n "
- virsh attach-interface --domain baremetal${i} --type network --source $n --model virtio --config
- fi
- done
- else
- echo "Found baremetal${i} VM, using existing VM"
- fi
- #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
- mac=$(virsh domiflist baremetal${i} | grep admin | awk '{ print $5 }')
-
- cat >> $APEX_TMP_DIR/inventory-virt.yaml << EOF
- node${i}:
- mac_address: "$mac"
- ipmi_ip: 192.168.122.1
- ipmi_user: admin
- ipmi_pass: "password"
- pm_type: "pxe_ipmitool"
- pm_port: "623$i"
- cpu: $vcpus
- memory: $ramsize
- disk: 41
- arch: "$(uname -i)"
- capabilities: "$capability"
-EOF
- vbmc add baremetal$i --port 623$i
- if service firewalld status > /dev/null; then
- firewall-cmd --permanent --zone=public --add-port=623$i/udp
- fi
- # TODO: add iptables check and commands too
- vbmc start baremetal$i
- done
- if service firewalld status > /dev/null; then
- firewall-cmd --reload
- fi
-}
-
-##Create virtual nodes in virsh
-##params: name - String: libvirt name for VM
-## bootdev - String: boot device for the VM
-## disksize - Number: size of the disk in GB
-## ovs_bridges: - List: list of ovs bridges
-## vcpus - Number of VCPUs to use (defaults to 4)
-## ramsize - Size of RAM for VM in MB (defaults to 8192)
-function define_vm () {
- local vcpus ramsize volume_path direct_boot kernel_args
-
- if [ -z "$5" ]; then
- vcpus=4
- ramsize=8388608
- elif [ -z "$6" ]; then
- vcpus=$5
- ramsize=8388608
- else
- vcpus=$5
- ramsize=$(($6*1024))
- fi
-
- # Create the libvirt storage volume
- if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then
- volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2")
- echo "Volume ${1} exists. Deleting Existing Volume $volume_path"
- virsh vol-dumpxml ${1}.qcow2 --pool default > /dev/null || echo '' #ok for this to fail
- touch $volume_path
- virsh vol-delete ${1}.qcow2 --pool default
- fi
- virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2
- volume_path=$(virsh vol-path --pool default ${1}.qcow2)
- if [ ! -f $volume_path ]; then
- echo "$volume_path Not created successfully... Aborting"
- exit 1
- fi
-
- # undercloud need to be direct booted.
- # the upstream image no longer includes the kernel and initrd
- if [ "$1" == 'undercloud' ]; then
- direct_boot='--direct-boot overcloud-full'
- kernel_args='--kernel-arg console=ttyS0 --kernel-arg root=/dev/sda'
- fi
-
- if [ "$(uname -i)" == 'aarch64' ]; then
- diskbus='scsi'
- else
- diskbus='sata'
- fi
-
- # create the VM
- $LIB/configure-vm --name $1 \
- --bootdev $2 \
- --image "$volume_path" \
- --diskbus $diskbus \
- --arch $(uname -i) \
- --cpus $vcpus \
- --memory $ramsize \
- --libvirt-nic-driver virtio \
- $direct_boot \
- $kernel_args \
- --baremetal-interface $4
-}