 build/instack.sh                                              | 14 ++++---
 ci/deploy.sh                                                  | 72 +++++++++++-
 docs/installation-instructions/installation-instructions.rst | 81 +++++++-----
 3 files changed, 123 insertions(+), 44 deletions(-)
diff --git a/build/instack.sh b/build/instack.sh
index f3a366a0..54f05d54 100755
--- a/build/instack.sh
+++ b/build/instack.sh
@@ -226,8 +226,14 @@ PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openst
PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector,"
PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification"
PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils"
-
-LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES -a instack.qcow2
+PACKAGES+=",ceph-common"
+
+# install the packages above and enable ceph to live on the controller
+LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES \
+ --run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
+ --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \
+ --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \
+ -a instack.qcow2
popd
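
For reference, the sed c\ (change) idiom the new --run-command lines rely on replaces the whole matching line. A standalone sketch of the same edit (illustrative input, not part of the patch):

    # c\ swaps out every line matching the address, in full:
    printf 'a: 1\nControllerEnableCephStorage: false\n' \
      | sed '/ControllerEnableCephStorage/c\ControllerEnableCephStorage: true'
    # output: a: 1
    #         ControllerEnableCephStorage: true
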
@@ -238,8 +244,10 @@ cp overcloud-full.qcow2 overcloud-full-odl.qcow2
# remove unnecessary packages and install necessary packages
LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum remove -y openstack-neutron-openvswitch" \
+ --install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
--upload /etc/yum.repos.d/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
- --install opendaylight,python-networking-odl -a overcloud-full-odl.qcow2
+ --install opendaylight,python-networking-odl,ceph \
+ -a overcloud-full-odl.qcow2
## WORK AROUND
## when OpenDaylight lands in upstream RDO manager this can be removed
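
A hedged spot-check that the image customization took (assumes libguestfs tools on the build host; package names as installed above):

    # query the image's RPM database; virt-customize aborts if rpm -q reports a missing package:
    LIBGUESTFS_BACKEND=direct virt-customize -a overcloud-full-odl.qcow2 \
      --run-command "rpm -q opendaylight python-networking-odl ceph"
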
diff --git a/ci/deploy.sh b/ci/deploy.sh
index ae6366f3..0ee7ac0d 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -26,6 +26,7 @@ ha_enabled="TRUE"
ping_site="8.8.8.8"
ntp_server="pool.ntp.org"
net_isolation_enabled="TRUE"
+post_config="TRUE"
declare -i CNT
declare UNDERCLOUD
@@ -391,7 +392,7 @@ function configure_deps {
elif [ "$virtual" == "FALSE" ]; then
virsh_enabled_networks="admin_network public_network"
else
- virsh_enabled_neworks=$enabled_network_list
+ virsh_enabled_networks=$enabled_network_list
fi
for network in ${OPNFV_NETWORK_TYPES}; do
@@ -735,8 +736,13 @@ if [[ "$net_isolation_enabled" == "TRUE" ]]; then
openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
openstack-config --set undercloud.conf DEFAULT undercloud_debug false
+
fi
+sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
+
openstack undercloud install &> apex-undercloud-install.log
sleep 30
sudo systemctl restart openstack-glance-api
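
The three sed lines above splice in freshly generated values; run standalone, the generators look like this (ceph-authtool is provided by the ceph-common package installed earlier):

    cat /proc/sys/kernel/random/uuid   # random UUID -> CephClusterFSID
    ceph-authtool --gen-print-key      # random CephX key -> CephMonKey / CephAdminKey
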
@@ -766,6 +772,9 @@ function undercloud_prep_overcloud_deploy {
exit 1
fi
+ # make sure ceph is installed
+ DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+
# check if HA is enabled
if [[ "$ha_enabled" == "TRUE" ]]; then
DEPLOY_OPTIONS+=" --control-scale 3 --compute-scale 2"
@@ -812,11 +821,56 @@ echo "Configuring nameserver on ctlplane network"
neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
echo "Executing overcloud deployment, this should run for an extended period without output."
sleep 60 #wait for Hypervisor stats to check-in to nova
+# save deploy command so it can be used for debugging
+cat > deploy_command << EOF
+openstack overcloud deploy --templates $DEPLOY_OPTIONS
+EOF
openstack overcloud deploy --templates $DEPLOY_OPTIONS
EOI
}
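
With the deploy command now saved to deploy_command, a failed overcloud deploy can be retried by hand. A sketch (assumes the stack user's stackrc, as used elsewhere in this script):

    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
    source stackrc
    bash deploy_command
    EOI
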
+##Post configuration after install
+##params: none
+function configure_post_install {
+ local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip
+ opnfv_attach_networks="admin_network public_network"
+
+ echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
+
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+source overcloudrc
+set -o errexit
+echo "Configuring Neutron external network"
+neutron net-create external --router:external=True
+neutron subnet-create --name external-net --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+EOI
+
+ echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
+ for network in ${opnfv_attach_networks}; do
+ ovs_ip=$(find_ip ${NET_MAP[$network]})
+ tmp_ip=''
+ if [ -n "$ovs_ip" ]; then
+ echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} has IP address ${ovs_ip}${reset}"
+ else
+ echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} missing IP, will configure${reset}"
+ # use last IP of allocation pool
+ eval "ip_range=\${${network}_usable_ip_range}"
+ ovs_ip=${ip_range##*,}
+ eval "net_cidr=\${${network}_cidr}"
+ sudo ip addr add ${ovs_ip}/${net_cidr##*/} dev ${NET_MAP[$network]}
+ tmp_ip=$(find_ip ${NET_MAP[$network]})
+ if [ -n "$tmp_ip" ]; then
+ echo -e "${blue}INFO: OVS Bridge ${NET_MAP[$network]} IP set: ${tmp_ip}${reset}"
+ continue
+ else
+ echo -e "${red}ERROR: Unable to set OVS Bridge ${NET_MAP[$network]} with IP: ${ovs_ip}${reset}"
+ return 1
+ fi
+ fi
+ done
+}
+
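
The parameter expansions in configure_post_install do the pool and prefix parsing; in isolation, with example values:

    ip_range="192.0.2.11,192.0.2.99"   # stand-in for ${network}_usable_ip_range
    net_cidr="192.0.2.0/24"            # stand-in for ${network}_cidr
    echo "${ip_range##*,}"             # 192.0.2.99 -- last IP of the allocation pool
    echo "${net_cidr##*/}"             # 24 -- prefix length used for 'ip addr add'
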
display_usage() {
echo -e "Usage:\n$0 [arguments] \n"
echo -e " -c|--config : Directory to configuration files. Optional. Defaults to /var/opt/opnfv/ \n"
@@ -827,7 +881,8 @@ display_usage() {
echo -e " -r|--resources : Directory to deployment resources. Optional. Defaults to /var/opt/opnfv/stack \n"
echo -e " -v|--virtual : Virtualize overcloud nodes instead of using baremetal. \n"
echo -e " --no-ha : disable High Availability deployment scheme, this assumes a single controller and single compute node \n"
- echo -e " --flat : disable Network Isolation and use a single flat network for the underlay network."
+ echo -e " --flat : disable Network Isolation and use a single flat network for the underlay network.\n"
+ echo -e " --no-post-config : disable Post Install configuration."
}
##translates the command line parameters into variables
@@ -889,6 +944,11 @@ parse_cmdline() {
echo "Underlay Network Isolation Disabled: using flat configuration"
shift 1
;;
+ --no-post-config )
+ post_config="FALSE"
+ echo "Post install configuration disabled"
+ shift 1
+ ;;
*)
display_usage
exit 1
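
An illustrative invocation with the new flag (other options as needed; -v is the existing virtual-deploy switch):

    # deploy virtually but leave Neutron/OVS post-configuration to the operator:
    sudo ./ci/deploy.sh -v --no-post-config
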
@@ -951,6 +1011,14 @@ main() {
fi
configure_undercloud
undercloud_prep_overcloud_deploy
+ if [ "$post_config" == "TRUE" ]; then
+ if ! configure_post_install; then
+ echo -e "${red}ERROR:Post Install Configuration Failed, Exiting.${reset}"
+ exit 1
+ else
+ echo -e "${blue}INFO: Post Install Configuration Complete${reset}"
+ fi
+ fi
}
main "$@"
diff --git a/docs/installation-instructions/installation-instructions.rst b/docs/installation-instructions/installation-instructions.rst
index 8d2a3417..d166bad8 100644
--- a/docs/installation-instructions/installation-instructions.rst
+++ b/docs/installation-instructions/installation-instructions.rst
@@ -202,13 +202,14 @@ configured with an IP gateway on its admin or public interface and configured wi
working DNS server. The Jumphost should also have routable access to the lights out network.
``opnfv-deploy`` is then executed in order to deploy the Instack VM. ``opnfv-deploy`` uses
-two configuration files in order to know how to install and provision the OPNFV target system.
+three configuration files in order to know how to install and provision the OPNFV target system.
The information gathered under section `Execution Requirements (Bare Metal Only)`_ is put
-into the JSON file (``instackenv.json``) configuration file. Networking definitions gathered
-under section `Network Requirements`_ are put into the JSON file
-(``network-environment.yaml``). ``opnfv-deploy`` will boot the Instack VM and load the target
-deployment configuration into the provisioning toolchain. This includes MAC address, IPMI,
-Networking Environment and OPNFV deployment options.
+into the YAML file (``/etc/opnfv-apex/inventory.yaml``) configuration file. Deployment
+options are put into the YAML file (``/etc/opnfv-apex/deploy_settings.yaml``). Networking
+definitions gathered under section `Network Requirements`_ are put into the YAML file
+(``/etc/opnfv-apex/network_settings.yaml``). ``opnfv-deploy`` will boot the Instack VM
+and load the target deployment configuration into the provisioning toolchain. This includes
+MAC address, IPMI, Networking Environment and OPNFV deployment options.
Once configuration is loaded and Instack is configured it will then reboot the nodes via IPMI.
The nodes should already be set to PXE boot first off the admin interface. The nodes will
@@ -286,59 +287,61 @@ Creating a Node Inventory File
------------------------------
IPMI configuration information gathered in section `Execution Requirements (Bare Metal Only)`_
-needs to be added to the ``instackenv.json`` file.
+needs to be added to the ``inventory.yaml`` file.
-1. Make a copy of ``/var/opt/opnfv/instackenv.json.example`` into root's home directory: ``/root/instackenv.json``
+1. Edit ``/etc/opnfv-apex/inventory.yaml``.
-2. Edit the file in your favorite editor.
-
-3. The nodes dictionary contains a definition block for each baremetal host that will be deployed.
+2. The nodes dictionary contains a definition block for each baremetal host that will be deployed.
1 or more compute nodes and 3 controller nodes are required.
(The example file contains blocks for each of these already).
- It is optional at this point to add more compute nodes into the dictionary.
+ It is optional at this point to add more compute nodes into the node list.
-4. Edit the following values for each node:
+3. Edit the following values for each node:
- - ``pm_type``: Power Management driver to use for the node
- - ``pm_addr``: IPMI IP Address
- - ``pm_user``: IPMI username
- - ``pm_password``: IPMI password
- - ``capabilities``: Intended node role (profile:control or profile:compute)
- - ``cpu``: CPU cores available
- - ``memory``: Memory available in Mib
- - ``disk``: Disk space available in Gb
- - ``arch``: System architecture
- - ``mac``: MAC of the interface that will PXE boot from Instack
+ - ``mac_address``: MAC of the interface that will PXE boot from Instack
+ - ``ipmi_ip``: IPMI IP Address
+ - ``ipmi_user``: IPMI username
+ - ``ipmi_password``: IPMI password
+ - ``ipmi_type``: Power Management driver to use for the node
+ - ``cpus``: (Introspected*) CPU cores available
+ - ``memory``: (Introspected*) Memory available in MiB
+ - ``disk``: (Introspected*) Disk space available in GB
+ - ``arch``: (Introspected*) System architecture
+ - ``capabilities``: (Optional**) Intended node role (profile:control or profile:compute)
-5. Save your changes.
+* Introspection looks up the overcloud node's resources and overrides these values. You can
+leave the default values and Apex will discover the correct ones when it runs introspection on the nodes.
-Creating a Network Environment File
------------------------------------
+** If a capabilities profile is not specified then Apex will assign the node's role in the OPNFV cluster
+in a non-deterministic fashion.
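
A hypothetical node entry matching the fields above, printed as a bash heredoc (the exact YAML layout is an assumption; the shipped example file is authoritative, and all values here are placeholders):

    # print the shape of one inventory node entry:
    cat << 'EOF'
    nodes:
      node1:
        mac_address: "52:54:00:aa:bb:cc"
        ipmi_ip: 192.0.2.50
        ipmi_user: admin
        ipmi_password: password
        ipmi_type: pxe_ipmitool
        cpus: 2
        memory: 8192
        disk: 40
        arch: x86_64
        capabilities: "profile:control"
    EOF
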
-Network environment information gathered in section `Network Requirements`_
-needs to be added to the ``network-environment.yaml`` file.
+Creating the Settings Files
+-----------------------------------
-1. Make a copy of ``/var/opt/opnfv/network-environment.yaml`` into root's home
-directory: ``/root/network-environment.yaml``
+Edit the two settings files in ``/etc/opnfv-apex/``. These files have comments to help you customize them.
-2. Edit the file in your favorite editor.
+1. deploy_settings.yaml
+ This file includes basic configuration options for the deployment.
-3. Update the information (TODO: More Cowbell please!)
+2. network_settings.yaml
+ This file provides Apex with the networking information that satisfies the
+ prerequisite `Network Requirements`_. These are specific to your environment.
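
Both files ship with inline comments, so a quick review before deploying is cheap:

    less /etc/opnfv-apex/deploy_settings.yaml /etc/opnfv-apex/network_settings.yaml
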
Running ``opnfv-deploy``
------------------------
-You are now ready to deploy OPNFV!
-``opnfv-deploy`` will use the instackenv.json and network-environment.yaml to deploy OPNFV.
-The names of these files are important. ``opnfv-deploy`` will look for ``instackenv.json`` and
-``network-environment.yaml`` in the present working directory when it is run.
+You are now ready to deploy OPNFV using Apex!
+``opnfv-deploy`` will use the inventory and settings files to deploy OPNFV.
Follow the steps below to execute:
-1. execute ``sudo opnfv-deploy -i /path/to/instackenv.json -n /path/to/network-environment.yaml``
+1. Execute opnfv-deploy
+ ``sudo opnfv-deploy [ --flat | -n network_settings.yaml ] -i inventory.yaml -d deploy_settings.yaml``
+ If you need more information about the options that can be passed to opnfv-deploy, use ``opnfv-deploy --help``.
+ --flat will collapse all networks onto a single NIC; -n network_settings.yaml lets you customize your
+ networking topology. Example invocations follow these steps.
-2. It will take about approximately 30 minutes to stand up instack,
- configure the deployment and execute the deployment.
+2. Wait while deployment is executed.
If something goes wrong during this part of the process,
it is most likely a problem with the setup of your network or the information in your configuration files.
You will also notice different outputs in your shell.
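
Concrete invocations of step 1 (paths assumed to be the /etc/opnfv-apex defaults):

    # flat underlay, no custom network topology:
    sudo opnfv-deploy --flat -i /etc/opnfv-apex/inventory.yaml -d /etc/opnfv-apex/deploy_settings.yaml
    # isolated networks, customized via the network settings file:
    sudo opnfv-deploy -n /etc/opnfv-apex/network_settings.yaml \
      -i /etc/opnfv-apex/inventory.yaml -d /etc/opnfv-apex/deploy_settings.yaml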