-rw-r--r--   build/Makefile                   22
-rwxr-xr-x   build/instack.sh                 28
-rw-r--r--   build/network-environment.yaml   39
-rw-r--r--   build/nics/compute.yaml          87
-rw-r--r--   build/nics/controller.yaml      116
-rw-r--r--   build/opnfv-apex.spec            57
-rwxr-xr-x   ci/clean.sh                       6
-rwxr-xr-x   ci/deploy.sh                     36
8 files changed, 332 insertions, 59 deletions
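
This change introduces a second baremetal bridge, brbm1, alongside brbm for network-isolation deployments: the virtual baremetal nodes are renamed from baremetalbrbm_N to baremetalbrbm_brbm1_N, a brbm1 libvirt network and an extra instack interface on 192.168.37.0/24 are created, and new network-environment.yaml and nics/ templates are packaged and passed to openstack overcloud deploy. A quick host-side check that both bridges and networks came up, mirroring the guards added in ci/deploy.sh below (a sketch only; it assumes the deploy script has already run on the jump host):

# sketch: verify both baremetal OVS bridges and libvirt networks exist
for net in brbm brbm1; do
  ovs-vsctl list-br | grep -q "^${net}$" && echo "OVS bridge ${net}: ok" || echo "OVS bridge ${net}: missing"
  virsh net-list | grep -E "${net}\s+active" > /dev/null && echo "libvirt net ${net}: active" || echo "libvirt net ${net}: not active"
done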
diff --git a/build/Makefile b/build/Makefile
index 7aec8fb6..109521c6 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -101,12 +101,13 @@ rpm:
 	pushd ../ && git archive --format=tar --prefix=opnfv-apex-$(RPMVERS)/ HEAD > build/opnfv-apex.tar
 	tar -u --xform="s:instack.qcow2:opnfv-apex-$(RPMVERS)/build/instack.qcow2:" --file=opnfv-apex.tar instack.qcow2
 	tar -u --xform="s:instack.xml:opnfv-apex-$(RPMVERS)/build/instack.xml:" --file=opnfv-apex.tar instack.xml
-	tar -u --xform="s:baremetalbrbm_0.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_0.xml:" --file=opnfv-apex.tar baremetalbrbm_0.xml
-	tar -u --xform="s:baremetalbrbm_1.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_1.xml:" --file=opnfv-apex.tar baremetalbrbm_1.xml
-	tar -u --xform="s:baremetalbrbm_2.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_2.xml:" --file=opnfv-apex.tar baremetalbrbm_2.xml
-	tar -u --xform="s:baremetalbrbm_3.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_3.xml:" --file=opnfv-apex.tar baremetalbrbm_3.xml
-	tar -u --xform="s:baremetalbrbm_4.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_4.xml:" --file=opnfv-apex.tar baremetalbrbm_4.xml
+	tar -u --xform="s:baremetalbrbm_brbm1_0.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_0.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_0.xml
+	tar -u --xform="s:baremetalbrbm_brbm1_1.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_1.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_1.xml
+	tar -u --xform="s:baremetalbrbm_brbm1_2.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_2.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_2.xml
+	tar -u --xform="s:baremetalbrbm_brbm1_3.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_3.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_3.xml
+	tar -u --xform="s:baremetalbrbm_brbm1_4.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_4.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_4.xml
 	tar -u --xform="s:brbm-net.xml:opnfv-apex-$(RPMVERS)/build/brbm-net.xml:" --file=opnfv-apex.tar brbm-net.xml
+	tar -u --xform="s:brbm1-net.xml:opnfv-apex-$(RPMVERS)/build/brbm1-net.xml:" --file=opnfv-apex.tar brbm1-net.xml
 	tar -u --xform="s:default-pool.xml:opnfv-apex-$(RPMVERS)/build/default-pool.xml:" --file=opnfv-apex.tar default-pool.xml
 	tar -u --xform="s:instackenv-virt.json:opnfv-apex-$(RPMVERS)/build/instackenv-virt.json:" --file=opnfv-apex.tar instackenv-virt.json
 	tar -u --xform="s:stack/deploy-ramdisk-ironic.initramfs:opnfv-apex-$(RPMVERS)/build/stack/deploy-ramdisk-ironic.initramfs:" --file=opnfv-apex.tar stack/deploy-ramdisk-ironic.initramfs
@@ -117,6 +118,7 @@ rpm:
 	tar -u --xform="s:stack/overcloud-full.initrd:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.initrd:" --file=opnfv-apex.tar stack/overcloud-full.initrd
 	tar -u --xform="s:stack/overcloud-full-odl.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.qcow2:" --file=opnfv-apex.tar stack/overcloud-full-odl.qcow2
 	tar -u --xform="s:stack/overcloud-full.vmlinuz:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.vmlinuz:" --file=opnfv-apex.tar stack/overcloud-full.vmlinuz
+	tar -u --xform="s:network-environment.yaml:opnfv-apex-$(RPMVERS)/build/network-environment.yaml:" --file=opnfv-apex.tar network-environment.yaml
 	tar -u --xform="s:opendaylight.yaml:opnfv-apex-$(RPMVERS)/build/opendaylight.yaml:" --file=opnfv-apex.tar opendaylight.yaml
 	tar -u --xform="s:opendaylight.patch:opnfv-apex-$(RPMVERS)/build/opendaylight.patch:" --file=opnfv-apex.tar opendaylight.patch
 	gzip -f opnfv-apex.tar
@@ -131,11 +133,11 @@ instack.qcow2:
 
 .PHONY: instack-clean
 instack-clean:
 	rm -f instackenv-virt.json
-	rm -f baremetalbrbm_0.xml
-	rm -f baremetalbrbm_1.xml
-	rm -f baremetalbrbm_2.xml
-	rm -f baremetalbrbm_3.xml
-	rm -f baremetalbrbm_4.xml
+	rm -f baremetalbrbm_brbm1_0.xml
+	rm -f baremetalbrbm_brbm1_1.xml
+	rm -f baremetalbrbm_brbm1_2.xml
+	rm -f baremetalbrbm_brbm1_3.xml
+	rm -f baremetalbrbm_brbm1_4.xml
 	rm -f instack.xml
 	rm -f instack.qcow2
diff --git a/build/instack.sh b/build/instack.sh
index 1e8e8b38..b682c8d9 100755
--- a/build/instack.sh
+++ b/build/instack.sh
@@ -83,6 +83,9 @@ if ! rpm -q libvirt-daemon-kvm > /dev/null; then
     sudo yum install -y libvirt-daemon-kvm
 fi
 
+# clean this up incase it's there
+sudo rm -f /tmp/instack.answers
+
 # ensure that no previous undercloud VMs are running
 # and rebuild the bare undercloud VMs
 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
@@ -90,10 +93,10 @@ set -e
 virsh destroy instack 2> /dev/null || echo -n ''
 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
 for i in \$(seq 0 $vm_index); do
-  virsh destroy baremetalbrbm_\$i 2> /dev/null || echo -n ''
-  virsh undefine baremetalbrbm_\$i --remove-all-storage 2> /dev/null || echo -n ''
+  virsh destroy baremetalbrbm_brbm1_\$i 2> /dev/null || echo -n ''
+  virsh undefine baremetalbrbm_brbm1_\$i --remove-all-storage 2> /dev/null || echo -n ''
 done
-NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 instack-virt-setup
+NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1'" instack-virt-setup
 EOI
 
 # let dhcp happen so we can get the ip
@@ -132,13 +135,13 @@ EOI
 # install undercloud on Undercloud VM
 ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "openstack undercloud install"
 
-# Clean cache to reduce the images size
-ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "yum clean all"
-
 # copy instackenv file for future virt deployments
 if [ ! -d stack ]; then mkdir stack; fi
 scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json
 
+# Clean cache to reduce the images size
+ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "yum clean all"
+
 # make a copy of instack VM's definitions, and disk image
 # it must be stopped to make a copy of its disk image
 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
@@ -160,26 +163,29 @@ fi
 
 echo $'\nGenerating libvirt configuration'
 for i in \$(seq 0 $vm_index); do
-  virsh dumpxml baremetalbrbm_\$i > baremetalbrbm_\$i.xml
+  virsh dumpxml baremetalbrbm_brbm1_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_\$i.xml
 done
 
 virsh dumpxml instack > instack.xml
-#virsh vol-dumpxml instack.qcow2 --pool default > instack.qcow2.xml
 virsh net-dumpxml brbm > brbm-net.xml
+virsh net-dumpxml brbm1 > brbm1-net.xml
 virsh pool-dumpxml default > default-pool.xml
 EOI
 
 # copy off the instack artifacts
 echo "Copying instack files to build directory"
 for i in $(seq 0 $vm_index); do
-  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_${i}.xml .
+  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_${i}.xml .
 done
 
 scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
+scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
 
+# copy the instack disk image for inclusion in artifacts
 sudo cp /var/lib/libvirt/images/instack.qcow2 ./instack.qcow2
+
 #sudo chown $(whoami):$(whoami) ./instack.qcow2_
 #virt-sparsify --check-tmpdir=fail ./instack.qcow2_ ./instack.qcow2
 #rm -f ./instack.qcow2_
@@ -227,8 +233,8 @@ set -e
 
 virsh destroy instack 2> /dev/null || echo -n ''
 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
 for i in \$(seq 0 $vm_index); do
-  virsh destroy baremetalbrbm_\$i 2> /dev/null || echo -n ''
-  virsh undefine baremetalbrbm_\$i --remove-all-storage 2> /dev/null || echo -n ''
+  virsh destroy baremetalbrbm_brbm1_\$i 2> /dev/null || echo -n ''
+  virsh undefine baremetalbrbm_brbm1_\$i --remove-all-storage 2> /dev/null || echo -n ''
 done
 EOI
diff --git a/build/network-environment.yaml b/build/network-environment.yaml
new file mode 100644
index 00000000..a3d56025
--- /dev/null
+++ b/build/network-environment.yaml
@@ -0,0 +1,39 @@
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks.
+resource_registry:
+  OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml
+  OS::TripleO::Network::InternalApi: /usr/share/openstack-tripleo-heat-templates/network/noop.yaml
+  OS::TripleO::Network::StorageMgmt: /usr/share/openstack-tripleo-heat-templates/network/noop.yaml
+  OS::TripleO::Network::Storage: /usr/share/openstack-tripleo-heat-templates/network/noop.yaml
+  OS::TripleO::Network::Tenant: /usr/share/openstack-tripleo-heat-templates/network/noop.yaml
+
+  # Port assignments for the controller role
+  OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
+  OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+  OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+  OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+  OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+
+  # Port assignments for the compute role
+  OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+  OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+  OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+
+  # Port assignments for service virtual IPs for the controller role
+  OS::TripleO::Controller::Ports::RedisVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+
+  # NIC assignments
+  OS::TripleO::Compute::Net::SoftwareConfig: nics/compute.yaml
+  OS::TripleO::Controller::Net::SoftwareConfig: nics/controller.yaml
+
+
+parameter_defaults:
+  NeutronExternalNetworkBridge: "''"
+  ControlPlaneSubnetCidr: "24"
+  ControlPlaneDefaultRoute: 192.0.2.1
+  ExternalNetCidr: 192.168.37.0/24
+  ExternalAllocationPools: [{'start': '192.168.37.50', 'end': '192.168.37.99'}]
+  ExternalInterfaceDefaultRoute: 192.168.37.1
+  EC2MetadataIp: 192.0.2.1
+  DnsServers: ["8.8.8.8","8.8.4.4"]
diff --git a/build/nics/compute.yaml b/build/nics/compute.yaml
new file mode 100644
index 00000000..674b250a
--- /dev/null
+++ b/build/nics/compute.yaml
@@ -0,0 +1,87 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Software Config to drive os-net-config to configure multiple interfaces
+  for the compute role.
+
+parameters:
+  ControlPlaneIp:
+    default: ''
+    description: IP address/subnet on the ctlplane network
+    type: string
+  ExternalIpSubnet:
+    default: ''
+    description: IP address/subnet on the external network
+    type: string
+  InternalApiIpSubnet:
+    default: ''
+    description: IP address/subnet on the internal API network
+    type: string
+  StorageIpSubnet:
+    default: ''
+    description: IP address/subnet on the storage network
+    type: string
+  StorageMgmtIpSubnet:
+    default: ''
+    description: IP address/subnet on the storage mgmt network
+    type: string
+  TenantIpSubnet:
+    default: ''
+    description: IP address/subnet on the tenant network
+    type: string
+  InternalApiNetworkVlanID:
+    default: 20
+    description: Vlan ID for the internal_api network traffic.
+    type: number
+  StorageNetworkVlanID:
+    default: 30
+    description: Vlan ID for the storage network traffic.
+    type: number
+  TenantNetworkVlanID:
+    default: 50
+    description: Vlan ID for the tenant network traffic.
+    type: number
+  ControlPlaneSubnetCidr: # Override this via parameter_defaults
+    default: '24'
+    description: The subnet CIDR of the control plane network.
+    type: string
+  ControlPlaneDefaultRoute: # Override this via parameter_defaults
+    description: The subnet CIDR of the control plane network.
+    type: string
+  DnsServers: # Override this via parameter_defaults
+    default: []
+    description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+    type: json
+  EC2MetadataIp: # Override this via parameter_defaults
+    description: The IP address of the EC2 metadata server.
+    type: string
+
+resources:
+  OsNetConfigImpl:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        os_net_config:
+          network_config:
+            -
+              type: interface
+              name: nic1
+              use_dhcp: false
+              dns_servers: {get_param: DnsServers}
+              addresses:
+                -
+                  ip_netmask:
+                    list_join:
+                      - '/'
+                      - - {get_param: ControlPlaneIp}
+                        - {get_param: ControlPlaneSubnetCidr}
+              routes:
+                -
+                  ip_netmask: 169.254.169.254/32
+                  next_hop: {get_param: EC2MetadataIp}
+
+outputs:
+  OS::stack_id:
+    description: The OsNetConfigImpl resource.
+    value: {get_resource: OsNetConfigImpl}
diff --git a/build/nics/controller.yaml b/build/nics/controller.yaml
new file mode 100644
index 00000000..1b421ac9
--- /dev/null
+++ b/build/nics/controller.yaml
@@ -0,0 +1,116 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Software Config to drive os-net-config to configure multiple interfaces
+  for the controller role.
+
+parameters:
+  ControlPlaneIp:
+    default: ''
+    description: IP address/subnet on the ctlplane network
+    type: string
+  ExternalIpSubnet:
+    default: ''
+    description: IP address/subnet on the external network
+    type: string
+  InternalApiIpSubnet:
+    default: ''
+    description: IP address/subnet on the internal API network
+    type: string
+  StorageIpSubnet:
+    default: ''
+    description: IP address/subnet on the storage network
+    type: string
+  StorageMgmtIpSubnet:
+    default: ''
+    description: IP address/subnet on the storage mgmt network
+    type: string
+  TenantIpSubnet:
+    default: ''
+    description: IP address/subnet on the tenant network
+    type: string
+  ExternalNetworkVlanID:
+    default: 10
+    description: Vlan ID for the external network traffic.
+    type: number
+  InternalApiNetworkVlanID:
+    default: 20
+    description: Vlan ID for the internal_api network traffic.
+    type: number
+  StorageNetworkVlanID:
+    default: 30
+    description: Vlan ID for the storage network traffic.
+    type: number
+  StorageMgmtNetworkVlanID:
+    default: 40
+    description: Vlan ID for the storage mgmt network traffic.
+    type: number
+  TenantNetworkVlanID:
+    default: 50
+    description: Vlan ID for the tenant network traffic.
+    type: number
+  ExternalInterfaceDefaultRoute:
+    default: '10.0.0.1'
+    description: default route for the external network
+    type: string
+  ControlPlaneSubnetCidr: # Override this via parameter_defaults
+    default: '24'
+    description: The subnet CIDR of the control plane network.
+    type: string
+  ControlPlaneDefaultRoute: # Override this via parameter_defaults
+    description: The subnet CIDR of the control plane network.
+    type: string
+  DnsServers: # Override this via parameter_defaults
+    default: []
+    description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+    type: json
+  EC2MetadataIp: # Override this via parameter_defaults
+    description: The IP address of the EC2 metadata server.
+    type: string
+
+resources:
+  OsNetConfigImpl:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        os_net_config:
+          network_config:
+            -
+              type: interface
+              name: nic1
+              use_dhcp: false
+              dns_servers: {get_param: DnsServers}
+              addresses:
+                -
+                  ip_netmask:
+                    list_join:
+                      - '/'
+                      - - {get_param: ControlPlaneIp}
+                        - {get_param: ControlPlaneSubnetCidr}
+              routes:
+                -
+                  ip_netmask: 169.254.169.254/32
+                  next_hop: {get_param: EC2MetadataIp}
+            -
+              type: ovs_bridge
+              name: {get_input: bridge_name}
+              use_dhcp: false
+              addresses:
+                -
+                  ip_netmask: {get_param: ExternalIpSubnet}
+              routes:
+                -
+                  ip_netmask: 0.0.0.0/0
+                  next_hop: {get_param: ExternalInterfaceDefaultRoute}
+              members:
+                -
+                  type: interface
+                  name: nic2
+                  # force the MAC address of the bridge to this interface
+                  primary: true
+
+outputs:
+  OS::stack_id:
+    description: The OsNetConfigImpl resource.
+    value: {get_resource: OsNetConfigImpl}
diff --git a/build/opnfv-apex.spec b/build/opnfv-apex.spec
index 9d334a14..29cede8a 100644
--- a/build/opnfv-apex.spec
+++ b/build/opnfv-apex.spec
@@ -1,5 +1,5 @@
 Name: opnfv-apex
-Version: 2.1
+Version: 2.2
 Release: %{release}
 Summary: RDO Manager disk images for deployment
 
@@ -25,37 +25,46 @@ to deploy an OpenStack overcloud.
 
 %install
 mkdir -p %{buildroot}%{_bindir}/
-cp ci/deploy.sh %{buildroot}%{_bindir}/opnfv-deploy
-cp ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
+install ci/deploy.sh %{buildroot}%{_bindir}/opnfv-deploy
+install ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
 
 mkdir -p %{buildroot}%{_var}/opt/opnfv/stack/
+mkdir -p %{buildroot}%{_var}/opt/opnfv/nics/
 
-cp build/instack.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/instack.xml %{buildroot}%{_var}/opt/opnfv/
-cp build/baremetalbrbm_*.xml %{buildroot}%{_var}/opt/opnfv/
-cp build/brbm-net.xml %{buildroot}%{_var}/opt/opnfv/
-cp build/default-pool.xml %{buildroot}%{_var}/opt/opnfv/
-cp build/opendaylight.yaml %{buildroot}%{_var}/opt/opnfv/
-cp build/opendaylight.patch %{buildroot}%{_var}/opt/opnfv/
+install build/instack.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/
+install build/instack.xml %{buildroot}%{_var}/opt/opnfv/
+install build/baremetalbrbm_brbm1_*.xml %{buildroot}%{_var}/opt/opnfv/
+install build/brbm-net.xml %{buildroot}%{_var}/opt/opnfv/
+install build/brbm1-net.xml %{buildroot}%{_var}/opt/opnfv/
+install build/default-pool.xml %{buildroot}%{_var}/opt/opnfv/
+install build/network-environment.yaml %{buildroot}%{_var}/opt/opnfv/
+install build/nics/controller.yaml %{buildroot}%{_var}/opt/opnfv/nics/
+install build/nics/compute.yaml %{buildroot}%{_var}/opt/opnfv/nics/
+install build/opendaylight.yaml %{buildroot}%{_var}/opt/opnfv/
+install build/opendaylight.patch %{buildroot}%{_var}/opt/opnfv/
 
-cp build/instackenv-virt.json %{buildroot}%{_var}/opt/opnfv/
-cp build/stack/deploy-ramdisk-ironic.initramfs %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/stack/deploy-ramdisk-ironic.kernel %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/stack/ironic-python-agent.initramfs %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/stack/ironic-python-agent.kernel %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/stack/ironic-python-agent.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/stack/overcloud-full.initrd %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/stack/overcloud-full.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/
-cp build/stack/overcloud-full.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
+install build/instackenv-virt.json %{buildroot}%{_var}/opt/opnfv/
+install build/stack/deploy-ramdisk-ironic.initramfs %{buildroot}%{_var}/opt/opnfv/stack/
+install build/stack/deploy-ramdisk-ironic.kernel %{buildroot}%{_var}/opt/opnfv/stack/
+install build/stack/ironic-python-agent.initramfs %{buildroot}%{_var}/opt/opnfv/stack/
+install build/stack/ironic-python-agent.kernel %{buildroot}%{_var}/opt/opnfv/stack/
+install build/stack/ironic-python-agent.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
+install build/stack/overcloud-full.initrd %{buildroot}%{_var}/opt/opnfv/stack/
+install build/stack/overcloud-full.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/
+install build/stack/overcloud-full.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
 
 %files
 %{_bindir}/opnfv-deploy
 %{_bindir}/opnfv-clean
 %{_var}/opt/opnfv/stack/instack.qcow2
 %{_var}/opt/opnfv/instack.xml
-%{_var}/opt/opnfv/baremetalbrbm_*.xml
+%{_var}/opt/opnfv/baremetalbrbm_brbm1_*.xml
 %{_var}/opt/opnfv/brbm-net.xml
+%{_var}/opt/opnfv/brbm1-net.xml
 %{_var}/opt/opnfv/default-pool.xml
+%{_var}/opt/opnfv/network-environment.yaml
+%{_var}/opt/opnfv/nics/controller.yaml
+%{_var}/opt/opnfv/nics/compute.yaml
 %{_var}/opt/opnfv/opendaylight.yaml
 %{_var}/opt/opnfv/opendaylight.patch
 %{_var}/opt/opnfv/instackenv-virt.json
@@ -69,9 +78,11 @@ cp build/stack/overcloud-full.vmlinuz %{buildroot}%{_var}/opt/opnfv/stack/
 %{_var}/opt/opnfv/stack/overcloud-full.vmlinuz
 
 %changelog
-* Wed Oct 21 2015 Dan Radez <dradez@redhatcom> - 2.1-1
+* Thu Nov 12 2015 Dan Radez <dradez@redhat.com> - 2.2-1
+- OpenDaylight and Network Isolation support
+* Wed Oct 21 2015 Dan Radez <dradez@redhat.com> - 2.1-1
 - Initial deployment success using RPM packaging
-* Fri Sep 25 2015 Dan Radez <dradez@redhatcom> - 2.0-1
+* Fri Sep 25 2015 Dan Radez <dradez@redhat.com> - 2.0-1
 - Migrated to RDO Manager
-* Fri Apr 24 2015 Dan Radez <dradez@redhatcom> - 0.1-1
+* Fri Apr 24 2015 Dan Radez <dradez@redhat.com> - 0.1-1
 - Initial Packaging
diff --git a/ci/clean.sh b/ci/clean.sh
index acae615d..265c8484 100755
--- a/ci/clean.sh
+++ b/ci/clean.sh
@@ -11,7 +11,7 @@ virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
 rm -f /var/lib/libvirt/images/instack.qcow2 2> /dev/null
 
 for i in $(seq 0 $vm_index); do
-  virsh destroy baremetalbrbm_$i 2> /dev/null || echo -n ''
-  virsh undefine baremetalbrbm_$i --remove-all-storage 2> /dev/null || echo -n ''
-  rm -f /var/lib/libvirt/images/baremetalbrbm_${i}.qcow2 2> /dev/null
+  virsh destroy baremetalbrbm_brbm1_$i 2> /dev/null || echo -n ''
+  virsh undefine baremetalbrbm_brbm1_$i --remove-all-storage 2> /dev/null || echo -n ''
+  rm -f /var/lib/libvirt/images/baremetalbrbm_brbm1_${i}.qcow2 2> /dev/null
 done
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 1a94345e..e2933115 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -62,11 +62,14 @@ function configure_deps {
     internet=true
   fi
 
-  # ensure brbm network is configured
+  # ensure brbm networks are configured
   systemctl start openvswitch
   ovs-vsctl list-br | grep brbm > /dev/null || ovs-vsctl add-br brbm
   virsh net-list --all | grep brbm > /dev/null || virsh net-create $CONFIG/brbm-net.xml
   virsh net-list | grep -E "brbm\s+active" > /dev/null || virsh net-start brbm
+  ovs-vsctl list-br | grep brbm1 > /dev/null || ovs-vsctl add-br brbm1
+  virsh net-list --all | grep brbm1 > /dev/null || virsh net-create $CONFIG/brbm1-net.xml
+  virsh net-list | grep -E "brbm1\s+active" > /dev/null || virsh net-start brbm1
 
   # ensure storage pool exists and is started
   virsh pool-list --all | grep default > /dev/null || virsh pool-create $CONFIG/default-pool.xml
@@ -165,6 +168,12 @@ function setup_instack_vm {
   echo -e "${blue}\rInstack VM has IP $UNDERCLOUD ${reset}"
 
   ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth1 | grep 192.0.2.1 > /dev/null; then ip a a 192.0.2.1/24 dev eth1; fi"
+
+  #add the instack brbm1 interface
+  virsh attach-interface --domain instack --type network --source brbm1 --model rtl8139 --config --live
+  sleep 1
+  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep 192.168.37.1 > /dev/null; then ip a a 192.168.37.252/24 dev eth2; ip link set up dev eth2; fi"
+
   # ssh key fix for stack user
   ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
 }
@@ -173,15 +182,15 @@ function setup_instack_vm {
 
 ##params: none
 function setup_virtual_baremetal {
   for i in $(seq 0 $vm_index); do
-    if ! virsh list --all | grep baremetalbrbm_${i} > /dev/null; then
-      if [ ! -e $CONFIG/baremetalbrbm_${i}.xml ]; then
-        define_virtual_node baremetalbrbm_${i}
+    if ! virsh list --all | grep baremetalbrbm_brbm1_${i} > /dev/null; then
+      if [ ! -e $CONFIG/baremetalbrbm_brbm1_${i}.xml ]; then
+        define_virtual_node baremetalbrbm_brbm1_${i}
       fi
-      virsh define $CONFIG/baremetalbrbm_${i}.xml
+      virsh define $CONFIG/baremetalbrbm_brbm1_${i}.xml
     else
       echo "Found Baremetal ${i} VM, using existing VM"
     fi
-    virsh vol-list default | grep baremetalbrbm_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_${i}.qcow2 40G --format qcow2
+    virsh vol-list default | grep baremetalbrbm_brbm1_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_${i}.qcow2 40G --format qcow2
   done
 }
@@ -199,7 +208,9 @@ function copy_materials {
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.initrd "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.vmlinuz "stack@$UNDERCLOUD":
+  scp ${SSH_OPTIONS[@]} $CONFIG/network-environment.yaml "stack@$UNDERCLOUD":
   scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.yaml "stack@$UNDERCLOUD":
+  scp ${SSH_OPTIONS[@]} -r $CONFIG/nics/ "stack@$UNDERCLOUD":
 
   ## WORK AROUND
   # when OpenDaylight lands in upstream RDO manager this can be removed
@@ -219,11 +230,12 @@ data = json.load(open('$CONFIG/instackenv-virt.json'))
 print data['nodes'][$i]['mac'][0]"
 
     old_mac=$(python -c "$pyscript")
-    new_mac=$(virsh dumpxml baremetalbrbm_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+")
-    if [ "$old_mac" != "$new_mac" ]; then
-      echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}"
-      sed -i 's/'"$old_mac"'/'"$new_mac"'/' $CONFIG/instackenv-virt.json
-    fi
+    new_mac=$(virsh dumpxml baremetalbrbm_brbm1_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+")
+    # this doesn't work with multiple vnics on the vms
+    #if [ "$old_mac" != "$new_mac" ]; then
+    #  echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}"
+    #  sed -i 's/'"$old_mac"'/'"$new_mac"'/' $CONFIG/instackenv-virt.json
+    #fi
   done
 
   # upload virt json file
@@ -273,7 +285,7 @@
 echo "Configuring nameserver on ctlplane network"
 neutron subnet-update \$(neutron subnet-list | grep -v id | grep -v \\\\-\\\\- | awk {'print \$2'}) --dns-nameserver 8.8.8.8
 echo "Executing overcloud deployment, this should run for an extended period without output."
 sleep 60 #wait for Hypervisor stats to check-in to nova
-openstack overcloud deploy --templates $DEPLOY_OPTIONS -e opendaylight.yaml
+openstack overcloud deploy --templates $DEPLOY_OPTIONS -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml -e network-environment.yaml
 EOI
 }
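
The awk filter added to build/instack.sh above is easy to miss: instack-virt-setup now gives each baremetal VM a NIC on each bridge, and the filter rewrites every second model type='virtio' in the dumped domain XML to rtl8139, so the brbm1-side NIC uses an emulated model. A minimal stand-alone illustration of that behaviour (hypothetical input lines, same awk program as in the diff):

printf "%s\n" "<model type='virtio'/>" "<model type='virtio'/>" \
  | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1'
# prints the first line unchanged and the second as <model type='rtl8139'/>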