| author | CNlukai <lukai1@huawei.com> | 2016-01-11 17:39:55 +0800 |
|---|---|---|
| committer | CNlukai <lukai1@huawei.com> | 2016-01-11 17:41:56 +0800 |
| commit | a94e0159d98d511be4b4da9afe077e1188cc6ce6 (patch) | |
| tree | 46015bb12d272deef477fb90ebbc8bef67f960f8 /framework/scripts/installer/apex | |
| parent | 5d28b35794d9bdd8def6dfdf0224420daaa05f11 (diff) | |
ONOSFW-157
Add script for integrating ONOS with the installer
Change-Id: I0ac05b6a7ede8654af2c2573ea7ecb3e5e7a9473
Signed-off-by: CNlukai <lukai1@huawei.com>
Diffstat (limited to 'framework/scripts/installer/apex')
-rw-r--r-- | framework/scripts/installer/apex/opnfv-tripleo-heat-templates.patch | 1155 |
-rw-r--r-- | framework/scripts/installer/apex/puppet-onos.rar | bin | 0 -> 99785 bytes |
2 files changed, 1155 insertions, 0 deletions
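For context, the diffstat covers the two artifacts this commit adds: a patch against the upstream tripleo-heat-templates and an archived puppet-onos module. Below is a minimal sketch of how such a patch is typically applied on the Apex undercloud before deployment; the template directory and the patch path are illustrative assumptions, not part of this commit.

    # Assumed installation path of tripleo-heat-templates on the undercloud; adjust to your environment.
    cd /usr/share/openstack-tripleo-heat-templates
    # Verify the patch applies cleanly before changing anything, then apply it.
    patch -p1 --dry-run < /path/to/opnfv-tripleo-heat-templates.patch
    patch -p1 < /path/to/opnfv-tripleo-heat-templates.patch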
diff --git a/framework/scripts/installer/apex/opnfv-tripleo-heat-templates.patch b/framework/scripts/installer/apex/opnfv-tripleo-heat-templates.patch new file mode 100644 index 00000000..be40742e --- /dev/null +++ b/framework/scripts/installer/apex/opnfv-tripleo-heat-templates.patch @@ -0,0 +1,1155 @@ +From 63f8b6412f526ba245d86f40eb6b1ae1ee06485d Mon Sep 17 00:00:00 2001 +From: Dan Radez <dradez@redhat.com> +Date: Sun, 13 Dec 2015 21:20:40 -0500 +Subject: [PATCH] Adds OpenDaylight support + +To enable OpenDaylight on controllers use environments/opendaylight.yaml +To enable OpenDaylight on external node use +environments/opendaylight-external.yaml + +Adds onos support +--- + environments/onos.yaml | 8 + + environments/opendaylight-external.yaml | 25 ++ + environments/opendaylight.yaml | 25 ++ + overcloud-resource-registry-puppet.yaml | 3 + + overcloud-without-mergepy.yaml | 62 +++++ + puppet/all-nodes-config.yaml | 6 + + puppet/compute.yaml | 25 ++ + puppet/controller.yaml | 35 +++ + puppet/manifests/overcloud_compute.pp | 33 ++- + puppet/manifests/overcloud_controller.pp | 80 +++++- + puppet/manifests/overcloud_controller_pacemaker.pp | 299 +++++++++++++-------- + puppet/manifests/overcloud_opendaylight.pp | 26 ++ + puppet/opendaylight-puppet.yaml | 209 ++++++++++++++ + 13 files changed, 712 insertions(+), 124 deletions(-) + create mode 100644 environments/onos.yaml + create mode 100644 environments/opendaylight-external.yaml + create mode 100644 environments/opendaylight.yaml + create mode 100644 puppet/manifests/overcloud_opendaylight.pp + create mode 100644 puppet/opendaylight-puppet.yaml + +diff --git a/environments/onos.yaml b/environments/onos.yaml +new file mode 100644 +index 0000000..510aca9 +--- /dev/null ++++ b/environments/onos.yaml +@@ -0,0 +1,8 @@ ++parameters: ++ #This a bug for odl deployment. Once bug fixed OpenDaylightCount can be remove. 
++ OpenDaylightCount: 0 ++ NeutronL3HA: false ++ ExtraConfig: ++ neutron_service_plugins: ['onos_router'] ++ neutron_mechanism_drivers: ['onos_ml2'] ++ neutron_tenant_network_type: vxlan +diff --git a/environments/opendaylight-external.yaml b/environments/opendaylight-external.yaml +new file mode 100644 +index 0000000..411df21 +--- /dev/null ++++ b/environments/opendaylight-external.yaml +@@ -0,0 +1,25 @@ ++# Environment file used to enable OpenDaylight ++# Currently uses overcloud image that is assumed ++# to be virt-customized with ODL RPM already on it ++ ++# These parameters customize the OpenDaylight Node ++# The user name and password are for the ODL service ++# Defaults are included here for reference ++#parameter_defaults: ++# OpenDaylightFlavor: baremetal ++# OpenDaylightHostname: opendaylight-server ++# OpenDaylightImage: overcloud-full ++# OpenDaylightUsername: admin ++# OpenDaylightPassword: admin ++ ++parameters: ++ # increase this if you need more ODL nodes ++ OpenDaylightCount: 1 ++ NeutronL3HA: false ++ ExtraConfig: ++ neutron_mechanism_drivers: ['opendaylight'] ++ neutron_tenant_network_type: vxlan ++ # Enable this if you want OpenDaylight on the contollers ++ # reduce OpenDaylightCount to 0 if you don't want any ++ # OpenDaylight only nodes ++ #opendaylight_install: true +diff --git a/environments/opendaylight.yaml b/environments/opendaylight.yaml +new file mode 100644 +index 0000000..c8abf75 +--- /dev/null ++++ b/environments/opendaylight.yaml +@@ -0,0 +1,25 @@ ++# Environment file used to enable OpenDaylight ++# Currently uses overcloud image that is assumed ++# to be virt-customized with ODL RPM already on it ++ ++# These parameters customize the OpenDaylight Node ++# The user name and password are for the ODL service ++# Defaults are included here for reference ++#parameter_defaults: ++# OpenDaylightFlavor: baremetal ++# OpenDaylightHostname: opendaylight-server ++# OpenDaylightImage: overcloud-full ++# OpenDaylightUsername: admin ++# OpenDaylightPassword: admin ++ ++parameters: ++ # increase this if you need more ODL nodes ++ # OpenDaylightCount: 1 ++ NeutronL3HA: false ++ ExtraConfig: ++ neutron_mechanism_drivers: ['opendaylight'] ++ neutron_tenant_network_type: vxlan ++ # Enable this if you want OpenDaylight on the contollers ++ # reduce OpenDaylightCount to 0 if you don't want any ++ # OpenDaylight only nodes ++ opendaylight_install: true +diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml +index c072c29..2413450 100644 +--- a/overcloud-resource-registry-puppet.yaml ++++ b/overcloud-resource-registry-puppet.yaml +@@ -27,6 +27,9 @@ resource_registry: + # To disable, replace with firstboot/userdata_default.yaml + OS::TripleO::NodeAdminUserData: firstboot/userdata_heat_admin.yaml + ++ # This configures OpenDaylight to drive the network ++ OS::TripleO::OpenDaylightNode: puppet/opendaylight-puppet.yaml ++ + # Hooks for operator extra config + # NodeUserData == Cloud-init additional user-data, e.g cloud-config + # ControllerExtraConfigPre == Controller configuration pre service deployment +diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml +index 01c0079..210ec11 100644 +--- a/overcloud-without-mergepy.yaml ++++ b/overcloud-without-mergepy.yaml +@@ -227,6 +227,23 @@ parameters: + default: false + description: Should MongoDb journaling be disabled + type: boolean ++ OpenDaylightPort: ++ default: 8081 ++ description: Set opendaylight service port ++ type: number ++ OpenDaylightInstall: ++ default: false 
++ description: Whether to install OpenDaylight on the control nodes. ++ type: boolean ++ OpenDaylightUsername: ++ default: 'admin' ++ description: The username for the opendaylight server. ++ type: string ++ OpenDaylightPassword: ++ default: 'admin' ++ type: string ++ description: The password for the opendaylight server. ++ hidden: true + PublicVirtualFixedIPs: + default: [] + description: > +@@ -650,6 +667,18 @@ parameters: + structure as ExtraConfig. + type: json + ++# OpenDaylight specific parameters ++ OpenDaylightCount: ++ type: number ++ default: 0 ++ OpenDaylightImage: ++ default: overcloud-full ++ type: string ++ OpenDaylightFlavor: ++ default: baremetal ++ description: Flavor for OpenDaylight node ++ type: string ++ + # Hostname format for each role + # Note %index% is translated into the index of the node, e.g 0/1/2 etc + # and %stackname% is replaced with OS::stack_name in the template below. +@@ -674,6 +703,10 @@ parameters: + type: string + description: Format for CephStorage node hostnames + default: '%stackname%-cephstorage-%index%' ++ OpenDaylightHostnameFormat: ++ type: string ++ description: Format for OpenDaylight node hostnames ++ default: '%stackname%-opendaylight-%index%' + + # Identifiers to trigger tasks on nodes + UpdateIdentifier: +@@ -756,6 +789,27 @@ resources: + SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} + PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]} + ++ OpenDaylightNode: ++ type: OS::Heat::ResourceGroup ++ depends_on: Networks ++ properties: ++ count: {get_param: OpenDaylightCount} ++ removal_policies: {get_param: ComputeRemovalPolicies} ++ resource_def: ++ type: OS::TripleO::OpenDaylightNode ++ properties: ++ UpdateIdentifier: {get_param: UpdateIdentifier} ++ OpenDaylightFlavor: {get_param: OpenDaylightFlavor} ++ OpenDaylightImage: {get_param: OpenDaylightImage} ++ OpenDaylightPort: {get_param: OpenDaylightPort} ++ OpenDaylightUsername: {get_param: OpenDaylightUsername} ++ OpenDaylightPassword: {get_param: OpenDaylightPassword} ++ OpenDaylightHostname: ++ str_replace: ++ template: {get_param: OpenDaylightHostnameFormat} ++ params: ++ '%stackname%': {get_param: 'OS::stack_name'} ++ + Controller: + type: OS::Heat::ResourceGroup + depends_on: Networks +@@ -839,6 +893,10 @@ resources: + NovaPassword: {get_param: NovaPassword} + NtpServer: {get_param: NtpServer} + MongoDbNoJournal: {get_param: MongoDbNoJournal} ++ OpenDaylightPort: {get_param: OpenDaylightPort} ++ OpenDaylightInstall: {get_param: OpenDaylightInstall} ++ OpenDaylightUsername: {get_param: OpenDaylightUsername} ++ OpenDaylightPassword: {get_param: OpenDaylightPassword} + PcsdPassword: {get_resource: PcsdPassword} + PublicVirtualInterface: {get_param: PublicVirtualInterface} + RabbitPassword: {get_param: RabbitPassword} +@@ -933,6 +991,9 @@ resources: + NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]} + NovaPassword: {get_param: NovaPassword} + NtpServer: {get_param: NtpServer} ++ OpenDaylightPort: {get_param: OpenDaylightPort} ++ OpenDaylightUsername: {get_param: OpenDaylightUsername} ++ OpenDaylightPassword: {get_param: OpenDaylightPassword} + RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} + RabbitPassword: {get_param: RabbitPassword} + RabbitUserName: {get_param: RabbitUserName} +@@ -1053,6 +1114,7 @@ resources: + compute_hosts: {get_attr: [Compute, hosts_entry]} + controller_hosts: {get_attr: [Controller, hosts_entry]} + controller_ips: {get_attr: [Controller, 
ip_address]} ++ opendaylight_ip: {get_attr: [OpenDaylightNode, ip_address]} + block_storage_hosts: {get_attr: [BlockStorage, hosts_entry]} + object_storage_hosts: {get_attr: [ObjectStorage, hosts_entry]} + ceph_storage_hosts: {get_attr: [CephStorage, hosts_entry]} +diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml +index 2bc519b..98283c2 100644 +--- a/puppet/all-nodes-config.yaml ++++ b/puppet/all-nodes-config.yaml +@@ -8,6 +8,8 @@ parameters: + type: comma_delimited_list + controller_ips: + type: comma_delimited_list ++ opendaylight_ip: ++ type: comma_delimited_list + block_storage_hosts: + type: comma_delimited_list + object_storage_hosts: +@@ -82,6 +84,10 @@ resources: + raw_data: {get_file: hieradata/RedHat.yaml} + all_nodes: + mapped_data: ++ opendaylight_controller_ip: ++ list_join: ++ - ',' ++ - {get_param: opendaylight_ip} + controller_node_ips: + list_join: + - ',' +diff --git a/puppet/compute.yaml b/puppet/compute.yaml +index e259cff..5527669 100644 +--- a/puppet/compute.yaml ++++ b/puppet/compute.yaml +@@ -213,6 +213,23 @@ parameters: + NtpServer: + type: string + default: '' ++ OpenDaylightPort: ++ default: 8081 ++ description: Set opendaylight service port ++ type: number ++ OpenDaylightUsername: ++ default: 'admin' ++ description: The username for the opendaylight server. ++ type: string ++ OpenDaylightPassword: ++ default: 'admin' ++ type: string ++ description: The password for the opendaylight server. ++ hidden: true ++ ONOSPort: ++ default: 8181 ++ description: Set onos service port ++ type: number + RabbitHost: + type: string + default: '' # Has to be here because of the ignored empty value bug +@@ -415,6 +432,10 @@ resources: + neutron::rabbit_user: {get_input: rabbit_username} + neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} + neutron::rabbit_port: {get_input: rabbit_client_port} ++ opendaylight_port: {get_input: opendaylight_port} ++ opendaylight_username: {get_input: opendaylight_username} ++ opendaylight_password: {get_input: opendaylight_password} ++ onos_port: {get_input: onos_port} + neutron_flat_networks: {get_input: neutron_flat_networks} + neutron_host: {get_input: neutron_host} + neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} +@@ -468,6 +489,10 @@ resources: + snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} + snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} ++ opendaylight_port: {get_param: OpenDaylightPort} ++ opendaylight_username: {get_param: OpenDaylightUsername} ++ opendaylight_password: {get_param: OpenDaylightPassword} ++ onos_port: {get_param: ONOSPort} + neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_host: {get_param: NeutronHost} + neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} +diff --git a/puppet/controller.yaml b/puppet/controller.yaml +index fdc1821..865a838 100644 +--- a/puppet/controller.yaml ++++ b/puppet/controller.yaml +@@ -443,6 +443,27 @@ parameters: + NtpServer: + type: string + default: '' ++ OpenDaylightPort: ++ default: 8081 ++ description: Set opendaylight service port ++ type: number ++ OpenDaylightInstall: ++ default: false ++ description: Whether to install OpenDaylight on the control nodes. ++ type: boolean ++ OpenDaylightUsername: ++ default: 'admin' ++ description: The username for the opendaylight server. 
++ type: string ++ OpenDaylightPassword: ++ default: 'admin' ++ type: string ++ description: The password for the opendaylight server. ++ hidden: true ++ ONOSPort: ++ default: 8181 ++ description: Set onos service port ++ type: number + PcsdPassword: + type: string + description: The password for the 'pcsd' user. +@@ -819,6 +840,11 @@ resources: + template: tripleo-CLUSTER + params: + CLUSTER: {get_param: MysqlClusterUniquePart} ++ opendaylight_port: {get_param: OpenDaylightPort} ++ opendaylight_install: {get_param: OpenDaylightInstall} ++ opendaylight_username: {get_param: OpenDaylightUsername} ++ opendaylight_password: {get_param: OpenDaylightPassword} ++ onos_port: {get_param: ONOSPort} + neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} + neutron_agent_mode: {get_param: NeutronAgentMode} +@@ -1151,6 +1177,15 @@ resources: + mysql_bind_host: {get_input: mysql_network} + mysql_virtual_ip: {get_input: mysql_virtual_ip} + ++ # OpenDaylight ++ opendaylight_port: {get_input: opendaylight_port} ++ opendaylight_install: {get_input: opendaylight_install} ++ opendaylight_username: {get_input: opendaylight_username} ++ opendaylight_password: {get_input: opendaylight_password} ++ ++ # ONOS ++ onos_port: {get_input: onos_port} ++ + # Neutron + neutron::bind_host: {get_input: neutron_api_network} + neutron::rabbit_password: {get_input: rabbit_password} +diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp +index cd41cc7..b8336ee 100644 +--- a/puppet/manifests/overcloud_compute.pp ++++ b/puppet/manifests/overcloud_compute.pp +@@ -75,9 +75,36 @@ class { '::neutron::plugins::ml2': + tenant_network_types => [hiera('neutron_tenant_network_type')], + } + +-class { '::neutron::agents::ml2::ovs': +- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), +- tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++if 'opendaylight' in hiera('neutron_mechanism_drivers') { ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ $controller_ips = split(hiera('controller_node_ips'), ',') ++ $opendaylight_controller_ip = $controller_ips[0] ++ } else { ++ $opendaylight_controller_ip = hiera('opendaylight_controller_ip') ++ } ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class { 'neutron::plugins::ovs::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ tunnel_ip => hiera('neutron::agents::ml2::ovs::local_ip'), ++ odl_port => hiera('opendaylight_port'), ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ } ++ } ++ ++} elsif 'onos_ml2' in hiera('neutron_mechanism_drivers') { ++ $controller_ips = split(hiera('controller_node_ips'), ',') ++ class {'onos::ovs_computer': ++ manager_ip => $controller_ips[0] ++ } ++ ++} else { ++ class { 'neutron::agents::ml2::ovs': ++ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), ++ tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ } + } + + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { +diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp +index 1b0429b..d3f3d2d 100644 +--- a/puppet/manifests/overcloud_controller.pp ++++ b/puppet/manifests/overcloud_controller.pp +@@ -30,6 +30,20 @@ if hiera('step') >= 1 { + + if hiera('step') >= 2 { + ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class {"opendaylight": ++ extra_features => ['odl-ovsdb-openstack'], ++ 
odl_rest_port => hiera('opendaylight_port'), ++ } ++ } ++ ++ if 'onos_ml2' in hiera('neutron_mechanism_drivers') { ++ # install onos and config ovs ++ class {"onos": ++ controllers_ip => $controller_node_ips ++ } ++ } ++ + if count(hiera('ntp::servers')) > 0 { + include ::ntp + } +@@ -223,9 +237,7 @@ + include ::nova::scheduler + include ::nova::scheduler::filter + +- include ::neutron + include ::neutron::server +- include ::neutron::agents::l3 + include ::neutron::agents::dhcp + include ::neutron::agents::metadata + +@@ -238,15 +250,71 @@ if hiera('step') >= 3 { + require => Package['neutron'], + } + ++ if 'onos_ml2' in hiera('neutron_mechanism_drivers') { ++ # config neutron service_plugins to onos driver ++ class { '::neutron': ++ service_plugins => [hiera('neutron_service_plugins')] ++ } ++ } else { ++ include ::neutron ++ include ::neutron::agents::l3 ++ } ++ + class { '::neutron::plugins::ml2': + flat_networks => split(hiera('neutron_flat_networks'), ','), + tenant_network_types => [hiera('neutron_tenant_network_type')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], + } +- class { '::neutron::agents::ml2::ovs': +- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), +- tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ ++ if 'opendaylight' in hiera('neutron_mechanism_drivers') { ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ $controller_ips = split(hiera('controller_node_ips'), ',') ++ $opendaylight_controller_ip = $controller_ips[0] ++ } else { ++ $opendaylight_controller_ip = hiera('opendaylight_controller_ip') ++ } ++ ++ class { 'neutron::plugins::ml2::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ odl_port => hiera('opendaylight_port'), ++ } ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class { 'neutron::plugins::ovs::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ tunnel_ip => hiera('neutron::agents::ml2::ovs::local_ip'), ++ odl_port => hiera('opendaylight_port'), ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ } ++ } ++ Service['neutron-server'] -> Service['neutron-l3'] ++ ++ } elsif 'onos_ml2' in hiera('neutron_mechanism_drivers') { ++ #config ml2_conf.ini with onos url address ++ $onos_port = hiera('onos_port') ++ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip') ++ ++ neutron_plugin_ml2 { ++ 'onos/username': value => 'admin'; ++ 'onos/password': value => 'admin'; ++ 'onos/url_path': value => "http://${controller_node_ips[0]}:${onos_port}/onos/vtn"; ++ } ++ ++ } else { ++ ++ class { 'neutron::agents::ml2::ovs': ++ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), ++ tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ } ++ ++ Service['neutron-server'] -> Service['neutron-ovs-agent-service'] ++ Service['neutron-server'] -> Service['neutron-l3'] + } ++ + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v + +@@ -281,8 +349,6 @@ if hiera('step') >= 3 { + } + + Service['neutron-server'] -> Service['neutron-dhcp-service'] +- Service['neutron-server'] -> Service['neutron-l3'] +- Service['neutron-server'] -> Service['neutron-ovs-agent-service'] + Service['neutron-server'] -> Service['neutron-metadata'] + + include ::cinder +diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp 
b/puppet/manifests/overcloud_controller_pacemaker.pp +index 863cc5f..5b1c37a 100644 +--- a/puppet/manifests/overcloud_controller_pacemaker.pp ++++ b/puppet/manifests/overcloud_controller_pacemaker.pp +@@ -380,6 +380,20 @@ if hiera('step') >= 2 { + + } + ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class {"opendaylight": ++ extra_features => ['odl-ovsdb-openstack'], ++ odl_rest_port => hiera('opendaylight_port'), ++ } ++ } ++ ++ if 'onos_ml2' in hiera('neutron_mechanism_drivers') { ++ # install onos and config ovs ++ class {"onos": ++ controllers_ip => $controller_node_ips ++ } ++ } ++ + exec { 'galera-ready' : + command => '/usr/bin/clustercheck >/dev/null', + timeout => 30, +@@ -584,7 +598,14 @@ if hiera('step') >= 3 { + include ::nova::network::neutron + + # Neutron class definitions +- include ::neutron ++ if 'onos_ml2' in hiera('neutron_mechanism_drivers') { ++ # config neutron service_plugins to onos driver ++ class { '::neutron': ++ service_plugins => [hiera('neutron_service_plugins')] ++ } ++ } else { ++ include ::neutron ++ } + class { '::neutron::server' : + sync_db => $sync_db, + manage_service => false, +@@ -595,10 +616,6 @@ if hiera('step') >= 3 { + manage_service => false, + enabled => false, + } +- class { '::neutron::agents::l3' : +- manage_service => false, +- enabled => false, +- } + class { '::neutron::agents::metadata': + manage_service => false, + enabled => false, +@@ -610,18 +627,66 @@ if hiera('step') >= 3 { + notify => Service['neutron-dhcp-service'], + require => Package['neutron'], + } ++ + class { '::neutron::plugins::ml2': + flat_networks => split(hiera('neutron_flat_networks'), ','), + tenant_network_types => [hiera('neutron_tenant_network_type')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], + } +- class { '::neutron::agents::ml2::ovs': +- manage_service => false, +- enabled => false, +- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), +- tunnel_types => split(hiera('neutron_tunnel_types'), ','), +- } ++ if 'opendaylight' in hiera('neutron_mechanism_drivers') { ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ $controller_ips = split(hiera('controller_node_ips'), ',') ++ $opendaylight_controller_ip = $controller_ips[0] ++ } else { ++ $opendaylight_controller_ip = hiera('opendaylight_controller_ip') ++ } ++ ++ $opendaylight_port = hiera('opendaylight_port') ++ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip') ++ ++ class { 'neutron::plugins::ml2::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ odl_port => hiera('opendaylight_port'), ++ } ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class { 'neutron::plugins::ovs::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ tunnel_ip => hiera('neutron::agents::ml2::ovs::local_ip'), ++ odl_port => hiera('opendaylight_port'), ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ } ++ } ++ class { '::neutron::agents::l3' : ++ manage_service => false, ++ enabled => false, ++ } ++ } elsif 'onos_ml2' in hiera('neutron_mechanism_drivers') { ++ #config ml2_conf.ini with onos url address ++ $onos_port = hiera('onos_port') ++ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip') ++ ++ neutron_plugin_ml2 { ++ 'onos/username': value => 'admin'; ++ 'onos/password': value => 'admin'; ++ 'onos/url_path': value => 
"http://${controller_node_ips[0]}:${onos_port}/onos/vtn"; ++ } + ++ } else { ++ class { '::neutron::agents::l3' : ++ manage_service => false, ++ enabled => false, ++ } ++ class { 'neutron::agents::ml2::ovs': ++ manage_service => false, ++ enabled => false, ++ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), ++ tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ } ++ } + if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::ucsm + } +@@ -646,8 +711,10 @@ if hiera('step') >= 3 { + if hiera('neutron_enable_bigswitch_ml2', false) { + include ::neutron::plugins::ml2::bigswitch::restproxy + } +- neutron_l3_agent_config { +- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); ++ if !('onos_ml2' in hiera('neutron_mechanism_drivers')) { ++ neutron_l3_agent_config { ++ 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); ++ } + } + neutron_dhcp_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); +@@ -1073,62 +1140,21 @@ if hiera('step') >= 4 { + require => Pacemaker::Resource::Service[$::keystone::params::service_name] + } + } +- pacemaker::resource::service { $::neutron::params::l3_agent_service: +- clone_params => 'interleave=true', ++ if !('onos_ml2' in hiera('neutron_mechanism_drivers')) { ++ pacemaker::resource::service { $::neutron::params::l3_agent_service: ++ clone_params => 'interleave=true', ++ } + } + pacemaker::resource::service { $::neutron::params::dhcp_agent_service: + clone_params => 'interleave=true', + } +- pacemaker::resource::service { $::neutron::params::ovs_agent_service: +- clone_params => 'interleave=true', +- } + pacemaker::resource::service { $::neutron::params::metadata_agent_service: + clone_params => 'interleave=true', + } +- pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: +- ocf_agent_name => 'neutron:OVSCleanup', +- clone_params => 'interleave=true', +- } + pacemaker::resource::ocf { 'neutron-netns-cleanup': + ocf_agent_name => 'neutron:NetnsCleanup', + clone_params => 'interleave=true', + } +- +- # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent +- pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::ovs_cleanup_service}-clone", +- second_resource => 'neutron-netns-cleanup-clone', +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], +- Pacemaker::Resource::Ocf['neutron-netns-cleanup']], +- } +- pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': +- source => 'neutron-netns-cleanup-clone', +- target => "${::neutron::params::ovs_cleanup_service}-clone", +- score => 'INFINITY', +- require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], +- Pacemaker::Resource::Ocf['neutron-netns-cleanup']], +- } +- pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': +- constraint_type => 'order', +- first_resource => 'neutron-netns-cleanup-clone', +- second_resource => "${::neutron::params::ovs_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], +- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], +- } +- pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': +- source => 
"${::neutron::params::ovs_agent_service}-clone", +- target => 'neutron-netns-cleanup-clone', +- score => 'INFINITY', +- require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], +- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], +- } +- +- #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 + pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", +@@ -1138,65 +1164,110 @@ if hiera('step') >= 4 { + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], + } +- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::server_service}-clone", +- second_resource => "${::neutron::params::ovs_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Service[$::neutron::params::server_service], +- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], +- } +- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::ovs_agent_service}-clone", +- second_resource => "${::neutron::params::dhcp_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], ++ if 'openvswitch' in hiera('neutron_mechanism_drivers') { ++ pacemaker::resource::service { $::neutron::params::ovs_agent_service: ++ clone_params => "interleave=true", ++ } ++ pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: ++ ocf_agent_name => "neutron:OVSCleanup", ++ clone_params => "interleave=true", ++ } ++ # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent ++ pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': ++ constraint_type => "order", ++ first_resource => "${::neutron::params::ovs_cleanup_service}-clone", ++ second_resource => "neutron-netns-cleanup-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], ++ Pacemaker::Resource::Ocf['neutron-netns-cleanup']], ++ } ++ pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': ++ source => "neutron-netns-cleanup-clone", ++ target => "${::neutron::params::ovs_cleanup_service}-clone", ++ score => "INFINITY", ++ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], ++ Pacemaker::Resource::Ocf['neutron-netns-cleanup']], ++ } ++ pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': ++ constraint_type => "order", ++ first_resource => "neutron-netns-cleanup-clone", ++ second_resource => "${::neutron::params::ovs_agent_service}-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], ++ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], ++ } ++ pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': ++ source => "${::neutron::params::ovs_agent_service}-clone", ++ target => "neutron-netns-cleanup-clone", ++ score => "INFINITY", ++ require => 
[Pacemaker::Resource::Ocf["neutron-netns-cleanup"], ++ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], ++ } + ++ #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 ++ pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': ++ constraint_type => "order", ++ first_resource => "${::neutron::params::server_service}-clone", ++ second_resource => "${::neutron::params::ovs_agent_service}-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Service[$::neutron::params::server_service], ++ Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], ++ } ++ pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': ++ constraint_type => "order", ++ first_resource => "${::neutron::params::ovs_agent_service}-clone", ++ second_resource => "${::neutron::params::dhcp_agent_service}-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], ++ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], ++ ++ } ++ pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': ++ source => "${::neutron::params::dhcp_agent_service}-clone", ++ target => "${::neutron::params::ovs_agent_service}-clone", ++ score => "INFINITY", ++ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], ++ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], ++ } + } +- pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': +- source => "${::neutron::params::dhcp_agent_service}-clone", +- target => "${::neutron::params::ovs_agent_service}-clone", +- score => 'INFINITY', +- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], +- } +- pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::dhcp_agent_service}-clone", +- second_resource => "${::neutron::params::l3_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], +- } +- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': +- source => "${::neutron::params::l3_agent_service}-clone", +- target => "${::neutron::params::dhcp_agent_service}-clone", +- score => 'INFINITY', +- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], +- } +- pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::l3_agent_service}-clone", +- second_resource => "${::neutron::params::metadata_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], +- } +- pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': +- source => "${::neutron::params::metadata_agent_service}-clone", +- target => "${::neutron::params::l3_agent_service}-clone", +- score 
=> 'INFINITY', +- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], ++ if !('onos_ml2' in hiera('neutron_mechanism_drivers')) { ++ pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': ++ constraint_type => 'order', ++ first_resource => "${::neutron::params::dhcp_agent_service}-clone", ++ second_resource => "${::neutron::params::l3_agent_service}-clone", ++ first_action => 'start', ++ second_action => 'start', ++ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], ++ Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], ++ } ++ pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': ++ source => "${::neutron::params::l3_agent_service}-clone", ++ target => "${::neutron::params::dhcp_agent_service}-clone", ++ score => 'INFINITY', ++ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], ++ Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], ++ } ++ pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': ++ constraint_type => 'order', ++ first_resource => "${::neutron::params::l3_agent_service}-clone", ++ second_resource => "${::neutron::params::metadata_agent_service}-clone", ++ first_action => 'start', ++ second_action => 'start', ++ require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], ++ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], ++ } ++ pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': ++ source => "${::neutron::params::metadata_agent_service}-clone", ++ target => "${::neutron::params::l3_agent_service}-clone", ++ score => 'INFINITY', ++ require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], ++ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], ++ } + } +- + # Nova + pacemaker::resource::service { $::nova::params::api_service_name : + clone_params => 'interleave=true', +diff --git a/puppet/manifests/overcloud_opendaylight.pp b/puppet/manifests/overcloud_opendaylight.pp +new file mode 100644 +index 0000000..aea6568 +--- /dev/null ++++ b/puppet/manifests/overcloud_opendaylight.pp +@@ -0,0 +1,26 @@ ++# Copyright 2015 Red Hat, Inc. ++# All Rights Reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++include ::tripleo::packages ++ ++if count(hiera('ntp::servers')) > 0 { ++ include ::ntp ++} ++ ++class {"opendaylight": ++ extra_features => ['odl-ovsdb-openstack'], ++ odl_rest_port => hiera('opendaylight_port'), ++} ++ +diff --git a/puppet/opendaylight-puppet.yaml b/puppet/opendaylight-puppet.yaml +new file mode 100644 +index 0000000..70f2543 +--- /dev/null ++++ b/puppet/opendaylight-puppet.yaml +@@ -0,0 +1,209 @@ ++heat_template_version: 2015-04-30 ++ ++description: > ++ OpenDaylight node configured by Puppet. 
++ ++parameters: ++ OpenDaylightFlavor: ++ default: baremetal ++ description: The flavor to use for the OpenDaylight node ++ type: string ++ OpenDaylightImage: ++ default: overcloud-full ++ description: The image to use for the OpenDaylight node ++ type: string ++ OpenDaylightHostname: ++ default: opendaylight-server ++ description: The hostname to use for the OpenDaylight node ++ type: string ++ OpenDaylightUsername: ++ default: admin ++ description: The admin user for the OpenDaylight node ++ type: string ++ OpenDaylightPassword: ++ default: '' ++ description: The admin password for the OpenDaylight node ++ type: string ++ hidden: true ++ OpenDaylightPort: ++ default: 8081 ++ description: Set OpenDaylight service port ++ type: number ++ KeyName: ++ description: The keypair to use for SSH access to the node (via heat-admin user) ++ type: string ++ default: default ++ constraints: ++ - custom_constraint: nova.keypair ++ ImageUpdatePolicy: ++ default: 'REBUILD_PRESERVE_EPHEMERAL' ++ description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. ++ type: string ++ UpdateIdentifier: ++ default: '' ++ type: string ++ description: > ++ Setting to a previously unused value during stack-update will trigger ++ package update on all nodes ++ NtpServer: ++ type: string ++ default: '' ++ PublicInterface: ++ default: nic1 ++ description: What interface to bridge onto br-ex for network nodes. ++ type: string ++ ++resources: ++ OpenDaylightNode: ++ type: OS::Nova::Server ++ properties: ++ image: {get_param: OpenDaylightImage} ++ image_update_policy: {get_param: ImageUpdatePolicy} ++ flavor: {get_param: OpenDaylightFlavor} ++ key_name: {get_param: KeyName} ++ networks: ++ - network: ctlplane ++ user_data_format: SOFTWARE_CONFIG ++ user_data: {get_resource: NodeUserData} ++ name: {get_param: OpenDaylightHostname} ++ ++ NodeUserData: ++ type: OS::TripleO::NodeUserData ++ ++ ExternalPort: ++ type: OS::TripleO::Controller::Ports::ExternalPort ++ properties: ++ ControlPlaneIP: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ++ InternalApiPort: ++ type: OS::TripleO::Controller::Ports::InternalApiPort ++ properties: ++ ControlPlaneIP: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ++ NetIpMap: ++ type: OS::TripleO::Network::Ports::NetIpMap ++ properties: ++ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ExternalIp: {get_attr: [ExternalPort, ip_address]} ++ InternalApiIp: {get_attr: [InternalApiPort, ip_address]} ++ ++ NetIpSubnetMap: ++ type: OS::TripleO::Network::Ports::NetIpSubnetMap ++ properties: ++ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ++ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} ++ ++ NetworkConfig: ++ type: OS::TripleO::Controller::Net::SoftwareConfig ++ properties: ++ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ++ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} ++ ++ NetworkDeployment: ++ type: OS::TripleO::SoftwareDeployment ++ properties: ++ config: {get_resource: NetworkConfig} ++ server: {get_resource: OpenDaylightNode} ++ input_values: ++ bridge_name: br-ex ++ interface_name: {get_param: PublicInterface} ++ ++ OpenDaylightDeployment: ++ type: OS::TripleO::SoftwareDeployment ++ depends_on: NetworkDeployment ++ properties: ++ config: {get_resource: OpenDaylightConfig} ++ server: 
{get_resource: OpenDaylightNode} ++ input_values: ++ ntp_servers: ++ str_replace: ++ template: '["server"]' ++ params: ++ server: {get_param: NtpServer} ++ opendaylight_port: {get_param: OpenDaylightPort} ++ ++ OpenDaylightConfig: ++ type: OS::Heat::StructuredConfig ++ properties: ++ group: os-apply-config ++ config: ++ hiera: ++ hierarchy: ++ - '"%{::uuid}"' ++ - heat_config_%{::deploy_config_name} ++ - extraconfig ++ - bootstrap_node # provided by BootstrapNodeConfig ++ - all_nodes # provided by allNodesConfig ++ - vip_data # provided by vip-config ++ - RedHat # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1236143 ++ - common ++ datafiles: ++ common: ++ raw_data: {get_file: hieradata/common.yaml} ++ mapped_data: ++ ntp::servers: {get_input: ntp_servers} ++ opendaylight::admin_username: {get_param: OpenDaylightUsername} ++ opendaylight::admin_password: {get_param: OpenDaylightPassword} ++ opendaylight_port: {get_input: opendaylight_port} ++ ceph: ++ raw_data: {get_file: hieradata/ceph.yaml} ++ ++ UpdateConfig: ++ type: OS::TripleO::Tasks::PackageUpdate ++ ++ UpdateDeployment: ++ type: OS::Heat::SoftwareDeployment ++ properties: ++ config: {get_resource: UpdateConfig} ++ server: {get_resource: OpenDaylightNode} ++ input_values: ++ update_identifier: ++ get_param: UpdateIdentifier ++ ++ OpenDaylightHostsConfig: ++ type: OS::Heat::SoftwareConfig ++ properties: ++ group: script ++ config: | ++ #!/usr/bin/env bash ++ echo -e "$(facter ipaddress)\t\t$(hostname -f)\t$(hostname -s)" >> /etc/hosts ++ ++ OpenDaylightHostsDeployment: ++ type: OS::Heat::StructuredDeployment ++ depends_on: OpenDaylightDeployment ++ properties: ++ server: {get_resource: OpenDaylightNode} ++ config: {get_resource: OpenDaylightHostsConfig} ++ ++ OpenDaylightPuppetConfig: ++ type: OS::Heat::SoftwareConfig ++ properties: ++ group: puppet ++ config: ++ get_file: manifests/overcloud_opendaylight.pp ++ ++ OpenDaylightPuppetDeployment: ++ depends_on: OpenDaylightHostsDeployment ++ type: OS::Heat::StructuredDeployment ++ properties: ++ server: {get_resource: OpenDaylightNode} ++ config: {get_resource: OpenDaylightPuppetConfig} ++ input_values: ++ update_identifier: {get_param: UpdateIdentifier} ++ ++outputs: ++ ip_address: ++ description: IP address of the server in the ctlplane network ++ value: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ opendaylight_controller_ip: ++ description: IP address of the server on the internal network ++ value: {get_attr: [InternalApiPort, ip_address]} ++ config_identifier: ++ description: identifier which changes if the node configuration may need re-applying ++ value: ++ list_join: ++ - ',' ++ - - {get_attr: [OpenDaylightDeployment, deploy_stdout]} ++ - {get_param: UpdateIdentifier} +-- +2.5.0 + diff --git a/framework/scripts/installer/apex/puppet-onos.rar b/framework/scripts/installer/apex/puppet-onos.rar Binary files differnew file mode 100644 index 00000000..fcb14faa --- /dev/null +++ b/framework/scripts/installer/apex/puppet-onos.rar |
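The environments/onos.yaml file added by this patch is intended to be passed to the overcloud deployment, so that Neutron is configured with the onos_ml2 mechanism driver and the onos_router service plugin. A hedged usage sketch, assuming a plain TripleO deploy invocation (the exact command and extra environment files used by Apex may differ):

    # Illustrative only: include the ONOS environment when deploying the overcloud.
    openstack overcloud deploy --templates \
      -e environments/onos.yaml
    # Add your usual network-isolation and storage environment files as needed.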