29 files changed, 232 insertions, 155 deletions
diff --git a/ci/common/net-config-multinode-os-net-config.yaml b/ci/common/net-config-multinode-os-net-config.yaml index 9d45a9ff..316862ee 100644 --- a/ci/common/net-config-multinode-os-net-config.yaml +++ b/ci/common/net-config-multinode-os-net-config.yaml @@ -56,14 +56,14 @@ resources: function network_config_hook { primary_private_ip=$(cat /etc/nodepool/primary_node_private) sed -i "s/primary_private_ip/$primary_private_ip/" /etc/os-net-config/config.json - subnode_private_ip=$(cat /etc/nodepool/node_private) + subnode_private_ip=$(cat /etc/nodepool/sub_nodes_private) sed -i "s/subnode_private_ip/$subnode_private_ip/" /etc/os-net-config/config.json # We start with an arbitrarily high vni key so that we don't # overlap with Neutron created values. These will also match the # values that we've been using previously from the devstack-gate # code. vni=1000002 - subnode_index=$(grep -n $(cat /etc/nodepool/node_private) /etc/nodepool/sub_nodes_private | cut -d: -f1) + subnode_index=$(grep -n $(cat /etc/nodepool/sub_nodes_private) /etc/nodepool/sub_nodes_private | cut -d: -f1) let vni+=$subnode_index sed -i "s/vni/$vni/" /etc/os-net-config/config.json export interface_name="br-ex_$primary_private_ip" diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml index bf7841be..34f56cdf 100644 --- a/docker/services/nova-compute.yaml +++ b/docker/services/nova-compute.yaml @@ -63,7 +63,6 @@ resources: DefaultPasswords: {get_param: DefaultPasswords} RoleName: {get_param: RoleName} RoleParameters: {get_param: RoleParameters} - MigrationSshPort: {get_param: DockerNovaMigrationSshdPort} outputs: role_data: diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml index d20c093d..c059cfaf 100644 --- a/docker/services/nova-libvirt.yaml +++ b/docker/services/nova-libvirt.yaml @@ -105,7 +105,6 @@ resources: DefaultPasswords: {get_param: DefaultPasswords} RoleName: {get_param: RoleName} RoleParameters: {get_param: RoleParameters} - MigrationSshPort: {get_param: DockerNovaMigrationSshdPort} outputs: role_data: diff --git a/docker/services/nova-migration-target.yaml b/docker/services/nova-migration-target.yaml index 385343a0..904a042f 100644 --- a/docker/services/nova-migration-target.yaml +++ b/docker/services/nova-migration-target.yaml @@ -41,6 +41,29 @@ parameters: description: Port that dockerized nova migration target sshd service binds to. type: number + MigrationSshKey: + type: json + description: > + SSH key for migration. + Expects a dictionary with keys 'public_key' and 'private_key'. + Values should be identical to SSH public/private key files. + default: + public_key: '' + private_key: '' + MigrationSshPort: + default: 2022 + description: Target port for migration over ssh + type: number + +conditions: + + # During Ocata->Pike upgrade initially configure the ssh service on port 22 + # to proxy migration commands to the containerized sshd on port 2022. + # When the upgrade converges we can switch migrations over to port 2022. 
+ enable_migration_proxy: + equals: + - {get_param: MigrationSshPort} + - 22 resources: @@ -74,10 +97,15 @@ outputs: map_merge: - get_attr: [SshdBase, role_data, config_settings] - get_attr: [NovaMigrationTargetBase, role_data, config_settings] - - tripleo.nova_migration_target.firewall_rules: - '113 nova_migration_target': - dport: - - {get_param: DockerNovaMigrationSshdPort} + # NB this prevents the baremetal ssh from listening on port 2022 + # It doesn't affect the sshd port in the container as we override it below on the sshd cli + - tripleo::profile::base::sshd::port: 22 + - if: + - enable_migration_proxy + - tripleo::profile::base::nova::migration::proxy::ssh_private_key: {get_param: [ MigrationSshKey, private_key ]} + tripleo::profile::base::nova::migration::proxy::target_port: {get_param: DockerNovaMigrationSshdPort} + tripleo::profile::base::nova::migration::proxy::target_host: "%{hiera('live_migration_ssh_inbound_addr')}" + - {} step_config: &step_config list_join: - "\n" diff --git a/docker/services/sshd.yaml b/docker/services/sshd.yaml new file mode 100644 index 00000000..6f57f13d --- /dev/null +++ b/docker/services/sshd.yaml @@ -0,0 +1,72 @@ +heat_template_version: pike + +description: > + Configure sshd_config + +parameters: + ServiceData: + default: {} + description: Dictionary packing service data + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + RoleName: + default: '' + description: Role name on which the service is applied + type: string + RoleParameters: + default: {} + description: Parameters specific to the role + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MigrationSshPort: + default: 2022 + description: Target port for migration over ssh + type: number + +conditions: + + # During Ocata->Pike upgrade initially configure the ssh service on port 22 + # to proxy migration commands to the containerized sshd on port 2022. + # When the upgrade converges we can switch migrations over to port 2022. 
+ enable_migration_proxy: + equals: + - {get_param: MigrationSshPort} + - 22 + +resources: + SshdBase: + type: ../../puppet/services/sshd.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + RoleName: {get_param: RoleName} + RoleParameters: {get_param: RoleParameters} + +outputs: + role_data: + description: Role data for the ssh + value: + service_name: sshd + config_settings: {get_attr: [SshdBase, role_data, config_settings]} + step_config: + list_join: + - "\n" + - - get_attr: [SshdBase, role_data, step_config] + - if: + - enable_migration_proxy + - | + include tripleo::profile::base::nova::migration::proxy + - '' diff --git a/environments/docker-services-tls-everywhere.yaml b/environments/docker-services-tls-everywhere.yaml index ba190e7c..342f2781 100644 --- a/environments/docker-services-tls-everywhere.yaml +++ b/environments/docker-services-tls-everywhere.yaml @@ -49,6 +49,7 @@ resource_registry: OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml + OS::TripleO::Services::Sshd: ../docker/services/sshd.yaml OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml diff --git a/environments/docker.yaml b/environments/docker.yaml index 06e3d3ab..57379925 100644 --- a/environments/docker.yaml +++ b/environments/docker.yaml @@ -32,6 +32,7 @@ resource_registry: OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml + OS::TripleO::Services::Sshd: ../docker/services/sshd.yaml OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml diff --git a/environments/major-upgrade-composable-steps-docker.yaml b/environments/major-upgrade-composable-steps-docker.yaml index 888e2705..1b7f17e0 100644 --- a/environments/major-upgrade-composable-steps-docker.yaml +++ b/environments/major-upgrade-composable-steps-docker.yaml @@ -9,3 +9,4 @@ parameter_defaults: set -eu # Ocata to Pike, put any needed host-level workarounds here yum install -y ansible-pacemaker + MigrationSshPort: 22 diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml index db83f906..5433572c 100644 --- a/environments/major-upgrade-composable-steps.yaml +++ b/environments/major-upgrade-composable-steps.yaml @@ -14,3 +14,4 @@ parameter_defaults: rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml rm -f /usr/libexec/os-refresh-config/configure.d/40-hiera-datafiles rm -f /etc/puppet/hieradata/*.yaml + MigrationSshPort: 22 diff --git a/environments/major-upgrade-converge-docker.yaml b/environments/major-upgrade-converge-docker.yaml index 668f8a94..3f0a3ecd 100644 --- a/environments/major-upgrade-converge-docker.yaml +++ b/environments/major-upgrade-converge-docker.yaml @@ -8,3 +8,4 @@ parameter_defaults: UpgradeLevelNovaCompute: '' UpgradeInitCommonCommand: '' UpgradeInitCommand: '' + MigrationSshPort: 2022 diff --git 
a/environments/major-upgrade-converge.yaml b/environments/major-upgrade-converge.yaml index 668f8a94..3f0a3ecd 100644 --- a/environments/major-upgrade-converge.yaml +++ b/environments/major-upgrade-converge.yaml @@ -8,3 +8,4 @@ parameter_defaults: UpgradeLevelNovaCompute: '' UpgradeInitCommonCommand: '' UpgradeInitCommand: '' + MigrationSshPort: 2022 diff --git a/environments/nova-nuage-config.yaml b/environments/nova-nuage-config.yaml index 5e75ed9e..e8e3aaa4 100644 --- a/environments/nova-nuage-config.yaml +++ b/environments/nova-nuage-config.yaml @@ -1,8 +1,7 @@ # A Heat environment file which can be used to enable # Nuage backend on the compute, configured via puppet resource_registry: - OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml - OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-nuage.yaml + OS::TripleO::Services::ComputeNeutronCorePlugin: OS::TripleO::Services::ComputeNeutronCorePluginNuage parameter_defaults: NuageActiveController: '0.0.0.0' diff --git a/environments/ssl/tls-endpoints-public-dns.yaml b/environments/ssl/tls-endpoints-public-dns.yaml index 3b3ddc16..d1bd2d20 100644 --- a/environments/ssl/tls-endpoints-public-dns.yaml +++ b/environments/ssl/tls-endpoints-public-dns.yaml @@ -77,9 +77,9 @@ parameter_defaults: OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'} - PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'} + PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'} SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'} diff --git a/environments/ssl/tls-endpoints-public-ip.yaml b/environments/ssl/tls-endpoints-public-ip.yaml index bca6a891..b776f648 100644 --- a/environments/ssl/tls-endpoints-public-ip.yaml +++ b/environments/ssl/tls-endpoints-public-ip.yaml @@ -77,9 +77,9 @@ parameter_defaults: OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'} - PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'} + PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoPublic: {protocol: 'https', port: '13977', host: 'IP_ADDRESS'} SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'} diff --git a/environments/ssl/tls-everywhere-endpoints-dns.yaml b/environments/ssl/tls-everywhere-endpoints-dns.yaml index e3fe608b..e0cfc9e6 100644 --- a/environments/ssl/tls-everywhere-endpoints-dns.yaml +++ b/environments/ssl/tls-everywhere-endpoints-dns.yaml @@ -77,9 +77,9 @@ 
parameter_defaults: OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'} OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'} OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'} - PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'} - PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'} - PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'} + PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'} + PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'} + PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'} SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'} SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'} SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'} diff --git a/network/networks.j2.yaml b/network/networks.j2.yaml index 48c509df..1a170045 100644 --- a/network/networks.j2.yaml +++ b/network/networks.j2.yaml @@ -4,8 +4,7 @@ description: Create networks to split out Overcloud traffic resources: {%- for network in networks %} - {%- set network_name = network.compat_name|default(network.name) %} - {{network_name}}Network: + {{network.name}}Network: type: OS::TripleO::Network::{{network.name}} {%- endfor %} @@ -19,9 +18,8 @@ outputs: # NOTE(gfidente): we need to replace the null value with a # string to work around https://bugs.launchpad.net/heat/+bug/1700025 {%- for network in networks %} - {%- set network_name = network.compat_name|default(network.name) %} {{network.name_lower}}: yaql: - data: {get_attr: [{{network_name}}Network, subnet_cidr]} + data: {get_attr: [{{network.name}}Network, subnet_cidr]} expression: str($.data).replace('null', 'disabled') {%- endfor %} diff --git a/network/ports/net_ip_list_map.j2.yaml b/network/ports/net_ip_list_map.j2.yaml index e929ab2c..16bd3986 100644 --- a/network/ports/net_ip_list_map.j2.yaml +++ b/network/ports/net_ip_list_map.j2.yaml @@ -21,30 +21,6 @@ parameters: NetworkHostnameMap: default: [] type: json - - InternalApiNetName: - default: internal_api - description: The name of the internal_api network. - type: string - ExternalNetName: - default: external - description: The name of the external network. - type: string - ManagementNetName: - default: management - description: The name of the management network. - type: string - StorageNetName: - default: storage - description: The name of the storage network. - type: string - StorageMgmtNetName: - default: storage_mgmt - description: The name of the storage_mgmt network. - type: string - TenantNetName: - default: tenant - description: The name of the tenant network. 
{%- for network in networks %} {{network.name}}NetName: default: {{network.name_lower}} diff --git a/network_data.yaml b/network_data.yaml index bce82cb2..90293ab3 100644 --- a/network_data.yaml +++ b/network_data.yaml @@ -58,7 +58,6 @@ allocation_pools: [{'start': '172.16.2.4', 'end': '172.16.2.250'}] ipv6_subnet: 'fd00:fd00:fd00:2000::/64' ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}] - compat_name: Internal - name: Storage vip: true name_lower: storage diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index e5ae5279..c8bdf9e4 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -152,6 +152,7 @@ resource_registry: # can be the same as NeutronCorePlugin but some vendors install different # things where VMs run OS::TripleO::Services::ComputeNeutronCorePlugin: puppet/services/neutron-plugin-ml2.yaml + OS::TripleO::Services::ComputeNeutronCorePluginNuage: puppet/services/neutron-compute-plugin-nuage.yaml # Neutron Core Plugin Vendors (these typically override NeutronCorePlugin) OS::TripleO::Services::NeutronCorePluginML2OVN: puppet/services/neutron-plugin-ml2-ovn.yaml OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml deleted file mode 100644 index ea2fd71c..00000000 --- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml +++ /dev/null @@ -1,92 +0,0 @@ -heat_template_version: pike - -description: Configure hieradata for Nuage configuration on the Compute - -parameters: - server: - description: ID of the compute node to apply this config to - type: string - - NuageActiveController: - description: IP address of the Active Virtualized Services Controller (VSC) - type: string - NuageStandbyController: - description: IP address of the Standby Virtualized Services Controller (VSC) - type: string - NuageMetadataPort: - description: TCP Port to listen for metadata server requests - type: string - default: '9697' - NuageNovaMetadataPort: - description: TCP Port used by Nova metadata server - type: string - default: '8775' - NuageMetadataProxySharedSecret: - description: Shared secret to sign the instance-id request - type: string - NuageNovaClientVersion: - description: Client Version Nova - type: string - default: '2' - NuageNovaOsUsername: - description: Nova username in keystone_authtoken - type: string - default: 'nova' - NuageMetadataAgentStartWithOvs: - description: Set to True if nuage-metadata-agent needs to be started with nuage-openvswitch-switch - type: string - default: 'True' - NuageNovaApiEndpoint: - description: One of publicURL, internalURL, adminURL in "keystone endpoint-list" - type: string - default: 'publicURL' - NuageNovaRegionName: - description: Region name in "keystone endpoint-list" - type: string - default: 'regionOne' - -# Declaration of resources for the template. 
-resources: - NovaNuageConfig: - type: OS::Heat::StructuredConfig - properties: - group: hiera - config: - datafiles: - nova_nuage_data: - mapped_data: - nuage::vrs::active_controller: {get_input: ActiveController} - nuage::vrs::standby_controller: {get_input: StandbyController} - nuage::metadataagent::metadata_port: {get_input: MetadataPort} - nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} - nuage::metadataagent::metadata_secret: {get_input: SharedSecret} - nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} - nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} - nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} - nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} - nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} - - NovaNuageDeployment: - type: OS::Heat::StructuredDeployment - properties: - name: NovaNuageDeployment - config: {get_resource: NovaNuageConfig} - server: {get_param: server} - input_values: - ActiveController: {get_param: NuageActiveController} - StandbyController: {get_param: NuageStandbyController} - MetadataPort: {get_param: NuageMetadataPort} - NovaMetadataPort: {get_param: NuageNovaMetadataPort} - SharedSecret: {get_param: NuageMetadataProxySharedSecret} - NovaClientVersion: {get_param: NuageNovaClientVersion} - NovaOsUsername: {get_param: NuageNovaOsUsername} - MetadataAgentStartWithOvs: {get_param: NuageMetadataAgentStartWithOvs} - NovaApiEndpointType: {get_param: NuageNovaApiEndpoint} - NovaRegionName: {get_param: NuageNovaRegionName} - -# Specify output parameters that will be available -# after the template is instantiated. -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: {get_attr: [NovaNuageDeployment, deploy_stdout]} diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml index 011ec037..1c2da401 100644 --- a/puppet/services/kernel.yaml +++ b/puppet/services/kernel.yaml @@ -60,6 +60,11 @@ parameters: ARP cache. The garbage collector will always run if there are more than this number of entries in the cache. 
type: number + InotifyIntancesMax: + default: 1024 + description: Configures sysctl fs.inotify.max_user_instances key + type: number + outputs: role_data: @@ -129,5 +134,9 @@ outputs: value: {get_param: NeighbourGcThreshold2} net.ipv4.neigh.default.gc_thresh3: value: {get_param: NeighbourGcThreshold3} + # set inotify value for neutron/dnsmasq scale + fs.inotify.max_user_instances: + value: {get_param: InotifyIntancesMax} + step_config: | include ::tripleo::profile::base::kernel diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml index f1a56530..e594c2da 100644 --- a/puppet/services/neutron-compute-plugin-nuage.yaml +++ b/puppet/services/neutron-compute-plugin-nuage.yaml @@ -38,6 +38,39 @@ parameters: description: TCP Port to listen for metadata server requests type: string default: '9697' + NuageActiveController: + description: IP address of the Active Virtualized Services Controller (VSC) + type: string + NuageStandbyController: + description: IP address of the Standby Virtualized Services Controller (VSC) + type: string + NuageNovaMetadataPort: + description: TCP Port used by Nova metadata server + type: string + default: '8775' + NuageMetadataProxySharedSecret: + description: Shared secret to sign the instance-id request + type: string + NuageNovaClientVersion: + description: Client Version Nova + type: string + default: '2' + NuageNovaOsUsername: + description: Nova username in keystone_authtoken + type: string + default: 'nova' + NuageMetadataAgentStartWithOvs: + description: Set to True if nuage-metadata-agent needs to be started with nuage-openvswitch-switch + type: string + default: 'True' + NuageNovaApiEndpoint: + description: One of publicURL, internalURL, adminURL in "keystone endpoint-list" + type: string + default: 'publicURL' + NuageNovaRegionName: + description: Region name in "keystone endpoint-list" + type: string + default: 'regionOne' outputs: role_data: @@ -45,6 +78,16 @@ outputs: value: service_name: neutron_compute_plugin_nuage config_settings: + nuage::vrs::active_controller: {get_param: NuageActiveController} + nuage::vrs::standby_controller: {get_param: NuageStandbyController} + nuage::metadataagent::metadata_port: {get_param: NuageMetadataPort} + nuage::metadataagent::nova_metadata_port: {get_param: NuageNovaMetadataPort} + nuage::metadataagent::metadata_secret: {get_param: NuageMetadataProxySharedSecret} + nuage::metadataagent::nova_client_version: {get_param: NuageNovaClientVersion} + nuage::metadataagent::nova_os_username: {get_param: NuageNovaOsUsername} + nuage::metadataagent::metadata_agent_start_with_ovs: {get_param: NuageMetadataAgentStartWithOvs} + nuage::metadataagent::nova_api_endpoint_type: {get_param: NuageNovaApiEndpoint} + nuage::metadataagent::nova_region_name: {get_param: NuageNovaRegionName} tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service' tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword} tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]} diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml index 22a743e0..9e5ba129 100644 --- a/puppet/services/nova-compute.yaml +++ b/puppet/services/nova-compute.yaml @@ -108,7 +108,7 @@ parameters: public_key: '' private_key: '' MigrationSshPort: - default: 22 + default: 2022 description: Target port for migration over ssh type: number diff --git a/puppet/services/nova-libvirt.yaml 
b/puppet/services/nova-libvirt.yaml index 3f37cd94..ac7cc8f1 100644 --- a/puppet/services/nova-libvirt.yaml +++ b/puppet/services/nova-libvirt.yaml @@ -94,7 +94,7 @@ parameters: public_key: '' private_key: '' MigrationSshPort: - default: 22 + default: 2022 description: Target port for migration over ssh type: number diff --git a/puppet/services/nova-migration-target.yaml b/puppet/services/nova-migration-target.yaml index 128abc2c..0c2b419e 100644 --- a/puppet/services/nova-migration-target.yaml +++ b/puppet/services/nova-migration-target.yaml @@ -39,6 +39,10 @@ parameters: default: public_key: '' private_key: '' + MigrationSshPort: + default: 2022 + description: Target port for migration over ssh + type: number outputs: role_data: @@ -53,5 +57,12 @@ outputs: - "%{hiera('live_migration_ssh_inbound_addr')}" live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]} + tripleo::profile::base::sshd::port: + - 22 + - {get_param: MigrationSshPort} + tripleo.nova_migration_target.firewall_rules: + '113 nova_migration_target': + dport: + - {get_param: MigrationSshPort} step_config: | include tripleo::profile::base::nova::migration::target diff --git a/releasenotes/notes/fix-internal-api-network-name-282bfda2cdb406aa.yaml b/releasenotes/notes/fix-internal-api-network-name-282bfda2cdb406aa.yaml new file mode 100644 index 00000000..2e7e79f1 --- /dev/null +++ b/releasenotes/notes/fix-internal-api-network-name-282bfda2cdb406aa.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes heat resource OS::TripleO::Network::Internal to be renamed back to + OS::TripleO::Network::InternalApi for backwards compatibility with + previous versions. diff --git a/releasenotes/notes/fix-tenant-net-name-type-94a9c50c86529001.yaml b/releasenotes/notes/fix-tenant-net-name-type-94a9c50c86529001.yaml new file mode 100644 index 00000000..77d9d52f --- /dev/null +++ b/releasenotes/notes/fix-tenant-net-name-type-94a9c50c86529001.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Removes hardcoded network names. The networks are + now defined dynamically by network_data.yaml. 
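Note on the MigrationSshPort changes above: both docker/services/sshd.yaml and docker/services/nova-migration-target.yaml gate the proxy configuration on a single Heat condition, enable_migration_proxy, defined as MigrationSshPort equalling 22. The major-upgrade-composable-steps environments pin the port back to 22 so the host sshd keeps proxying migration traffic to the containerized sshd on 2022, and the converge environments switch it to 2022, which drops the proxy include again. A minimal Python sketch of that gate follows; the helper name and the asserts are illustrative only, not part of the templates:

```python
def enable_migration_proxy(migration_ssh_port: int) -> bool:
    """Mirror the Heat 'equals' condition added in docker/services/sshd.yaml
    and docker/services/nova-migration-target.yaml: the migration ssh proxy
    profile is only included while MigrationSshPort is pinned to 22."""
    return migration_ssh_port == 22


# During the Ocata->Pike upgrade steps the environments set MigrationSshPort: 22
assert enable_migration_proxy(22) is True
# At converge the environments switch to the containerized sshd port 2022
assert enable_migration_proxy(2022) is False
```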
diff --git a/sample-env-generator/ssl.yaml b/sample-env-generator/ssl.yaml index 43a1afc1..a3b5f24a 100644 --- a/sample-env-generator/ssl.yaml +++ b/sample-env-generator/ssl.yaml @@ -172,9 +172,9 @@ environments: OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'} - PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'} + PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoPublic: {protocol: 'https', port: '13977', host: 'IP_ADDRESS'} SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'} @@ -273,9 +273,9 @@ environments: OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'} - PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} - PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'} + PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'} + PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'} SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'} SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'} @@ -374,9 +374,9 @@ environments: OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'} OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'} OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'} - PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'} - PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'} - PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'} + PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'} + PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'} + PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'} SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'} SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'} SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'} diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py index f7a45d7b..66ac1010 100755 --- a/tools/yaml-validate.py +++ b/tools/yaml-validate.py @@ -116,6 +116,22 @@ PREFERRED_CAMEL_CASE = { 'haproxy': 'HAProxy', } +# Overrides for docker/puppet validation +# <filename>: True explicitly enables validation +# <filename>: False explicitly disables validation +# +# If a filename is not found in the overrides then the top level directory is +# used to determine which validation method to use. 
+VALIDATE_PUPPET_OVERRIDE = { + # docker/service/sshd.yaml is a variation of the puppet sshd service + './docker/services/sshd.yaml': True, + # qdr aliases rabbitmq service to provide alternative messaging backend + './puppet/services/qdr.yaml': False, +} +VALIDATE_DOCKER_OVERRIDE = { + # docker/service/sshd.yaml is a variation of the puppet sshd service + './docker/services/sshd.yaml': False, +} def exit_usage(): print('Usage %s <yaml file or directory>' % sys.argv[0]) @@ -436,12 +452,14 @@ def validate(filename, param_map): % filename) return 1 - # qdr aliases rabbitmq service to provide alternative messaging backend - if (filename.startswith('./puppet/services/') and - filename not in ['./puppet/services/qdr.yaml']): + if VALIDATE_PUPPET_OVERRIDE.get(filename, False) or ( + filename.startswith('./puppet/services/') and + VALIDATE_PUPPET_OVERRIDE.get(filename, True)): retval = validate_service(filename, tpl) - if filename.startswith('./docker/services/'): + if VALIDATE_DOCKER_OVERRIDE.get(filename, False) or ( + filename.startswith('./docker/services/') and + VALIDATE_DOCKER_OVERRIDE.get(filename, True)): retval = validate_docker_service(filename, tpl) if filename.endswith('hyperconverged-ceph.yaml'): |
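The tools/yaml-validate.py hunk above replaces the hardcoded qdr exception with two override maps: a file explicitly set to True in VALIDATE_PUPPET_OVERRIDE gets the puppet-style validation regardless of its directory, a file set to False is skipped, and anything else falls back to the ./puppet/services/ prefix check; VALIDATE_DOCKER_OVERRIDE works the same way for the docker check. A hedged restatement of that precedence as a standalone helper (the function and asserts are illustrative; only the dictionary contents and the startswith fallback come from the patch):

```python
VALIDATE_PUPPET_OVERRIDE = {
    # docker/service/sshd.yaml is a variation of the puppet sshd service
    './docker/services/sshd.yaml': True,
    # qdr aliases rabbitmq service to provide alternative messaging backend
    './puppet/services/qdr.yaml': False,
}


def should_run_puppet_validation(filename):
    """Explicit override wins; otherwise fall back to the directory prefix."""
    override = VALIDATE_PUPPET_OVERRIDE.get(filename)
    if override is not None:
        return override
    return filename.startswith('./puppet/services/')


assert should_run_puppet_validation('./docker/services/sshd.yaml')       # forced on
assert not should_run_puppet_validation('./puppet/services/qdr.yaml')    # forced off
assert should_run_puppet_validation('./puppet/services/kernel.yaml')     # default on
assert not should_run_puppet_validation('./docker/services/rabbitmq.yaml')
```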