Diffstat (limited to 'puppet')
67 files changed, 1327 insertions, 278 deletions
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml index 51f9abac..b9e5c6fe 100644 --- a/puppet/blockstorage-role.yaml +++ b/puppet/blockstorage-role.yaml @@ -126,7 +126,7 @@ parameters: resources: BlockStorage: - type: OS::TripleO::Server + type: OS::TripleO::BlockStorageServer metadata: os-collect-config: command: {get_param: ConfigCommand} @@ -457,6 +457,12 @@ resources: update_identifier: get_param: UpdateIdentifier + SshHostPubKey: + type: OS::TripleO::Ssh::HostPubKey + depends_on: BlockStorageDeployment + properties: + server: {get_resource: BlockStorage} + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -504,6 +510,37 @@ outputs: MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + known_hosts_entry: + description: Entry for ssh known hosts + value: + str_replace: + template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\ +EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\ +INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\ +STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\ +STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\ +TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\ +MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\ +CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY" + params: + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} + PRIMARYHOST: {get_attr: [BlockStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]} + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]} + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]} + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]} + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]} + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} + CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]} nova_server_resource: description: Heat resource handle for the block storage server value: diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml index d7d7f478..075f42ba 100644 --- a/puppet/cephstorage-role.yaml +++ b/puppet/cephstorage-role.yaml @@ -132,7 +132,7 @@ parameters: resources: CephStorage: - type: OS::TripleO::Server + type: OS::TripleO::CephStorageServer metadata: os-collect-config: command: {get_param: ConfigCommand} @@ -468,6 +468,12 @@ resources: update_identifier: get_param: UpdateIdentifier + SshHostPubKey: + type: OS::TripleO::Ssh::HostPubKey + depends_on: CephStorageDeployment + properties: + server: {get_resource: CephStorage} + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -515,6 +521,37 @@ outputs: MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]} CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + known_hosts_entry: + description: Entry for 
ssh known hosts + value: + str_replace: + template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\ +EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\ +INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\ +STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\ +STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\ +TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\ +MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\ +CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY" + params: + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} + PRIMARYHOST: {get_attr: [CephStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]} + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]} + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]} + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]} + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]} + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} + CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]} + CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]} nova_server_resource: description: Heat resource handle for the ceph storage server value: diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml index ebdd762d..351b3823 100644 --- a/puppet/compute-role.yaml +++ b/puppet/compute-role.yaml @@ -145,7 +145,7 @@ parameters: resources: NovaCompute: - type: OS::TripleO::Server + type: OS::TripleO::ComputeServer metadata: os-collect-config: command: {get_param: ConfigCommand} @@ -492,6 +492,12 @@ resources: update_identifier: get_param: UpdateIdentifier + SshHostPubKey: + type: OS::TripleO::Ssh::HostPubKey + depends_on: NovaComputeDeployment + properties: + server: {get_resource: NovaCompute} + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -559,7 +565,38 @@ outputs: MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + known_hosts_entry: + description: Entry for ssh known hosts + value: + str_replace: + template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\ +EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\ +INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\ +STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\ +STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\ +TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\ +MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\ +CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY" + params: + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} + PRIMARYHOST: {get_attr: [NovaCompute, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]} + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]} + STORAGEIP: {get_attr: [StoragePort, ip_address]} + 
STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]} + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]} + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]} + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} + CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]} nova_server_resource: description: Heat resource handle for the Nova compute server value: - {get_resource: NovaCompute} + {get_resource: NovaCompute}
\ No newline at end of file diff --git a/puppet/config.role.j2.yaml b/puppet/config.role.j2.yaml index 7337d062..cdbc76f0 100644 --- a/puppet/config.role.j2.yaml +++ b/puppet/config.role.j2.yaml @@ -38,7 +38,7 @@ resources: - '' - list_join: - ',' - - ['file,concat,file_line', {get_param: PuppetTags}] + - ['file,concat,file_line,augeas', {get_param: PuppetTags}] outputs: - name: result inputs: diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml index 2f4f583c..92eb70ad 100644 --- a/puppet/controller-role.yaml +++ b/puppet/controller-role.yaml @@ -165,7 +165,7 @@ parameter_groups: resources: Controller: - type: OS::TripleO::Server + type: OS::TripleO::ControllerServer metadata: os-collect-config: command: {get_param: ConfigCommand} @@ -467,7 +467,6 @@ resources: - all_nodes # provided by allNodesConfig - vip_data # provided by allNodesConfig - '"%{::osfamily}"' - - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre @@ -532,6 +531,12 @@ resources: update_identifier: get_param: UpdateIdentifier + SshHostPubKey: + type: OS::TripleO::Ssh::HostPubKey + depends_on: ControllerDeployment + properties: + server: {get_resource: Controller} + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -599,6 +604,37 @@ outputs: MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]} CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + known_hosts_entry: + description: Entry for ssh known hosts + value: + str_replace: + template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\ +EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\ +INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\ +STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\ +STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\ +TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\ +MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\ +CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY" + params: + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} + PRIMARYHOST: {get_attr: [Controller, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]} + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]} + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]} + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]} + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]} + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} + CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]} + CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]} nova_server_resource: description: Heat resource handle for the Nova compute server value: diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml 
b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml index 533c0ee9..e3f4cce6 100644 --- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml @@ -27,6 +27,15 @@ resources: mapped_data: neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} + # NOTE(aschultz): required for the puppet module but we don't + # actually want them defined on the compute nodes so we're + # relying on the puppet module's handling of <SERVICE DEFAULT> + # to just not set these but still accept that they were defined. + # This should be fixed in puppet-neutron and removed here, + # but for backportability, we need to define something. + neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>' + neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>' + NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml deleted file mode 100644 index 378f7f98..00000000 --- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml +++ /dev/null @@ -1,157 +0,0 @@ -heat_template_version: ocata - -description: Configure hieradata for Cinder Netapp configuration - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - - # Config specific parameters, to be provided via parameter_defaults - CinderEnableNetappBackend: - type: boolean - default: true - CinderNetappBackendName: - type: string - default: 'tripleo_netapp' - CinderNetappLogin: - type: string - CinderNetappPassword: - type: string - hidden: true - CinderNetappServerHostname: - type: string - CinderNetappServerPort: - type: string - default: '80' - CinderNetappSizeMultiplier: - type: string - default: '1.2' - CinderNetappStorageFamily: - type: string - default: 'ontap_cluster' - CinderNetappStorageProtocol: - type: string - default: 'nfs' - CinderNetappTransportType: - type: string - default: 'http' - CinderNetappVfiler: - type: string - default: '' - CinderNetappVolumeList: - type: string - default: '' - CinderNetappVserver: - type: string - default: '' - CinderNetappPartnerBackendName: - type: string - default: '' - CinderNetappNfsShares: - type: string - default: '' - CinderNetappNfsSharesConfig: - type: string - default: '/etc/cinder/shares.conf' - CinderNetappNfsMountOptions: - type: string - default: '' - CinderNetappCopyOffloadToolPath: - type: string - default: '' - CinderNetappControllerIps: - type: string - default: '' - CinderNetappSaPassword: - type: string - default: '' - hidden: true - CinderNetappStoragePools: - type: string - default: '' - CinderNetappHostType: - type: string - default: '' - CinderNetappWebservicePath: - type: string - default: '/devmgr/v2' - # DEPRECATED options for compatibility with older versions - CinderNetappEseriesHostType: - type: string - default: 'linux_dm_mp' - -parameter_groups: -- label: deprecated - description: Do not use deprecated params, they will be removed.
- parameters: - - CinderNetappEseriesHostType - -resources: - CinderNetappConfig: - type: OS::Heat::StructuredConfig - properties: - group: hiera - config: - datafiles: - cinder_netapp_data: - mapped_data: - tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend} - cinder::backend::netapp::title: {get_input: NetappBackendName} - cinder::backend::netapp::netapp_login: {get_input: NetappLogin} - cinder::backend::netapp::netapp_password: {get_input: NetappPassword} - cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} - cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} - cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} - cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} - cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} - cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} - cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} - cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} - cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} - cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} - cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} - cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} - cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} - cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} - cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} - cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} - cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} - cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType} - cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} - - CinderNetappDeployment: - type: OS::Heat::StructuredDeployment - properties: - name: CinderNetappDeployment - config: {get_resource: CinderNetappConfig} - server: {get_param: server} - input_values: - EnableNetappBackend: {get_param: CinderEnableNetappBackend} - NetappBackendName: {get_param: CinderNetappBackendName} - NetappLogin: {get_param: CinderNetappLogin} - NetappPassword: {get_param: CinderNetappPassword} - NetappServerHostname: {get_param: CinderNetappServerHostname} - NetappServerPort: {get_param: CinderNetappServerPort} - NetappSizeMultiplier: {get_param: CinderNetappSizeMultiplier} - NetappStorageFamily: {get_param: CinderNetappStorageFamily} - NetappStorageProtocol: {get_param: CinderNetappStorageProtocol} - NetappTransportType: {get_param: CinderNetappTransportType} - NetappVfiler: {get_param: CinderNetappVfiler} - NetappVolumeList: {get_param: CinderNetappVolumeList} - NetappVserver: {get_param: CinderNetappVserver} - NetappPartnerBackendName: {get_param: CinderNetappPartnerBackendName} - NetappNfsShares: {get_param: CinderNetappNfsShares} - NetappNfsSharesConfig: {get_param: CinderNetappNfsSharesConfig} - NetappNfsMountOptions: {get_param: CinderNetappNfsMountOptions} - NetappCopyOffloadToolPath: {get_param: CinderNetappCopyOffloadToolPath} - NetappControllerIps: {get_param: CinderNetappControllerIps} - NetappSaPassword: {get_param: CinderNetappSaPassword} - NetappStoragePools: {get_param: CinderNetappStoragePools} - NetappHostType: {get_param: 
CinderNetappHostType} - NetappWebservicePath: {get_param: CinderNetappWebservicePath} - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: {get_attr: [CinderNetappDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml index 1456337f..e7d0b830 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -6,6 +6,14 @@ parameters: server: description: ID of the controller node to apply this config to type: string + NeutronBigswitchAgentEnabled: + description: The state of the neutron-bsn-agent service. + type: boolean + default: true + NeutronBigswitchLLDPEnabled: + description: The state of the neutron-bsn-lldp service. + type: boolean + default: false NeutronBigswitchRestproxyServers: description: 'Big Switch controllers ("IP:port,IP:port")' type: string @@ -43,6 +51,8 @@ resources: datafiles: neutron_bigswitch_data: mapped_data: + neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} + neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} @@ -58,6 +68,8 @@ resources: config: {get_resource: NeutronBigswitchConfig} server: {get_param: server} input_values: + neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled} + neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled} restproxy_servers: {get_param: NeutronBigswitchRestproxyServers} restproxy_server_auth: {get_param: NeutronBigswitchRestproxyServerAuth } restproxy_auto_sync_on_failure: {get_param: NeutronBigswitchRestproxyAutoSyncOnFailure} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml index bca6010a..40b407bc 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -10,7 +10,7 @@ parameters: # Config specific parameters, to be provided via parameter_defaults N1000vVSMIP: type: string - default: '192.0.2.50' + default: '192.168.24.50' N1000vVSMDomainID: type: number default: 100 @@ -62,7 +62,7 @@ parameters: default: '255.255.255.0' N1000vMgmtGatewayIP: type: string - default: '192.0.2.1' + default: '192.168.24.1' N1000vPacemakerControl: type: boolean default: true diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml index 5aba90e8..28092773 100644 --- a/puppet/major_upgrade_steps.j2.yaml +++ b/puppet/major_upgrade_steps.j2.yaml @@ -32,20 +32,6 @@ parameters: type: string hidden: true -conditions: - # Conditions to disable any steps where the task list is empty -{%- for role in roles %} - {{role.name}}UpgradeBatchConfigEnabled: - not: - equals: - - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]} - - [] - {{role.name}}UpgradeConfigEnabled: - not: - equals: - - {get_param: [role_data, {{role.name}}, upgrade_tasks]} - - [] -{%- endfor %} resources: @@ -65,10 +51,11 @@ resources: - " crudini --set /etc/nova/nova.conf placement 
project_domain_name Default\n\n" - " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n" - " crudini --set /etc/nova/nova.conf placement project_name service\n\n" + - " crudini --set /etc/nova/nova.conf placement os_interface internal\n\n" - str_replace: template: | crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD' - crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME' + crudini --set /etc/nova/nova.conf placement os_region_name 'REGION_NAME' crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL' params: SERVICE_PASSWORD: { get_param: NovaPassword } @@ -103,12 +90,11 @@ resources: {{role.name}}UpgradeBatchConfig_Step{{step}}: type: OS::TripleO::UpgradeConfig {%- if step > 0 %} - condition: {{role.name}}UpgradeBatchConfigEnabled - {% if role.name in enabled_roles %} + {%- if role in enabled_roles %} depends_on: - {{role.name}}UpgradeBatch_Step{{step -1}} {%- endif %} - {% else %} + {%- else %} {% for role in roles if role.disable_upgrade_deployment|default(false) %} {% if deliver_script.update({'deliver': True}) %} {% endif %} {% endfor %} @@ -128,13 +114,11 @@ resources: {%- for role in enabled_roles %} {{role.name}}UpgradeBatch_Step{{step}}: type: OS::Heat::SoftwareDeploymentGroup - condition: {{role.name}}UpgradeBatchConfigEnabled {%- if step > 0 %} depends_on: - - {{role.name}}UpgradeBatch_Step{{step -1}} - {% else %} - depends_on: - - {{role.name}}UpgradeBatchConfig_Step{{step}} + {%- for role_inside in enabled_roles %} + - {{role_inside.name}}UpgradeBatch_Step{{step -1}} + {%- endfor %} {%- endif %} update_policy: batch_create: @@ -188,11 +172,10 @@ resources: # do, and there should be minimal performance hit (creating the # config is cheap compared to the time to apply the deployment). {%- if step > 0 %} - condition: {{role.name}}UpgradeConfigEnabled - {% if role.name in enabled_roles %} + {%- if role in enabled_roles %} depends_on: - {{role.name}}Upgrade_Step{{step -1}} - {% endif %} + {%- endif %} {%- endif %} properties: UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]} @@ -204,9 +187,18 @@ resources: {{role.name}}Upgrade_Step{{step}}: type: OS::Heat::SoftwareDeploymentGroup {%- if step > 0 %} - condition: {{role.name}}UpgradeConfigEnabled + # Make sure we wait that all roles have finished their own + # previous step before going to the next, so we can guarantee + # state for each steps. 
depends_on: - - {{role.name}}Upgrade_Step{{step -1}} + {%- for role_inside in enabled_roles %} + - {{role_inside.name}}Upgrade_Step{{step -1}} + {%- endfor %} + {%- else %} + depends_on: + {%- for role_inside in enabled_roles %} + - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}} + {%- endfor %} {%- endif %} properties: name: {{role.name}}Upgrade_Step{{step}} diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml index 6ee06d78..84b646a2 100644 --- a/puppet/objectstorage-role.yaml +++ b/puppet/objectstorage-role.yaml @@ -127,7 +127,7 @@ parameters: resources: SwiftStorage: - type: OS::TripleO::Server + type: OS::TripleO::ObjectStorageServer metadata: os-collect-config: command: {get_param: ConfigCommand} @@ -455,6 +455,12 @@ resources: update_identifier: get_param: UpdateIdentifier + SshHostPubKey: + type: OS::TripleO::Ssh::HostPubKey + depends_on: SwiftStorageHieraDeploy + properties: + server: {get_resource: SwiftStorage} + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -502,6 +508,37 @@ outputs: MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + known_hosts_entry: + description: Entry for ssh known hosts + value: + str_replace: + template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\ +EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\ +INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\ +STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\ +STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\ +TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\ +MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\ +CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY" + params: + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} + PRIMARYHOST: {get_attr: [SwiftStorage, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]} + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]} + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]} + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]} + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]} + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} + CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]} nova_server_resource: description: Heat resource handle for the swift storage server value: diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2 index 86af6114..782a32c9 100644 --- a/puppet/puppet-steps.j2 +++ b/puppet/puppet-steps.j2 @@ -30,13 +30,6 @@ input_values: update_identifier: {get_param: DeployIdentifier} - {% if role.name in ['Controller', 'ObjectStorage'] %} - {{role.name}}SwiftRingDeploy: - type: OS::TripleO::Tasks::SwiftRingDeploy - properties: - servers: {get_param: [servers, {{role.name}}]} - {% endif %} - # Step through a series of configuration steps {% for step in range(1, 6) %}
{{role.name}}Deployment_Step{{step}}: @@ -88,15 +81,4 @@ servers: {get_param: [servers, {{role.name}}]} input_values: update_identifier: {get_param: DeployIdentifier} - - {% if role.name in ['Controller', 'ObjectStorage'] %} - {{role.name}}SwiftRingUpdate: - type: OS::TripleO::Tasks::SwiftRingUpdate - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step5 - {% endfor %} - properties: - servers: {get_param: [servers, {{role.name}}]} - {% endif %} {% endfor %} diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml index 1f68f41f..960f0d58 100644 --- a/puppet/role.role.j2.yaml +++ b/puppet/role.role.j2.yaml @@ -148,7 +148,7 @@ parameters: resources: {{role}}: - type: OS::TripleO::Server + type: OS::TripleO::{{role}}Server metadata: os-collect-config: command: {get_param: ConfigCommand} @@ -483,12 +483,19 @@ resources: type: OS::Heat::SoftwareDeployment depends_on: NetworkDeployment properties: + name: UpdateDeployment config: {get_resource: UpdateConfig} server: {get_resource: {{role}}} input_values: update_identifier: get_param: UpdateIdentifier + SshHostPubKey: + type: OS::TripleO::Ssh::HostPubKey + depends_on: {{role}}Deployment + properties: + server: {get_resource: {{role}}} + outputs: ip_address: description: IP address of the server in the ctlplane network @@ -536,6 +543,37 @@ outputs: MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]} CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + known_hosts_entry: + description: Entry for ssh known hosts + value: + str_replace: + template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\ +EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\ +INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\ +STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\ +STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\ +TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\ +MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\ +CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY" + params: + PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]} + DOMAIN: {get_param: CloudDomain} + PRIMARYHOST: {get_attr: [{{role}}, name]} + EXTERNALIP: {get_attr: [ExternalPort, ip_address]} + EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]} + INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]} + INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]} + STORAGEIP: {get_attr: [StoragePort, ip_address]} + STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]} + STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]} + STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]} + TENANTIP: {get_attr: [TenantPort, ip_address]} + TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]} + MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]} + MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]} + CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]} + CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]} + HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]} nova_server_resource: description: Heat resource handle for {{role}} server value: diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml index d7c87b61..7cc6e4c6 100644 --- a/puppet/services/aodh-api.yaml +++ b/puppet/services/aodh-api.yaml @@ -24,6 +24,12 @@ parameters: EnableInternalTLS: type: boolean default: false + AodhApiPolicies: + description: | + A hash of policies to configure
for Aodh API. + e.g. { aodh-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: AodhBase: @@ -61,6 +67,7 @@ outputs: aodh::wsgi::apache::wsgi_process_display_name: 'aodh_wsgi' aodh::api::service_name: 'httpd' aodh::api::enable_proxy_headers_parsing: true + aodh::policy::policies: {get_param: AodhApiPolicies} tripleo.aodh_api.firewall_rules: '128 aodh-api': dport: diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml index 9bd282f8..6e53b1f7 100644 --- a/puppet/services/apache.yaml +++ b/puppet/services/apache.yaml @@ -77,13 +77,15 @@ outputs: - "%{hiera('apache_remote_proxy_ips_network')}" - generate_service_certificates: true + tripleo::certmonger::apache_dirs::certificate_dir: '/etc/pki/tls/certs/httpd' + tripleo::certmonger::apache_dirs::key_dir: '/etc/pki/tls/private/httpd' apache_certificates_specs: map_merge: repeat: template: httpd-NETWORK: - service_certificate: '/etc/pki/tls/certs/httpd-NETWORK.crt' - service_key: '/etc/pki/tls/private/httpd-NETWORK.key' + service_certificate: '/etc/pki/tls/certs/httpd/httpd-NETWORK.crt' + service_key: '/etc/pki/tls/private/httpd/httpd-NETWORK.key' hostname: "%{hiera('fqdn_NETWORK')}" principal: "HTTP/%{hiera('fqdn_NETWORK')}" for_each: diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml index d8787c87..91a5b01c 100644 --- a/puppet/services/barbican-api.yaml +++ b/puppet/services/barbican-api.yaml @@ -55,6 +55,12 @@ parameters: EnableInternalTLS: type: boolean default: false + BarbicanPolicies: + description: | + A hash of policies to configure for Barbican. + e.g. { barbican-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: @@ -77,6 +83,7 @@ outputs: barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} barbican::keystone::authtoken::project_name: 'service' + barbican::policy::policies: {get_param: BarbicanPolicies} barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]} barbican::api::db_auto_create: false barbican::api::enabled_certificate_plugins: ['simple_certificate'] diff --git a/puppet/services/ceilometer-agent-ipmi.yaml b/puppet/services/ceilometer-agent-ipmi.yaml new file mode 100644 index 00000000..26647dfd --- /dev/null +++ b/puppet/services/ceilometer-agent-ipmi.yaml @@ -0,0 +1,77 @@ +heat_template_version: ocata + +description: > + OpenStack Ceilometer Ipmi Agent service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RedisPassword: + description: The password for the redis service account. 
+ type: string + hidden: true + MonitoringSubscriptionCeilometerIpmi: + default: 'overcloud-ceilometer-agent-ipmi' + type: string + CeilometerAgentIpmiLoggingSource: + type: json + default: + tag: openstack.ceilometer.agent.ipmi + path: /var/log/ceilometer/ipmi.log + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Agent Ipmi role. + value: + service_name: ceilometer_agent_ipmi + monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerIpmi} + logging_source: {get_param: CeilometerAgentIpmiLoggingSource} + logging_groups: + - ceilometer + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer_redis_password: {get_param: RedisPassword} + ipmi_namespace: true + step_config: | + include ::tripleo::profile::base::ceilometer::agent::polling + upgrade_tasks: + - name: Check if ceilometer-agent-ipmi is deployed + command: systemctl is-enabled openstack-ceilometer-ipmi + tags: common + ignore_errors: True + register: ceilometer_ipmi_enabled + - name: "PreUpgrade step0,validation: Check if openstack-ceilometer-ipmi is running" + shell: > + /usr/bin/systemctl show 'openstack-ceilometer-ipmi' --property ActiveState | + grep '\bactive\b' + when: ceilometer_ipmi_enabled.rc == 0 + tags: step0,validation + - name: Stop openstack-ceilometer-ipmi service + tags: step1 + when: ceilometer_ipmi_enabled.rc == 0 + service: name=openstack-ceilometer-ipmi state=stopped + - name: Install openstack-ceilometer-ipmi package if it was disabled + tags: step3 + yum: name=openstack-ceilometer-ipmi state=latest + when: ceilometer_ipmi_enabled.rc != 0 diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml index f5ee9d40..ba94b451 100644 --- a/puppet/services/ceilometer-api.yaml +++ b/puppet/services/ceilometer-api.yaml @@ -29,6 +29,12 @@ parameters: EnableInternalTLS: type: boolean default: false + CeilometerApiPolicies: + description: | + A hash of policies to configure for Ceilometer API. + e.g. { ceilometer-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: CeilometerServiceBase: @@ -78,6 +84,7 @@ outputs: "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]} + ceilometer::policy::policies: {get_param: CeilometerApiPolicies} ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]} ceilometer::wsgi::apache::ssl: {get_param: EnableInternalTLS} ceilometer::wsgi::apache::servername: diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml index a9c84289..e1613720 100644 --- a/puppet/services/ceilometer-base.yaml +++ b/puppet/services/ceilometer-base.yaml @@ -37,7 +37,7 @@ parameters: constraints: - allowed_values: ['gnocchi', 'database'] CeilometerEventDispatcher: - default: ['gnocchi'] + default: ['panko', 'gnocchi'] description: Comma-separated list of Dispatchers to process events data type: comma_delimited_list constraints: @@ -76,6 +76,11 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + CeilometerApiEndpoint: + default: false + description: Whether to create or skip API endpoint. Set this to + false, if you choose to disable Ceilometer API service. 
+ type: boolean outputs: role_data: @@ -83,6 +88,7 @@ outputs: value: service_name: ceilometer_base config_settings: + ceilometer_auth_enabled: true ceilometer::debug: {get_param: Debug} ceilometer::db::database_connection: list_join: @@ -133,6 +139,7 @@ outputs: ceilometer::keystone::auth::password: {get_param: CeilometerPassword} ceilometer::keystone::auth::region: {get_param: KeystoneRegion} ceilometer::keystone::auth::tenant: 'service' + ceilometer::keystone::auth::configure_endpoint: {get_param: CeilometerApiEndpoint} mysql: ceilometer::db::mysql::password: {get_param: CeilometerPassword} ceilometer::db::mysql::user: ceilometer diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml index 01531971..49856115 100644 --- a/puppet/services/ceph-rgw.yaml +++ b/puppet/services/ceph-rgw.yaml @@ -73,7 +73,7 @@ outputs: ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]} ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]} ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion} - ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ] + ceph::rgw::keystone::auth::roles: [ 'admin', 'Member', '_member_' ] ceph::rgw::keystone::auth::tenant: service ceph::rgw::keystone::auth::user: swift ceph::rgw::keystone::auth::password: {get_param: SwiftPassword} diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml index 958b0e7d..c1e6b0b0 100644 --- a/puppet/services/cinder-api.yaml +++ b/puppet/services/cinder-api.yaml @@ -46,6 +46,12 @@ parameters: EnableInternalTLS: type: boolean default: false + CinderApiPolicies: + description: | + A hash of policies to configure for Cinder API. + e.g. { cinder-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json conditions: cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]} @@ -86,6 +92,7 @@ outputs: cinder::keystone::authtoken::project_name: 'service' cinder::keystone::authtoken::user_domain_name: 'Default' cinder::keystone::authtoken::project_domain_name: 'Default' + cinder::policy::policies: {get_param: CinderApiPolicies} cinder::api::enable_proxy_headers_parsing: true cinder::api::nova_catalog_info: 'compute:nova:internalURL' diff --git a/puppet/services/cinder-backend-netapp.yaml b/puppet/services/cinder-backend-netapp.yaml new file mode 100644 index 00000000..29a0ce1b --- /dev/null +++ b/puppet/services/cinder-backend-netapp.yaml @@ -0,0 +1,129 @@ +heat_template_version: ocata + +description: Openstack Cinder Netapp backend + +parameters: + CinderEnableNetappBackend: + type: boolean + default: true + CinderNetappBackendName: + type: string + default: 'tripleo_netapp' + CinderNetappLogin: + type: string + CinderNetappPassword: + type: string + hidden: true + CinderNetappServerHostname: + type: string + CinderNetappServerPort: + type: string + default: '80' + CinderNetappSizeMultiplier: + type: string + default: '1.2' + CinderNetappStorageFamily: + type: string + default: 'ontap_cluster' + CinderNetappStorageProtocol: + type: string + default: 'nfs' + CinderNetappTransportType: + type: string + default: 'http' + CinderNetappVfiler: + type: string + default: '' + CinderNetappVolumeList: + type: string + default: '' + CinderNetappVserver: + type: string + default: '' + CinderNetappPartnerBackendName: + type: string + default: '' + CinderNetappNfsShares: + type: string + default: '' + CinderNetappNfsSharesConfig: + type: string + default: '/etc/cinder/shares.conf' + 
CinderNetappNfsMountOptions: + type: string + default: '' + CinderNetappCopyOffloadToolPath: + type: string + default: '' + CinderNetappControllerIps: + type: string + default: '' + CinderNetappSaPassword: + type: string + default: '' + hidden: true + CinderNetappStoragePools: + type: string + default: '' + CinderNetappHostType: + type: string + default: '' + CinderNetappWebservicePath: + type: string + default: '/devmgr/v2' + # DEPRECATED options for compatibility with older versions + CinderNetappEseriesHostType: + type: string + default: 'linux_dm_mp' + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + type: json + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + +parameter_groups: +- label: deprecated + description: Do not use deprecated params, they will be removed. + parameters: + - CinderNetappEseriesHostType + +outputs: + role_data: + description: Role data for the Cinder NetApp backend. + value: + service_name: cinder_backend_netapp + config_settings: + tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_param: CinderEnableNetappBackend} + cinder::backend::netapp::title: {get_param: CinderNetappBackendName} + cinder::backend::netapp::netapp_login: {get_param: CinderNetappLogin} + cinder::backend::netapp::netapp_password: {get_param: CinderNetappPassword} + cinder::backend::netapp::netapp_server_hostname: {get_param: CinderNetappServerHostname} + cinder::backend::netapp::netapp_server_port: {get_param: CinderNetappServerPort} + cinder::backend::netapp::netapp_size_multiplier: {get_param: CinderNetappSizeMultiplier} + cinder::backend::netapp::netapp_storage_family: {get_param: CinderNetappStorageFamily} + cinder::backend::netapp::netapp_storage_protocol: {get_param: CinderNetappStorageProtocol} + cinder::backend::netapp::netapp_transport_type: {get_param: CinderNetappTransportType} + cinder::backend::netapp::netapp_vfiler: {get_param: CinderNetappVfiler} + cinder::backend::netapp::netapp_volume_list: {get_param: CinderNetappVolumeList} + cinder::backend::netapp::netapp_vserver: {get_param: CinderNetappVserver} + cinder::backend::netapp::netapp_partner_backend_name: {get_param: CinderNetappPartnerBackendName} + cinder::backend::netapp::nfs_shares: {get_param: CinderNetappNfsShares} + cinder::backend::netapp::nfs_shares_config: {get_param: CinderNetappNfsSharesConfig} + cinder::backend::netapp::nfs_mount_options: {get_param: CinderNetappNfsMountOptions} + cinder::backend::netapp::netapp_copyoffload_tool_path: {get_param: CinderNetappCopyOffloadToolPath} + cinder::backend::netapp::netapp_controller_ips: {get_param: CinderNetappControllerIps} + cinder::backend::netapp::netapp_sa_password: {get_param: CinderNetappSaPassword} + cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools} + cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType} + cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath} + step_config: | + include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml index 20f64162..5f6b5657 100644 --- a/puppet/services/congress.yaml +++ b/puppet/services/congress.yaml @@ -47,6 +47,12 @@ parameters: 
default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + CongressPolicies: + description: | + A hash of policies to configure for Congress. + e.g. { congress-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json outputs: role_data: @@ -73,6 +79,7 @@ outputs: congress::rabbit_port: {get_param: RabbitClientPort} congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]} + congress::keystone::authtoken::password: {get_param: CongressPassword} congress::keystone::authtoken::project_name: 'service' congress::keystone::authtoken::user_domain_name: 'Default' congress::keystone::authtoken::project_domain_name: 'Default' @@ -86,6 +93,7 @@ outputs: congress::db::mysql::allowed_hosts: - '%' - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + congress::policy::policies: {get_param: CongressPolicies} service_config_settings: keystone: diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml index 63ec4446..50597216 100644 --- a/puppet/services/database/mongodb.yaml +++ b/puppet/services/database/mongodb.yaml @@ -19,6 +19,10 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + MongodbMemoryLimit: + default: '20G' + description: Limit the amount of memory mongodb uses with systemd. + type: string MongoDbLoggingSource: type: json description: Fluentd logging configuration for mongodb. @@ -49,6 +53,7 @@ outputs: map_merge: - get_attr: [MongoDbBase, role_data, config_settings] - tripleo::profile::base::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]} + tripleo::profile::base::database::mongodb::memory_limit: {get_param: MongodbMemoryLimit} mongodb::server::service_manage: True tripleo.mongodb.firewall_rules: '101 mongodb_config': diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml index 10f6d311..d1adefe5 100644 --- a/puppet/services/ec2-api.yaml +++ b/puppet/services/ec2-api.yaml @@ -42,6 +42,12 @@ parameters: default: 'false' description: Set to true to enable package installation via Puppet type: boolean + Ec2ApiPolicies: + description: | + A hash of policies to configure for EC2-API. + e.g. 
{ ec2api-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json conditions: @@ -67,6 +73,7 @@ outputs: ec2api::keystone::authtoken::password: {get_param: Ec2ApiPassword} ec2api::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } ec2api::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + ec2api::policy::policies: {get_param: Ec2ApiPolicies} ec2api::api::enabled: true ec2api::package_manage: {get_param: EnablePackageInstall} ec2api::api::ec2api_listen: @@ -91,6 +98,11 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/ec2_api' - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + ec2api::api::keystone_ec2_tokens_url: + list_join: + - '' + - - {get_param: [EndpointMap, KeystoneV3Internal, uri]} + - '/ec2tokens' - if: - nova_workers_zero diff --git a/puppet/services/external-swift-proxy.yaml b/puppet/services/external-swift-proxy.yaml new file mode 100644 index 00000000..75f5b6a0 --- /dev/null +++ b/puppet/services/external-swift-proxy.yaml @@ -0,0 +1,70 @@ +heat_template_version: ocata + +description: > + External Swift Proxy endpoint configured with Puppet + +parameters: + ExternalPublicUrl: + description: Public endpoint url for the external swift proxy + type: string + ExternalInternalUrl: + description: Internal endpoint url for the external swift proxy + type: string + ExternalAdminUrl: + description: External endpoint url for the external swift proxy + type: string + ExternalSwiftUserTenant: + description: Tenant where swift user will be set as admin + type: string + default: 'service' + SwiftPassword: + description: The password for the swift service account, used by the swift proxy services. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + +outputs: + role_data: + description: Role data for External Swift proxy. 
+ value: + service_name: external_swift_proxy + config_settings: + + step_config: + + service_config_settings: + keystone: + swift::keystone::auth::public_url: {get_param: ExternalPublicUrl} + swift::keystone::auth::internal_url: {get_param: ExternalInternalUrl} + swift::keystone::auth::admin_url: {get_param: ExternalAdminUrl} + swift::keystone::auth::public_url_s3: '' + swift::keystone::auth::internal_url_s3: '' + swift::keystone::auth::admin_url_s3: '' + swift::keystone::auth::password: {get_param: SwiftPassword} + swift::keystone::auth::region: {get_param: KeystoneRegion} + swift::keystone::auth::tenant: {get_param: ExternalSwiftUserTenant} + swift::keystone::auth::configure_s3_endpoint: false + swift::keystone::auth::operator_roles: + - admin + - swiftoperator + - ResellerAdmin + diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index b06f9993..f61e6154 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -110,6 +110,12 @@ parameters: type: string default: 'regionOne' description: Keystone region for endpoint + GlanceApiPolicies: + description: | + A hash of policies to configure for Glance API. + e.g. { glance-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json conditions: use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]} @@ -155,6 +161,7 @@ outputs: glance::api::enable_proxy_headers_parsing: true glance::api::debug: {get_param: Debug} glance::api::workers: {get_param: GlanceWorkers} + glance::policy::policies: {get_param: GlanceApiPolicies} tripleo.glance_api.firewall_rules: '112 glance_api': dport: diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml index f4629917..cd323703 100644 --- a/puppet/services/gnocchi-api.yaml +++ b/puppet/services/gnocchi-api.yaml @@ -44,6 +44,12 @@ parameters: EnableInternalTLS: type: boolean default: false + GnocchiApiPolicies: + description: | + A hash of policies to configure for Gnocchi API. + e.g. { gnocchi-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: @@ -83,6 +89,7 @@ outputs: gnocchi::api::enabled: true gnocchi::api::enable_proxy_headers_parsing: true gnocchi::api::service_name: 'httpd' + gnocchi::policy::policies: {get_param: GnocchiApiPolicies} gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword} diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml index e21369e8..f8128bb8 100644 --- a/puppet/services/heat-api.yaml +++ b/puppet/services/heat-api.yaml @@ -41,6 +41,12 @@ parameters: EnableInternalTLS: type: boolean default: false + HeatApiPolicies: + description: | + A hash of policies to configure for Heat API. + e.g. { heat-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json conditions: heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]} @@ -82,6 +88,7 @@ outputs: - 13004 heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]} heat::wsgi::apache_api::ssl: {get_param: EnableInternalTLS} + heat::policy::policies: {get_param: HeatApiPolicies} heat::api::service_name: 'httpd' # NOTE: bind IP is found in Heat replacing the network name with the local node IP # for the given network; replacement examples (eg. 
for internal_api): diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml index a166f3a7..98dac4c9 100644 --- a/puppet/services/heat-engine.yaml +++ b/puppet/services/heat-engine.yaml @@ -112,7 +112,11 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/heat' - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' - heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]} + heat::keystone_ec2_uri: + list_join: + - '' + - - {get_param: [EndpointMap, KeystoneV3Internal, uri]} + - '/ec2tokens' heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword} heat::engine::auth_encryption_key: yaql: diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml index 7ae518b5..8fb13c16 100644 --- a/puppet/services/horizon.yaml +++ b/puppet/services/horizon.yaml @@ -40,6 +40,10 @@ parameters: type: string hidden: true default: '' + HorizonSecureCookies: + description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon + type: boolean + default: true MemcachedIPv6: default: false description: Enable IPv6 features in Memcached. type: boolean @@ -88,6 +92,7 @@ outputs: passwords: - {get_param: HorizonSecret} - {get_param: [DefaultPasswords, horizon_secret]} + horizon::secure_cookies: {get_param: [HorizonSecureCookies]} memcached_ipv6: {get_param: MemcachedIPv6} - if: diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml index e24d0de6..1f18cb1b 100644 --- a/puppet/services/ironic-api.yaml +++ b/puppet/services/ironic-api.yaml @@ -29,6 +29,12 @@ parameters: type: string default: 'regionOne' description: Keystone region for endpoint + IronicApiPolicies: + description: | + A hash of policies to configure for Ironic API. + e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: IronicBase: @@ -64,6 +70,7 @@ outputs: ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]} # This is used to build links in responses ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]} + ironic::policy::policies: {get_param: IronicApiPolicies} tripleo.ironic_api.firewall_rules: '133 ironic api': dport: diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml index 56e1a90b..be910d10 100644 --- a/puppet/services/ironic-conductor.yaml +++ b/puppet/services/ironic-conductor.yaml @@ -32,6 +32,15 @@ parameters: created yet) and should be changed to an actual UUID in a post-deployment stack update. type: string + IronicDefaultNetworkInterface: + default: 'flat' + description: Network interface implementation to use by default. + Set to "flat" (the default) to use one flat provider network. + Set to "neutron" to make Ironic interact with the Neutron + ML2 driver to enable other network types and certain + advanced networking features. Requires + IronicProvisioningNetwork to be correctly set. + type: string IronicEnabledDrivers: default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo'] description: Enabled Ironic drivers @@ -48,6 +57,15 @@ description: The password for the Ironic service and db account, used by the Ironic services type: string hidden: true + IronicProvisioningNetwork: + default: 'provisioning' + description: Name or UUID of the *overcloud* network used for provisioning + of bare metal nodes, if IronicDefaultNetworkInterface is + set to "neutron".
The default value of "provisioning" can be + left during the initial deployment (when no networks are + created yet) and should be changed to an actual UUID in + a post-deployment stack update. + type: string MonitoringSubscriptionIronicConductor: default: 'overcloud-ironic-conductor' type: string @@ -72,6 +90,7 @@ outputs: - ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]} ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase} ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork} + ironic::conductor::provisioning_network: {get_param: IronicProvisioningNetwork} ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers} # We need an endpoint containing a real IP, not a VIP here ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]} @@ -93,6 +112,8 @@ outputs: # NOTE(dtantsur): UEFI only works with iPXE currently for us ironic::drivers::pxe::uefi_pxe_config_template: '$pybasedir/drivers/modules/ipxe_config.template' ironic::drivers::pxe::uefi_pxe_bootfile_name: 'ipxe.efi' + ironic::drivers::interfaces::enabled_network_interfaces: ['flat', 'neutron'] + ironic::drivers::interfaces::default_network_interface: {get_param: IronicDefaultNetworkInterface} tripleo.ironic_conductor.firewall_rules: '134 ironic conductor TFTP': dport: 69 diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml index ee4c771f..2a335b67 100644 --- a/puppet/services/kernel.yaml +++ b/puppet/services/kernel.yaml @@ -22,6 +22,10 @@ parameters: default: 1048576 description: Configures sysctl kernel.pid_max key type: number + KernelDisableIPv6: + default: 0 + description: Configures sysctl net.ipv6.{default/all}.disable_ipv6 keys + type: number outputs: role_data: @@ -31,7 +35,7 @@ outputs: config_settings: kernel_modules: nf_conntrack: {} - ip_conntrack_proto_sctp: {} + nf_conntrack_proto_sctp: {} sysctl_settings: net.ipv4.tcp_keepalive_intvl: value: 1 @@ -39,10 +43,28 @@ outputs: value: 5 net.ipv4.tcp_keepalive_time: value: 5 + net.ipv4.conf.default.send_redirects: + value: 0 + net.ipv4.conf.all.send_redirects: + value: 0 + net.ipv4.conf.default.accept_redirects: + value: 0 + net.ipv4.conf.default.secure_redirects: + value: 0 + net.ipv4.conf.all.secure_redirects: + value: 0 + net.ipv4.conf.default.log_martians: + value: 1 + net.ipv4.conf.all.log_martians: + value: 1 net.nf_conntrack_max: value: 500000 net.netfilter.nf_conntrack_max: value: 500000 + net.ipv6.conf.default.disable_ipv6: + value: {get_param: KernelDisableIPv6} + net.ipv6.conf.all.disable_ipv6: + value: {get_param: KernelDisableIPv6} # prevent neutron bridges from autoconfiguring ipv6 addresses net.ipv6.conf.all.accept_ra: value: 0 @@ -52,11 +74,17 @@ outputs: value: 0 net.ipv6.conf.default.autoconf: value: 0 + net.ipv6.conf.default.accept_redirects: + value: 0 + net.ipv6.conf.all.accept_redirects: + value: 0 net.core.netdev_max_backlog: value: 10000 kernel.pid_max: value: {get_param: KernelPidMax} kernel.dmesg_restrict: value: 1 + fs.suid_dumpable: + value: 0 step_config: | include ::tripleo::profile::base::kernel diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index 17616867..8a0e750d 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -119,27 +119,27 @@ parameters: Cron to purge expired tokens - Ensure default: 'present' KeystoneCronTokenFlushMinute: - type: string + type: comma_delimited_list description: > Cron to purge expired tokens - Minute default: '1' KeystoneCronTokenFlushHour: 
- type: string + type: comma_delimited_list description: > Cron to purge expired tokens - Hour - default: '0' + default: '*' KeystoneCronTokenFlushMonthday: - type: string + type: comma_delimited_list description: > Cron to purge expired tokens - Month Day default: '*' KeystoneCronTokenFlushMonth: - type: string + type: comma_delimited_list description: > Cron to purge expired tokens - Month default: '*' KeystoneCronTokenFlushWeekday: - type: string + type: comma_delimited_list description: > Cron to purge expired tokens - Week Day default: '*' @@ -158,6 +158,22 @@ parameters: description: > Cron to purge expired tokens - User default: 'keystone' + KeystonePolicies: + description: | + A hash of policies to configure for Keystone. + e.g. { keystone-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json + KeystoneLDAPDomainEnable: + description: Trigger to call ldap_backend puppet keystone define. + type: boolean + default: False + KeystoneLDAPBackendConfigs: + description: Hash containing the configurations for the LDAP backends + configured in keystone. + type: json + default: {} + hidden: true resources: @@ -171,6 +187,7 @@ resources: conditions: keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]} + keystone_ldap_domain_enabled: {equals: [{get_param: KeystoneLDAPDomainEnable}, True]} outputs: role_data: @@ -197,6 +214,7 @@ outputs: keystone::admin_token: {get_param: AdminToken} keystone::admin_password: {get_param: AdminPassword} keystone::roles::admin::password: {get_param: AdminPassword} + keystone::policy::policies: {get_param: KeystonePolicies} keystone_ssl_certificate: {get_param: KeystoneSSLCertificate} keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey} keystone::token_provider: {get_param: KeystoneTokenProvider} @@ -293,6 +311,15 @@ outputs: keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay} keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination} keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser} + - + if: + - keystone_ldap_domain_enabled + - + tripleo::profile::base::keystone::ldap_backend_enable: True + keystone::using_domain_config: True + tripleo::profile::base::keystone::ldap_backends_config: + get_param: KeystoneLDAPBackendConfigs + - {} step_config: | include ::tripleo::profile::base::keystone @@ -305,6 +332,13 @@ outputs: keystone::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + horizon: + if: + - keystone_ldap_domain_enabled + - + horizon::keystone_multidomain_support: true + horizon::keystone_default_domain: 'Default' + - {} # Ansible tasks to handle upgrade upgrade_tasks: - name: Stop keystone service (running under httpd) diff --git a/puppet/services/mistral-api.yaml b/puppet/services/mistral-api.yaml index 1c7d6bd3..02c69392 100644 --- a/puppet/services/mistral-api.yaml +++ b/puppet/services/mistral-api.yaml @@ -22,6 +22,12 @@ parameters: default: 1 description: The number of workers for the mistral-api. type: number + MistralApiPolicies: + description: | + A hash of policies to configure for Mistral API. + e.g. 
{ mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: MistralBase: @@ -41,6 +47,7 @@ outputs: - get_attr: [MistralBase, role_data, config_settings] - mistral::api::api_workers: {get_param: MistralWorkers} mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]} + mistral::policy::policies: {get_param: MistralApiPolicies} tripleo.mistral_api.firewall_rules: '133 mistral': dport: diff --git a/puppet/services/mistral-base.yaml b/puppet/services/mistral-base.yaml index e1030346..d5c21694 100644 --- a/puppet/services/mistral-base.yaml +++ b/puppet/services/mistral-base.yaml @@ -74,7 +74,11 @@ outputs: mistral::keystone_password: {get_param: MistralPassword} mistral::keystone_tenant: 'service' mistral::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - mistral::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]} + mistral::keystone_ec2_uri: + list_join: + - '' + - - {get_param: [EndpointMap, KeystoneV3Internal, uri]} + - '/ec2tokens' mistral::identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} service_config_settings: keystone: diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml index aba2b1ed..4b5f36ac 100644 --- a/puppet/services/monitoring/sensu-client.yaml +++ b/puppet/services/monitoring/sensu-client.yaml @@ -81,4 +81,4 @@ outputs: - name: Install sensu package if it was disabled tags: step3 yum: name=sensu state=latest - when: sensu_client.rc != 0 + when: sensu_client_enabled.rc != 0 diff --git a/puppet/services/network/contrail-vrouter.yaml b/puppet/services/network/contrail-vrouter.yaml index db9f0836..0cd1f829 100644 --- a/puppet/services/network/contrail-vrouter.yaml +++ b/puppet/services/network/contrail-vrouter.yaml @@ -27,7 +27,7 @@ parameters: description: vRouter physical interface type: string ContrailVrouterGateway: - default: '192.0.2.1' + default: '192.168.24.1' description: vRouter default gateway type: string ContrailVrouterNetmask: diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml index 7a24ffdd..9b9d1c72 100644 --- a/puppet/services/neutron-api.yaml +++ b/puppet/services/neutron-api.yaml @@ -60,6 +60,12 @@ parameters: EnableInternalTLS: type: boolean default: false + NeutronApiPolicies: + description: | + A hash of policies to configure for Neutron API. + e.g. { neutron-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json # DEPRECATED: the following options are deprecated and are currently maintained # for backwards compatibility. They will be removed in the Ocata cycle. 
@@ -127,6 +133,7 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/ovs_neutron' - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + neutron::policy::policies: {get_param: NeutronApiPolicies} neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} neutron::server::api_workers: {get_param: NeutronWorkers} diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml index e364a3aa..b41cb3cc 100644 --- a/puppet/services/neutron-base.yaml +++ b/puppet/services/neutron-base.yaml @@ -22,6 +22,10 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + DatabaseSyncTimeout: + default: 300 + description: DB Sync Timeout default + type: number NeutronDhcpAgentsPerNetwork: type: number default: 0 @@ -100,6 +104,7 @@ outputs: neutron::host: '%{::fqdn}' neutron::db::database_db_max_retries: -1 neutron::db::database_max_retries: -1 + neutron::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout} neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu} - if: - dhcp_agents_zero diff --git a/puppet/services/neutron-bigswitch-agent.yaml b/puppet/services/neutron-bigswitch-agent.yaml new file mode 100644 index 00000000..8f56e0a9 --- /dev/null +++ b/puppet/services/neutron-bigswitch-agent.yaml @@ -0,0 +1,29 @@ +heat_template_version: ocata + +description: > + Installs bigswitch agent and enables the services + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + + +outputs: + role_data: + description: Configure the bigswitch agent services + value: + service_name: neutron_bigswitch_agent + step_config: | + include ::tripleo::profile::base::neutron::agents::bigswitch diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml index 04431e28..ea717690 100644 --- a/puppet/services/neutron-compute-plugin-nuage.yaml +++ b/puppet/services/neutron-compute-plugin-nuage.yaml @@ -22,6 +22,10 @@ parameters: description: The password for the nova service account, used by nova-api. 
type: string hidden: true + NuageMetadataPort: + description: TCP Port to listen for metadata server requests + type: string + default: '9697' outputs: role_data: @@ -32,5 +36,11 @@ outputs: tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service' tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword} tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]} + tripleo.neutron_compute_plugin_nuage.firewall_rules: + '118 neutron vxlan networks': + proto: 'udp' + dport: 4789 + '100 metadata agent': + dport: {get_param: NuageMetadataPort} step_config: | include ::tripleo::profile::base::neutron::agents::nuage diff --git a/puppet/services/neutron-compute-plugin-ovn.yaml b/puppet/services/neutron-compute-plugin-ovn.yaml index e3a4da99..0dca29ab 100644 --- a/puppet/services/neutron-compute-plugin-ovn.yaml +++ b/puppet/services/neutron-compute-plugin-ovn.yaml @@ -48,6 +48,7 @@ outputs: ovn::controller::ovn_encap_type: {get_param: OVNTunnelEncapType} ovn::controller::ovn_encap_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]} ovn::controller::ovn_bridge_mappings: {get_param: NeutronBridgeMappings} + nova::compute::force_config_drive: true tripleo.neutron_compute_plugin_ovn.firewall_rules: '118 neutron vxlan networks': proto: 'udp' diff --git a/puppet/services/neutron-l2gw-api.yaml b/puppet/services/neutron-l2gw-api.yaml new file mode 100644 index 00000000..b6f0d281 --- /dev/null +++ b/puppet/services/neutron-l2gw-api.yaml @@ -0,0 +1,54 @@ +heat_template_version: ocata + +description: > + L2 Gateway service plugin configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + L2gwServiceDefaultInterfaceName: + default: 'FortyGigE1/0/1' + description: default interface name of the L2 gateway + type: string + L2gwServiceDefaultDeviceName: + default: 'Switch1' + description: default device name of the L2 gateway + type: string + L2gwServiceQuotaL2Gateway: + default: 5 + description: quota of the L2 gateway + type: number + L2gwServicePeriodicMonitoringInterval: + default: 5 + description: The periodic interval at which the plugin + type: number + L2gwServiceProvider: + default: ["L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default"] + description: Backend to use as a service provider for L2 Gateway + type: comma_delimited_list + +outputs: + role_data: + description: Role data for the L2 Gateway role. 
+ value: + service_name: neutron_l2gw_api + config_settings: + neutron::services::l2gw::default_interface_name: {get_param: L2gwServiceDefaultInterfaceName} + neutron::services::l2gw::default_device_name: {get_param: L2gwServiceDefaultDeviceName} + neutron::services::l2gw::quota_l2_gateway: {get_param: L2gwServiceQuotaL2Gateway} + neutron::services::l2gw::periodic_monitoring_interval: {get_param: L2gwServicePeriodicMonitoringInterval} + neutron::services::l2gw::service_providers: {get_param: L2gwServiceProvider} + step_config: | + include tripleo::profile::base::neutron::l2gw diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml index 01471ba2..ef2485d4 100644 --- a/puppet/services/neutron-ovs-agent.yaml +++ b/puppet/services/neutron-ovs-agent.yaml @@ -82,6 +82,9 @@ resources: DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} + OpenVswitchUpgrade: + type: ./openvswitch-upgrade.yaml + outputs: role_data: description: Role data for the Neutron OVS agent service. @@ -121,16 +124,22 @@ outputs: step_config: | include ::tripleo::profile::base::neutron::ovs upgrade_tasks: - - name: Check if neutron_ovs_agent is deployed - command: systemctl is-enabled neutron-openvswitch-agent - tags: common - ignore_errors: True - register: neutron_ovs_agent_enabled - - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running" - shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b' - when: neutron_ovs_agent_enabled.rc == 0 - tags: step0,validation - - name: Stop neutron_ovs_agent service - tags: step1 - when: neutron_ovs_agent_enabled.rc == 0 - service: name=neutron-openvswitch-agent state=stopped + yaql: + expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade + data: + ovs_upgrade: + get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks] + neutron_ovs_upgrade: + - name: Check if neutron_ovs_agent is deployed + command: systemctl is-enabled neutron-openvswitch-agent + tags: common + ignore_errors: True + register: neutron_ovs_agent_enabled + - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running" + shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b' + when: neutron_ovs_agent_enabled.rc == 0 + tags: step0,validation + - name: Stop neutron_ovs_agent service + tags: step1 + when: neutron_ovs_agent_enabled.rc == 0 + service: name=neutron-openvswitch-agent state=stopped diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml index 2c7ab57c..80516fe6 100644 --- a/puppet/services/neutron-ovs-dpdk-agent.yaml +++ b/puppet/services/neutron-ovs-dpdk-agent.yaml @@ -62,6 +62,9 @@ resources: DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} + OpenVswitchUpgrade: + type: ./openvswitch-upgrade.yaml + outputs: role_data: description: Role data for the Neutron OVS DPDK Agent service. 
@@ -82,3 +85,5 @@ outputs: vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory} vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType} step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]} + upgrade_tasks: + get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks] diff --git a/puppet/services/neutron-plugin-ml2-odl.yaml b/puppet/services/neutron-plugin-ml2-odl.yaml new file mode 100644 index 00000000..acacadfa --- /dev/null +++ b/puppet/services/neutron-plugin-ml2-odl.yaml @@ -0,0 +1,45 @@ +heat_template_version: ocata + +description: > + OpenStack Neutron ML2/OpenDaylight plugin configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + OpenDaylightPortBindingController: + description: OpenDaylight port binding controller + type: string + default: 'network-topology' + +resources: + + NeutronMl2Base: + type: ./neutron-plugin-ml2.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron ML2/ODL plugin. + value: + service_name: neutron_plugin_ml2_odl + config_settings: + map_merge: + - get_attr: [NeutronMl2Base, role_data, config_settings] + - neutron::plugins::ml2::opendaylight::port_binding_controller: {get_param: OpenDaylightPortBindingController} + step_config: | + include ::tripleo::profile::base::neutron::plugins::ml2 diff --git a/puppet/services/neutron-plugin-nuage.yaml b/puppet/services/neutron-plugin-nuage.yaml index e09cd704..6229a3f1 100644 --- a/puppet/services/neutron-plugin-nuage.yaml +++ b/puppet/services/neutron-plugin-nuage.yaml @@ -19,10 +19,6 @@ parameters: via parameter_defaults in the resource registry. type: json # Config specific parameters, to be provided via parameter_defaults - NeutronNuageOSControllerIp: - description: IP address of the OpenStack Controller - type: string - NeutronNuageNetPartitionName: description: Specifies the title that you will see on the VSD type: string @@ -76,8 +72,7 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - - neutron::plugins::nuage::nuage_oscontroller_ip: {get_param: NeutronNuageOSControllerIp} - neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName} + - neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName} neutron::plugins::nuage::nuage_vsd_ip: {get_param: NeutronNuageVSDIp} neutron::plugins::nuage::nuage_vsd_username: {get_param: NeutronNuageVSDUsername} neutron::plugins::nuage::nuage_vsd_password: {get_param: NeutronNuageVSDPassword} diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml index 473c24b4..21910cc4 100644 --- a/puppet/services/nova-api.yaml +++ b/puppet/services/nova-api.yaml @@ -62,6 +62,12 @@ parameters: default: 300 description: Timeout for Nova db sync type: number + NovaApiPolicies: + description: | + A hash of policies to configure for Nova API. + e.g. 
{ nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json conditions: nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]} @@ -145,6 +151,7 @@ outputs: nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} nova::api::instance_name_template: {get_param: InstanceNameTemplate} nova_enable_db_purge: {get_param: NovaEnableDBPurge} + nova::policy::policies: {get_param: NovaApiPolicies} - if: - nova_workers_zero @@ -220,14 +227,14 @@ outputs: - name: Run puppet apply to set tranport_url in nova.conf tags: step5 when: is_bootstrap_node - command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp + command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp register: puppet_apply_nova_api_upgrade failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2] changed_when: puppet_apply_nova_api_upgrade.rc == 2 - name: Setup cell_v2 (map cell0) tags: step5 when: is_bootstrap_node - command: nova-manage cell_v2 map_cell0 + shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection) - name: Setup cell_v2 (create default cell) tags: step5 when: is_bootstrap_node @@ -243,15 +250,15 @@ outputs: command: nova-manage db sync async: {get_param: NovaDbSyncTimeout} poll: 10 - - name: Setup cell_v2 (migrate hosts) - tags: step5 - when: is_bootstrap_node - command: nova-manage cell_v2 map_cell_and_hosts - name: Setup cell_v2 (get cell uuid) tags: step5 when: is_bootstrap_node shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}' register: nova_api_cell_uuid + - name: Setup cell_v2 (migrate hosts) + tags: step5 + when: is_bootstrap_node + command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose - name: Setup cell_v2 (migrate instances) tags: step5 when: is_bootstrap_node diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml index 8100c9b1..9e7f0145 100644 --- a/puppet/services/nova-base.yaml +++ b/puppet/services/nova-base.yaml @@ -52,6 +52,10 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + DatabaseSyncTimeout: + default: 300 + description: DB Sync Timeout default + type: number Debug: type: string default: '' @@ -151,6 +155,16 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/nova' - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + nova::cell0_database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova:' + - {get_param: NovaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/nova_cell0' + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' nova::api_database_connection: list_join: - '' @@ -188,6 +202,8 @@ outputs: nova::network::neutron::neutron_auth_type: 'v3password' nova::db::database_db_max_retries: -1 nova::db::database_max_retries: -1 + nova::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout} + nova::db::sync_api::db_sync_timeout: {get_param: DatabaseSyncTimeout} nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} nova::use_ipv6: {get_param: NovaIPv6} nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge} diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml 
index a9737eb6..b1711436 100644 --- a/puppet/services/nova-compute.yaml +++ b/puppet/services/nova-compute.yaml @@ -79,6 +79,13 @@ parameters: type: string description: Nova Compute upgrade level default: auto + MigrationSshKey: + type: json + description: > + SSH key for migration. + Expects a dictionary with keys 'public_key' and 'private_key'. + Values should be identical to SSH public/private key files. + default: {} resources: NovaBase: @@ -111,6 +118,7 @@ outputs: # we manage migration in nova common puppet profile nova::compute::libvirt::migration_support: false tripleo::profile::base::nova::manage_migration: true + tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey} tripleo::profile::base::nova::nova_compute_enabled: true nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml index faf1ae48..21a5e78a 100644 --- a/puppet/services/nova-libvirt.yaml +++ b/puppet/services/nova-libvirt.yaml @@ -32,6 +32,36 @@ parameters: MonitoringSubscriptionNovaLibvirt: default: 'overcloud-nova-libvirt' type: string + EnableInternalTLS: + type: boolean + default: false + UseTLSTransportForLiveMigration: + type: boolean + default: true + description: If set to true and if EnableInternalTLS is enabled, it will + set the libvirt URI's transport to tls and configure the + relevant keys for libvirt. + LibvirtCACert: + type: string + default: '/etc/ipa/ca.crt' + description: This specifies the CA certificate to use for TLS in libvirt. + This file will be symlinked to the default CA path in libvirt, + which is /etc/pki/CA/cacert.pem. Note that due to limitations + GNU TLS, which is the TLS backend for libvirt, the file must + be less than 65K (so we can't use the system's CA bundle). The + current default reflects TripleO's default CA, which is + FreeIPA. It will only be used if internal TLS is enabled. 
+ +conditions: + + use_tls_for_live_migration: + and: + - equals: + - {get_param: EnableInternalTLS} + - true + - equals: + - {get_param: UseTLSTransportForLiveMigration} + - true resources: NovaBase: @@ -66,10 +96,61 @@ outputs: tripleo.nova_libvirt.firewall_rules: '200 nova_libvirt': dport: - - 16509 - 16514 - '49152-49215' - '5900-5999' + - + if: + - use_tls_for_live_migration + - + generate_service_certificates: true + tripleo::profile::base::nova::libvirt_tls: true + nova::migration::libvirt::live_migration_inbound_addr: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} + tripleo::certmonger::ca::libvirt::origin_ca_pem: + get_param: LibvirtCACert + tripleo::certmonger::libvirt_dirs::certificate_dir: '/etc/pki/libvirt' + tripleo::certmonger::libvirt_dirs::key_dir: '/etc/pki/libvirt/private' + libvirt_certificates_specs: + libvirt-server-cert: + service_certificate: '/etc/pki/libvirt/servercert.pem' + service_key: '/etc/pki/libvirt/private/serverkey.pem' + hostname: + str_replace: + template: "%{hiera('fqdn_NETWORK')}" + params: + NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} + principal: + str_replace: + template: "libvirt/%{hiera('fqdn_NETWORK')}" + params: + NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} + libvirt-client-cert: + service_certificate: '/etc/pki/libvirt/clientcert.pem' + service_key: '/etc/pki/libvirt/private/clientkey.pem' + hostname: + str_replace: + template: "%{hiera('fqdn_NETWORK')}" + params: + NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} + principal: + str_replace: + template: "libvirt/%{hiera('fqdn_NETWORK')}" + params: + NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} + - {} step_config: | include tripleo::profile::base::nova::libvirt + metadata_settings: + if: + - use_tls_for_live_migration + - + - service: libvirt + network: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} + type: node + - null diff --git a/puppet/services/octavia-api.yaml b/puppet/services/octavia-api.yaml index 909a3030..2f898a67 100644 --- a/puppet/services/octavia-api.yaml +++ b/puppet/services/octavia-api.yaml @@ -34,6 +34,12 @@ parameters: default: tag: openstack.octavia.api path: /var/log/octavia/api.log + OctaviaApiPolicies: + description: | + A hash of policies to configure for Octavia API. + e.g. { octavia-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: @@ -57,6 +63,7 @@ outputs: map_merge: - get_attr: [OctaviaBase, role_data, config_settings] - octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + octavia::policy::policies: {get_param: OctaviaApiPolicies} octavia::db::database_connection: list_join: - '' diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml index 5cf416f3..ed572b4d 100644 --- a/puppet/services/opendaylight-ovs.yaml +++ b/puppet/services/opendaylight-ovs.yaml @@ -48,6 +48,10 @@ parameters: default: {} type: json +resources: + OpenVswitchUpgrade: + type: ./openvswitch-upgrade.yaml + outputs: role_data: description: Role data for the OpenDaylight service. 
@@ -70,16 +74,22 @@ outputs: step_config: | include tripleo::profile::base::neutron::plugins::ovs::opendaylight upgrade_tasks: - - name: Check if openvswitch is deployed - command: systemctl is-enabled openvswitch - tags: common - ignore_errors: True - register: openvswitch_enabled - - name: "PreUpgrade step0,validation: Check service openvswitch is running" - shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b' - when: openvswitch_enabled.rc == 0 - tags: step0,validation - - name: Stop openvswitch service - tags: step1 - when: openvswitch_enabled.rc == 0 - service: name=openvswitch state=stopped + yaql: + expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade + data: + ovs_upgrade: + get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks] + opendaylight_upgrade: + - name: Check if openvswitch is deployed + command: systemctl is-enabled openvswitch + tags: common + ignore_errors: True + register: openvswitch_enabled + - name: "PreUpgrade step0,validation: Check service openvswitch is running" + shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b' + when: openvswitch_enabled.rc == 0 + tags: step0,validation + - name: Stop openvswitch service + tags: step1 + when: openvswitch_enabled.rc == 0 + service: name=openvswitch state=stopped diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml new file mode 100644 index 00000000..fea1ba96 --- /dev/null +++ b/puppet/services/openvswitch-upgrade.yaml @@ -0,0 +1,50 @@ +heat_template_version: ocata + +description: > + Openvswitch package special handling for upgrade. + +outputs: + role_data: + description: Upgrade task for special handling of Openvswitch (OVS) upgrade. + value: + service_name: openvswitch_upgrade + upgrade_tasks: + - name: Check openvswitch version. + tags: step2 + register: ovs_version + ignore_errors: true + shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}' + - name: Check openvswitch packaging. + tags: step2 + shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart" + register: ovs_packaging_issue + ignore_errors: true + - block: + - name: "Ensure empty directory: emptying." + file: + state: absent + path: /root/OVS_UPGRADE + - name: "Ensure empty directory: creating." + file: + state: directory + path: /root/OVS_UPGRADE + owner: root + group: root + mode: 0750 + - name: Download OVS packages. + command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch + - name: Get rpm list for manual upgrade of OVS. 
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm + register: ovs_list_of_rpms + - name: Manual upgrade of OVS + shell: | + rpm -U --test {{item}} 2>&1 | grep "already installed" || \ + rpm -U --replacepkgs --notriggerun --nopostun {{item}}; + args: + chdir: /root/OVS_UPGRADE + with_items: + - "{{ovs_list_of_rpms.stdout_lines}}" + tags: step2 + when: "'2.5.0-14' in '{{ovs_version.stdout}}' + or + ovs_packaging_issue|succeeded" diff --git a/puppet/services/ovn-dbs.yaml b/puppet/services/ovn-dbs.yaml index 7f81afde..6b8be77c 100644 --- a/puppet/services/ovn-dbs.yaml +++ b/puppet/services/ovn-dbs.yaml @@ -36,5 +36,11 @@ outputs: ovn::northbound::port: {get_param: OVNNorthboundServerPort} ovn::southbound::port: {get_param: OVNSouthboundServerPort} ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]} + tripleo.ovn_dbs.firewall_rules: + '121 OVN DB server ports': + proto: 'tcp' + dport: + - {get_param: OVNNorthboundServerPort} + - {get_param: OVNSouthboundServerPort} step_config: | include ::tripleo::profile::base::neutron::ovn_northd diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml index 762d0092..f7a0edf8 100644 --- a/puppet/services/pacemaker.yaml +++ b/puppet/services/pacemaker.yaml @@ -87,10 +87,16 @@ parameters: \[(?<pid>[^ ]*)\] (?<host>[^ ]*) (?<message>.*)$/ + + EnableLoadBalancer: + default: true + description: Whether to deploy a LoadBalancer on the Controller + type: boolean + PacemakerResources: type: comma_delimited_list description: List of resources managed by pacemaker - default: ['rabbitmq','haproxy','galera'] + default: ['rabbitmq', 'galera'] outputs: role_data: @@ -135,6 +141,8 @@ outputs: - name: Check pacemaker cluster running before upgrade tags: step0,validation pacemaker_cluster: state=online check_and_fail=true + async: 30 + poll: 4 - name: Stop pacemaker cluster tags: step2 pacemaker_cluster: state=offline @@ -147,3 +155,9 @@ outputs: resource: "{{ item }}" max_wait: 500 with_items: {get_param: PacemakerResources} + - name: Check pacemaker haproxy resource + tags: step4 + pacemaker_is_active: + resource: haproxy + max_wait: 500 + when: {get_param: EnableLoadBalancer} diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml index eed98257..43e7aa18 100644 --- a/puppet/services/panko-api.yaml +++ b/puppet/services/panko-api.yaml @@ -24,6 +24,12 @@ parameters: EnableInternalTLS: type: boolean default: false + PankoApiPolicies: + description: | + A hash of policies to configure for Panko API. + e.g. { panko-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: PankoBase: @@ -58,6 +64,7 @@ outputs: "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, PankoApiNetwork]} + panko::policy::policies: {get_param: PankoApiPolicies} panko::api::service_name: 'httpd' panko::api::enable_proxy_headers_parsing: true tripleo.panko_api.firewall_rules: diff --git a/puppet/services/qdr.yaml b/puppet/services/qdr.yaml new file mode 100644 index 00000000..f8746cec --- /dev/null +++ b/puppet/services/qdr.yaml @@ -0,0 +1,60 @@ +heat_template_version: ocata + +description: > + Qpid dispatch router service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
+ type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RabbitUserName: + default: guest + description: The username for Qdr + type: string + RabbitPassword: + description: The password for Qdr + type: string + hidden: true + RabbitClientPort: + description: Listening port for Qdr + default: 5672 + type: number + MonitoringSubscriptionQdr: + default: 'overcloud-qdr' + type: string + +outputs: + role_data: + description: Role data for the Qdr role. + value: + service_name: rabbitmq + monitoring_subscription: {get_param: MonitoringSubscriptionQdr} + global_config_settings: + messaging_notify_service_name: 'amqp' + messaging_rpc_service_name: 'amqp' + keystone::messaging::amqp::amqp_pre_settled: 'notify' + config_settings: + tripleo.rabbitmq.firewall_rules: + '109 qdr': + dport: + - {get_param: RabbitClientPort} + qdr::listener_addr: {get_param: [ServiceNetMap, QdrNetwork]} + # cannot pass qdr::listener_port directly because it needs to be a string + # we do the conversion in the puppet layer + tripleo::profile::base::qdr::qdr_listener_port: {get_param: RabbitClientPort} + tripleo::profile::base::qdr::qdr_username: {get_param: RabbitUserName} + tripleo::profile::base::qdr::qdr_password: {get_param: RabbitPassword} + + step_config: | + include ::tripleo::profile::base::qdr diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml index 96b3d6e3..d9f2115a 100644 --- a/puppet/services/sahara-api.yaml +++ b/puppet/services/sahara-api.yaml @@ -38,6 +38,12 @@ parameters: default: tag: openstack.sahara.api path: /var/log/sahara/sahara-api.log + SaharaApiPolicies: + description: | + A hash of policies to configure for Sahara API. + e.g. { sahara-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json resources: SaharaBase: @@ -60,6 +66,7 @@ outputs: map_merge: - get_attr: [SaharaBase, role_data, config_settings] - sahara::port: {get_param: [EndpointMap, SaharaInternal, port]} + sahara::policy::policies: {get_param: SaharaApiPolicies} sahara::service::api::api_workers: {get_param: SaharaWorkers} # NOTE: bind IP is found in Heat replacing the network name with the local node IP # for the given network; replacement examples (eg. for internal_api): diff --git a/puppet/services/securetty.yaml b/puppet/services/securetty.yaml new file mode 100644 index 00000000..6d32fe82 --- /dev/null +++ b/puppet/services/securetty.yaml @@ -0,0 +1,36 @@ +heat_template_version: ocata + +description: > + Configure securetty values + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + TtyValues: + default: {} + description: Configures console values in securetty + type: json + constraints: + - length: { min: 1} + +outputs: + role_data: + description: Console data for the securetty + value: + service_name: securetty + config_settings: + tripleo::profile::base::securetty::tty_list: {get_param: TtyValues} + step_config: | + include ::tripleo::profile::base::securetty diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml index a2286d16..9820b431 100644 --- a/puppet/services/services.yaml +++ b/puppet/services/services.yaml @@ -90,14 +90,11 @@ outputs: # fluentd user. yaql: expression: > - set($.data.groups.flatten()).where($) + set(($.data.default + $.data.extra + $.data.role_data.where($ != null).select($.get('logging_groups'))).flatten()).where($) data: - groups: - - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}] - - yaql: - expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null)) - data: {role_data: {get_attr: [ServiceChain, role_data]}} - - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}] + default: {get_attr: [LoggingConfiguration, LoggingDefaultGroups]} + extra: {get_attr: [LoggingConfiguration, LoggingExtraGroups]} + role_data: {get_attr: [ServiceChain, role_data]} config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}} global_config_settings: map_merge: diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index 0c3cc1ec..0ecc942c 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -63,10 +63,14 @@ parameters: Rabbit client subscriber parameter to specify an SSL connection to the RabbitMQ host. type: string + EnableInternalTLS: + type: boolean + default: false conditions: ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]} + use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]} resources: SwiftBase: @@ -76,6 +80,14 @@ resources: DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} + TLSProxyBase: + type: OS::TripleO::Services::TLSProxyBase + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} + outputs: role_data: description: Role data for the Swift proxy service. 
@@ -85,7 +97,7 @@ outputs: config_settings: map_merge: - get_attr: [SwiftBase, role_data, config_settings] - + - get_attr: [TLSProxyBase, role_data, config_settings] - swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} swift::proxy::authtoken::password: {get_param: SwiftPassword} @@ -146,7 +158,22 @@ outputs: # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - swift::proxy::proxy_local_net_ip: {get_param: [ServiceNetMap, SwiftProxyNetwork]} + tripleo::profile::base::swift::proxy::tls_proxy_bind_ip: + get_param: [ServiceNetMap, SwiftProxyNetwork] + tripleo::profile::base::swift::proxy::tls_proxy_fqdn: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, SwiftProxyNetwork]} + tripleo::profile::base::swift::proxy::tls_proxy_port: + get_param: [EndpointMap, SwiftInternal, port] + swift::proxy::port: {get_param: [EndpointMap, SwiftInternal, port]} + swift::proxy::proxy_local_net_ip: + if: + - use_tls_proxy + - 'localhost' + - {get_param: [ServiceNetMap, SwiftProxyNetwork]} step_config: | include ::tripleo::profile::base::swift::proxy service_config_settings: @@ -169,3 +196,5 @@ outputs: - name: Stop swift_proxy service tags: step1 service: name=openstack-swift-proxy state=stopped + metadata_settings: + get_attr: [TLSProxyBase, role_data, metadata_settings] diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml index 2e3c818f..f62d5e18 100644 --- a/puppet/services/swift-ringbuilder.yaml +++ b/puppet/services/swift-ringbuilder.yaml @@ -42,6 +42,14 @@ parameters: default: true description: 'Use a local directory for Swift storage services when building rings' type: boolean + SwiftRingGetTempurl: + default: '' + description: A temporary Swift URL to download rings from. + type: string + SwiftRingPutTempurl: + default: '' + description: A temporary Swift URL to upload rings to. + type: string conditions: swift_use_local_dir: @@ -59,6 +67,8 @@ outputs: value: service_name: swift_ringbuilder config_settings: + tripleo::profile::base::swift::ringbuilder::swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl} + tripleo::profile::base::swift::ringbuilder::swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl} tripleo::profile::base::swift::ringbuilder::build_ring: {get_param: SwiftRingBuild} tripleo::profile::base::swift::ringbuilder::replicas: {get_param: SwiftReplicas} tripleo::profile::base::swift::ringbuilder::part_power: {get_param: SwiftPartPower} diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml index a4c139b5..c14e061b 100644 --- a/puppet/services/tacker.yaml +++ b/puppet/services/tacker.yaml @@ -47,6 +47,12 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + TackerPolicies: + description: | + A hash of policies to configure for Tacker. + e.g. 
{ tacker-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json outputs: role_data: @@ -87,10 +93,12 @@ outputs: tacker::db::mysql::allowed_hosts: - '%' - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + tacker::policy::policies: {get_param: TackerPolicies} service_config_settings: keystone: tacker::keystone::auth::tenant: 'service' + tacker::keystone::auth::region: {get_param: KeystoneRegion} tacker::keystone::auth::password: {get_param: TackerPassword} tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]} tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]} diff --git a/puppet/services/zaqar.yaml b/puppet/services/zaqar.yaml index a320f694..33769d02 100644 --- a/puppet/services/zaqar.yaml +++ b/puppet/services/zaqar.yaml @@ -30,6 +30,12 @@ parameters: type: string default: 'regionOne' description: Keystone region for endpoint + ZaqarPolicies: + description: | + A hash of policies to configure for Zaqar. + e.g. { zaqar-context_is_admin: { key: context_is_admin, value: 'role:admin' } } + default: {} + type: json outputs: @@ -38,6 +44,7 @@ outputs: value: service_name: zaqar config_settings: + zaqar::policy::policies: {get_param: ZaqarPolicies} zaqar::keystone::authtoken::password: {get_param: ZaqarPassword} zaqar::keystone::authtoken::project_name: 'service' zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} |
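
Several of the hunks above add a per-service *Policies parameter (KeystonePolicies, NeutronApiPolicies, NovaApiPolicies, MistralApiPolicies, OctaviaApiPolicies, PankoApiPolicies, SaharaApiPolicies, TackerPolicies, ZaqarPolicies), each passed straight through to the matching <service>::policy::policies hieradata. A minimal parameter_defaults sketch following the hash layout shown in the parameter descriptions; the policy key and value are illustrative only:

parameter_defaults:
  NovaApiPolicies:
    # arbitrary unique title for the entry
    nova-context_is_admin:
      key: context_is_admin                   # policy name written to the policy file
      value: 'role:admin or role:operator'    # illustrative rule

The same shape applies to every service listed above; only the parameter name changes.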
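
KeystoneLDAPDomainEnable and KeystoneLDAPBackendConfigs turn on domain-specific LDAP identity backends: when the keystone_ldap_domain_enabled condition fires, keystone::using_domain_config is set, the hash is handed to tripleo::profile::base::keystone::ldap_backends_config, and horizon gets multi-domain support with 'Default' as its default domain. A sketch of such a hash, assuming the per-domain keys map onto keystone's [ldap] options as consumed by the ldap_backend define; the domain name, URL, DNs and password are placeholders:

parameter_defaults:
  KeystoneLDAPDomainEnable: true
  KeystoneLDAPBackendConfigs:
    tripleoldap:                                    # placeholder domain name
      url: ldap://ldap.example.com                  # placeholder
      user: cn=keystone,ou=Users,dc=example,dc=com  # placeholder bind DN
      password: SecretPassword                      # placeholder
      suffix: dc=example,dc=com
      user_tree_dn: ou=Users,dc=example,dc=com
      user_objectclass: person
      user_id_attribute: cn

Each top-level key becomes one keystone domain backed by its own LDAP configuration.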
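
MigrationSshKey, added to nova-compute.yaml, is documented as a dictionary with 'public_key' and 'private_key' entries whose values are identical to the corresponding SSH key files; tripleo::profile::base::nova consumes it alongside manage_migration. A sketch with truncated placeholder key material:

parameter_defaults:
  MigrationSshKey:
    public_key: 'ssh-rsa AAAAB3Nza... nova-migration'  # placeholder
    private_key: |  # placeholder, never commit real keys
      -----BEGIN RSA PRIVATE KEY-----
      MIIEpAIBAAKCAQEA...
      -----END RSA PRIVATE KEY-----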
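
kernel.yaml and the new securetty.yaml expose two opt-in hardening knobs: KernelDisableIPv6 feeds the net.ipv6.conf.{default,all}.disable_ipv6 sysctls (the default of 0 keeps IPv6 enabled), and TtyValues is handed to tripleo::profile::base::securetty::tty_list. Assuming the profile expects a list of allowed consoles, a purely illustrative sketch:

parameter_defaults:
  KernelDisableIPv6: 1
  TtyValues:        # illustrative console list, adjust to site policy
    - console
    - tty1
    - tty2

As with any new service template, securetty only takes effect once it is included in a role's service list.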