24 files changed, 310 insertions, 144 deletions
diff --git a/deployed-server/deployed-server-config.yaml b/deployed-server/deployed-server-config.yaml deleted file mode 100644 index 8c59dc72..00000000 --- a/deployed-server/deployed-server-config.yaml +++ /dev/null @@ -1,22 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - user_data_format: - type: string - default: SOFTWARE_CONFIG - -resources: - # We just need something which returns a unique ID, but we can't - # use RandomString because RefId returns the value, not the physical - # resource ID, SoftwareConfig should work as it returns a UUID - deployed-server-config: - type: OS::Heat::SoftwareConfig - -outputs: - # FIXME(shardy) this is needed because TemplateResource returns an - # ARN not a UUID, which overflows the Deployment server_id column.. - user_data_format: - value: SOFTWARE_CONFIG - OS::stack_id: - value: {get_resource: deployed-server-config} - - diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml index 22797c2e..3ba7abac 100644 --- a/deployed-server/deployed-server.yaml +++ b/deployed-server/deployed-server.yaml @@ -21,7 +21,7 @@ parameters: default: '' name: type: string - default: '' + default: 'deployed-server' image_update_policy: type: string default: '' @@ -40,20 +40,18 @@ parameters: default: {} resources: - # We just need something which returns a unique ID, but we can't - # use RandomString because RefId returns the value, not the physical - # resource ID, SoftwareConfig should work as it returns a UUID deployed-server: - type: OS::TripleO::DeployedServerConfig + type: OS::Heat::DeployedServer properties: - user_data_format: SOFTWARE_CONFIG + name: {get_param: name} + software_config_transport: {get_param: software_config_transport} InstanceIdConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: apply-config config: - instance-id: {get_attr: [deployed-server, "OS::stack_id"]} + instance-id: {get_resource: deployed-server} InstanceIdDeployment: type: OS::Heat::StructuredDeployment @@ -69,7 +67,7 @@ resources: #!/bin/bash set -eux mkdir -p $heat_outputs_path - host=$(hostnamectl --transient) + host=$(hostname -s) echo -n $host > $heat_outputs_path.hostname cat $heat_outputs_path.hostname outputs: @@ -88,10 +86,8 @@ resources: Hostname: {get_attr: [HostsEntryDeployment, hostname]} outputs: - # FIXME(shardy) this is needed because TemplateResource returns an - # ARN not a UUID, which overflows the Deployment server_id column.. 
OS::stack_id: - value: {get_attr: [deployed-server, "OS::stack_id"]} + value: {get_resource: deployed-server} networks: value: ctlplane: diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh index c3ce7183..404244b1 100755 --- a/deployed-server/scripts/get-occ-config.sh +++ b/deployed-server/scripts/get-occ-config.sh @@ -79,24 +79,19 @@ for role in $OVERCLOUD_ROLES; do server_stack=$(openstack stack resource show $stack $server_resource_name -c physical_resource_id -f value) done - deployed_server_stack=$(openstack stack resource show $server_stack deployed-server -c physical_resource_id -f value) + deployed_server_metadata_url=$(openstack stack resource metadata $server_stack deployed-server | jq -r '.["os-collect-config"].request.metadata_url') echo "======================" echo "$role$i os-collect-config.conf configuration:" config=" [DEFAULT] -collectors=heat +collectors=request command=os-refresh-config polling_interval=30 -[heat] -user_id=$admin_user_id -password=$OS_PASSWORD -auth_url=$OS_AUTH_URL -project_id=$admin_project_id -stack_id=$deployed_server_stack -resource_name=deployed-server-config" +[request] +metadata_url=$deployed_server_metadata_url" echo "$config" echo "======================" diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh index acb44ce5..e7e276b2 100644..100755 --- a/docker/firstboot/start_docker_agents.sh +++ b/docker/firstboot/start_docker_agents.sh @@ -43,6 +43,7 @@ AGENT_COMMAND_MOUNTS="-v /var/lib/etc-data:/var/lib/etc-data \ -v /var/lib/cloud:/var/lib/cloud \ -v /var/lib/heat-cfntools:/var/lib/heat-cfntools \ -v /etc/sysconfig/docker:/etc/sysconfig/docker \ + -v /etc/sysconfig/network-scripts:/etc/sysconfig/network-scripts \ -v /usr/lib64/libseccomp.so.2:/usr/lib64/libseccomp.so.2" diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml index de17cffe..6cb92c83 100644 --- a/docker/post.j2.yaml +++ b/docker/post.j2.yaml @@ -252,27 +252,6 @@ resources: environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS - NovaComputeContainersDeploymentNetconfig: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: NovaComputeContainersDeploymentOVS - properties: - name: NovaComputeContainersDeploymentNetconfig - config: {get_resource: NovaComputeContainersConfigNetconfig} - servers: {get_param: [servers, {{role.name}}]} - - # We run os-net-config here because we depend on the ovs containers to be up - # and running before we configure the network. This allows explicit timing - # of the network configuration. 
- NovaComputeContainersConfigNetconfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - outputs: - - name: result - config: | - #!/bin/bash - /usr/local/bin/run-os-net-config - {{role.name}}ContainersConfig_Step1: type: OS::Heat::StructuredConfig depends_on: CopyJsonDeployment @@ -291,7 +270,7 @@ resources: {{role.name}}ContainersDeployment_Step1: type: OS::Heat::StructuredDeploymentGroup - depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy, NovaComputeContainersDeploymentNetconfig] + depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] properties: name: {{role.name}}ContainersDeployment_Step1 servers: {get_param: [servers, {{role.name}}]} diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml index c63d399a..e9004bd6 100644 --- a/environments/deployed-server-environment.yaml +++ b/environments/deployed-server-environment.yaml @@ -1,4 +1,3 @@ resource_registry: OS::TripleO::Server: ../deployed-server/deployed-server.yaml - OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/ctlplane-port.yaml diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml new file mode 100644 index 00000000..69d72edd --- /dev/null +++ b/environments/major-upgrade-all-in-one.yaml @@ -0,0 +1,8 @@ +# We run the upgrade steps without disabling the OS::TripleO::PostDeploySteps +# this means you can do a major upgrade in one pass, which may be useful +# e.g for all-in-one deployments where we can upgrade the compute services +# at the same time as the controlplane +# Note that it will be necessary to pass a mapping of OS::Heat::None again for +# any subsequent updates, or the upgrade steps will run again. +resource_registry: + OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml diff --git a/network/scripts/run-os-net-config.sh b/network/scripts/run-os-net-config.sh index fc1e6d54..5df67b78 100755 --- a/network/scripts/run-os-net-config.sh +++ b/network/scripts/run-os-net-config.sh @@ -1,7 +1,7 @@ #!/bin/bash -# Note this script expects the following environment variables to be set -# normally these are provided by the calling SoftwareConfig resource, but -# they may also be set manually for testing +# The following environment variables may be set to substitute in a +# custom bridge or interface name. Normally these are provided by the calling +# SoftwareConfig resource, but they may also be set manually for testing. # $bridge_name : The bridge device name to apply # $interface_name : The interface name to apply # @@ -113,8 +113,8 @@ if [ -n '$network_config' ]; then mkdir -p /etc/os-net-config # Note these variables come from the calling heat SoftwareConfig echo '$network_config' > /etc/os-net-config/config.json - sed -i "s/bridge_name/$bridge_name/" /etc/os-net-config/config.json - sed -i "s/interface_name/$interface_name/" /etc/os-net-config/config.json + sed -i "s/bridge_name/${bridge_name:-''}/" /etc/os-net-config/config.json + sed -i "s/interface_name/${interface_name:-''}/" /etc/os-net-config/config.json os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes RETVAL=$? 
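Reviewer note on the run-os-net-config.sh hunk above: the new `${bridge_name:-''}` / `${interface_name:-''}` defaults give sed a replacement value even when the calling SoftwareConfig passes no inputs. A minimal sketch of that expansion behaviour, assuming a throwaway config file (the file path and JSON content below are illustrative only, not part of the change):

```bash
#!/bin/bash
# Sketch only: demonstrates the defaulting expression used in
# network/scripts/run-os-net-config.sh. The temp file stands in for
# /etc/os-net-config/config.json.
set -eu

config=$(mktemp)
echo '{"network_config": [{"type": "ovs_bridge", "name": "bridge_name"}]}' > "$config"

# ${bridge_name:-''} expands to the literal two-character string '' when
# the variable is unset or empty; the :- form is safe even under `set -u`.
sed -i "s/bridge_name/${bridge_name:-''}/" "$config"
sed -i "s/interface_name/${interface_name:-''}/" "$config"

cat "$config"   # shows '' in place of bridge_name when the variable was not set
rm -f "$config"
```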
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml index 0cb6571f..5991b3bc 100644 --- a/network/service_net_map.j2.yaml +++ b/network/service_net_map.j2.yaml @@ -59,6 +59,7 @@ parameters: PublicNetwork: external OpendaylightApiNetwork: internal_api MistralApiNetwork: internal_api + ZaqarApiNetwork: internal_api # We special-case the default ResolveNetwork for the CephStorage role # for backwards compatibility, all other roles default to internal_api CephStorageHostnameResolveNetwork: storage diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index 77a48658..ebbeef6e 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -90,6 +90,7 @@ resource_registry: OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml + OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port # Service to network Mappings OS::TripleO::ServiceNetMap: network/service_net_map.yaml diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index 39a092b1..f7e6f37f 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -477,7 +477,7 @@ resources: type: OS::TripleO::Network ControlVirtualIP: - type: OS::Neutron::Port + type: OS::TripleO::Network::Ports::ControlPlaneVipPort depends_on: Networks properties: name: control_virtual_ip @@ -587,9 +587,9 @@ resources: servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} {% endfor %} - # Post deployment steps for all roles - AllNodesDeploySteps: - type: OS::TripleO::PostDeploySteps + # Upgrade steps for all roles + AllNodesUpgradeSteps: + type: OS::TripleO::UpgradeSteps depends_on: {% for role in roles %} - {{role.name}}AllNodesDeployment @@ -604,10 +604,10 @@ resources: {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} {% endfor %} - # Upgrade steps for all roles - AllNodesUpgradeSteps: - type: OS::TripleO::UpgradeSteps - depends_on: AllNodesDeploySteps + # Post deployment steps for all roles + AllNodesDeploySteps: + type: OS::TripleO::PostDeploySteps + depends_on: AllNodesUpgradeSteps properties: servers: {% for role in roles %} @@ -618,7 +618,6 @@ resources: {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} {% endfor %} - outputs: ManagedEndpoints: description: Asserts that the keystone endpoints have been provisioned. diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml index 36587a41..7d1f8d8f 100644 --- a/puppet/blockstorage-role.yaml +++ b/puppet/blockstorage-role.yaml @@ -71,11 +71,20 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + BlockStorageServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. This option is + role-specific and is merged with the values given to the ServerMetadata + parameter. + type: json ServerMetadata: default: {} description: > Extra properties or metadata passed to Nova for the created nodes in - the overcloud. It's accessible via the Nova metadata API. + the overcloud. It's accessible via the Nova metadata API. This applies to + all roles and is merged with a role-specific metadata parameter. 
type: json BlockStorageSchedulerHints: type: json @@ -97,6 +106,12 @@ parameters: type: string description: Command which will be run whenever configuration data changes default: os-refresh-config --timeout 14400 + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. + default: '' resources: BlockStorage: @@ -118,7 +133,10 @@ resources: template: {get_param: Hostname} params: {get_param: HostnameMap} software_config_transport: {get_param: SoftwareConfigTransport} - metadata: {get_param: ServerMetadata} + metadata: + map_merge: + - {get_param: ServerMetadata} + - {get_param: BlockStorageServerMetadata} scheduler_hints: {get_param: BlockStorageSchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives @@ -315,9 +333,30 @@ resources: server: {get_resource: BlockStorage} actions: {get_param: NetworkDeploymentActions} + BlockStorageUpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + + # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty + # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first + BlockStorageUpgradeInitDeployment: + type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment + properties: + name: BlockStorageUpgradeInitDeployment + server: {get_resource: BlockStorage} + config: {get_resource: BlockStorageUpgradeInitConfig} + BlockStorageDeployment: type: OS::Heat::StructuredDeployment - depends_on: NetworkDeployment + depends_on: BlockStorageUpgradeInitDeployment properties: name: BlockStorageDeployment server: {get_resource: BlockStorage} diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml index 558f97d8..2c46bf1a 100644 --- a/puppet/cephstorage-role.yaml +++ b/puppet/cephstorage-role.yaml @@ -77,11 +77,20 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + CephStorageServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. This option is + role-specific and is merged with the values given to the ServerMetadata + parameter. + type: json ServerMetadata: default: {} description: > Extra properties or metadata passed to Nova for the created nodes in - the overcloud. It's accessible via the Nova metadata API. + the overcloud. It's accessible via the Nova metadata API. This applies to + all roles and is merged with a role-specific metadata parameter. type: json CephStorageSchedulerHints: type: json @@ -103,6 +112,12 @@ parameters: type: string description: Command which will be run whenever configuration data changes default: os-refresh-config --timeout 14400 + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. 
+ default: '' resources: CephStorage: @@ -124,7 +139,10 @@ resources: template: {get_param: Hostname} params: {get_param: HostnameMap} software_config_transport: {get_param: SoftwareConfigTransport} - metadata: {get_param: ServerMetadata} + metadata: + map_merge: + - {get_param: ServerMetadata} + - {get_param: CephStorageServerMetadata} scheduler_hints: {get_param: CephStorageSchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives @@ -321,9 +339,30 @@ resources: server: {get_resource: CephStorage} actions: {get_param: NetworkDeploymentActions} + CephStorageUpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + + # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty + # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first + CephStorageUpgradeInitDeployment: + type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment + properties: + name: CephStorageUpgradeInitDeployment + server: {get_resource: CephStorage} + config: {get_resource: CephStorageUpgradeInitConfig} + CephStorageDeployment: type: OS::Heat::StructuredDeployment - depends_on: NetworkDeployment + depends_on: CephStorageUpgradeInitDeployment properties: name: CephStorageDeployment config: {get_resource: CephStorageConfig} diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml index 818f18c8..0a2598c1 100644 --- a/puppet/compute-role.yaml +++ b/puppet/compute-role.yaml @@ -92,11 +92,20 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + NovaComputeServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. This option is + role-specific and is merged with the values given to the ServerMetadata + parameter. + type: json ServerMetadata: default: {} description: > Extra properties or metadata passed to Nova for the created nodes in - the overcloud. It's accessible via the Nova metadata API. + the overcloud. It's accessible via the Nova metadata API. This applies to + all roles and is merged with a role-specific metadata parameter. type: json NovaComputeSchedulerHints: type: json @@ -115,6 +124,12 @@ parameters: type: string description: Command which will be run whenever configuration data changes default: os-refresh-config --timeout 14400 + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. 
+ default: '' resources: @@ -138,7 +153,10 @@ resources: template: {get_param: Hostname} params: {get_param: HostnameMap} software_config_transport: {get_param: SoftwareConfigTransport} - metadata: {get_param: ServerMetadata} + metadata: + map_merge: + - {get_param: ServerMetadata} + - {get_param: NovaComputeServerMetadata} scheduler_hints: {get_param: NovaComputeSchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives @@ -338,6 +356,27 @@ resources: bridge_name: {get_param: NeutronPhysicalBridge} interface_name: {get_param: NeutronPublicInterface} + NovaComputeUpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + + # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty + # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first + NovaComputeUpgradeInitDeployment: + type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment + properties: + name: NovaComputeUpgradeInitDeployment + server: {get_resource: NovaCompute} + config: {get_resource: NovaComputeUpgradeInitConfig} + NovaComputeConfig: type: OS::Heat::StructuredConfig properties: @@ -383,7 +422,7 @@ resources: NovaComputeDeployment: type: OS::TripleO::SoftwareDeployment - depends_on: NetworkDeployment + depends_on: NovaComputeUpgradeInitDeployment properties: name: NovaComputeDeployment config: {get_resource: NovaComputeConfig} diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml index 2781daa0..5e03adcd 100644 --- a/puppet/controller-role.yaml +++ b/puppet/controller-role.yaml @@ -106,11 +106,20 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + ControllerServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. This option is + role-specific and is merged with the values given to the ServerMetadata + parameter. + type: json ServerMetadata: default: {} description: > Extra properties or metadata passed to Nova for the created nodes in - the overcloud. It's accessible via the Nova metadata API. + the overcloud. It's accessible via the Nova metadata API. This applies to + all roles and is merged with a role-specific metadata parameter. type: json ControllerSchedulerHints: type: json @@ -129,6 +138,12 @@ parameters: type: string description: Command which will be run whenever configuration data changes default: os-refresh-config --timeout 14400 + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. 
+ default: '' parameter_groups: - label: deprecated @@ -157,7 +172,10 @@ resources: template: {get_param: Hostname} params: {get_param: HostnameMap} software_config_transport: {get_param: SoftwareConfigTransport} - metadata: {get_param: ServerMetadata} + metadata: + map_merge: + - {get_param: ServerMetadata} + - {get_param: ControllerServerMetadata} scheduler_hints: {get_param: ControllerSchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives @@ -372,10 +390,30 @@ resources: server: {get_resource: Controller} NodeIndex: {get_param: NodeIndex} + ControllerUpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + + # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty + # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first + ControllerUpgradeInitDeployment: + type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment + properties: + name: ControllerUpgradeInitDeployment + server: {get_resource: Controller} + config: {get_resource: ControllerUpgradeInitConfig} ControllerDeployment: type: OS::TripleO::SoftwareDeployment - depends_on: NetworkDeployment + depends_on: ControllerUpgradeInitDeployment properties: name: ControllerDeployment config: {get_resource: ControllerConfig} diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml index f8dad433..8d954c09 100644 --- a/puppet/major_upgrade_steps.j2.yaml +++ b/puppet/major_upgrade_steps.j2.yaml @@ -15,36 +15,8 @@ parameters: Setting to a previously unused value during stack-update will trigger the Upgrade resources to re-run on all roles. - UpgradeInitCommand: - type: string - description: | - Command or script snippet to run on all overcloud nodes to - initialize the upgrade process. E.g. a repository switch. - default: '' - resources: - # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004 - UpgradeInitConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - "#!/bin/bash\n\n" - - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - - get_param: UpgradeInitCommand - -{% for role in roles %} - {{role.name}}Upgrade_Init: - type: OS::Heat::StructuredDeploymentGroup - properties: - name: {{role.name}}Upgrade_Init - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: UpgradeInitConfig} -{% endfor %} - # Upgrade Steps for all roles # FIXME(shardy): would be nice to make the number of steps configurable {% for step in range(1, 8) %} @@ -56,10 +28,8 @@ resources: # serialization, but the event output is easier to follow if we # do, and there should be minimal performance hit (creating the # config is cheap compared to the time to apply the deployment). + {% if step > 1 %} depends_on: - {% if step == 1 %} - - {{role.name}}Upgrade_Init - {% else %} {% for dep in roles %} - {{dep.name}}Upgrade_Step{{step -1}} {% endfor %} diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml index 2c76492a..088a2e3d 100644 --- a/puppet/objectstorage-role.yaml +++ b/puppet/objectstorage-role.yaml @@ -71,11 +71,20 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. 
+ SwiftStorageServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. This option is + role-specific and is merged with the values given to the ServerMetadata + parameter. + type: json ServerMetadata: default: {} description: > Extra properties or metadata passed to Nova for the created nodes in - the overcloud. It's accessible via the Nova metadata API. + the overcloud. It's accessible via the Nova metadata API. This applies to + all roles and is merged with a role-specific metadata parameter. type: json ObjectStorageSchedulerHints: type: json @@ -97,6 +106,12 @@ parameters: type: string description: Command which will be run whenever configuration data changes default: os-refresh-config --timeout 14400 + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. + default: '' resources: @@ -118,7 +133,10 @@ resources: template: {get_param: Hostname} params: {get_param: HostnameMap} software_config_transport: {get_param: SoftwareConfigTransport} - metadata: {get_param: ServerMetadata} + metadata: + map_merge: + - {get_param: ServerMetadata} + - {get_param: SwiftStorageServerMetadata} scheduler_hints: {get_param: ObjectStorageSchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives @@ -315,6 +333,27 @@ resources: server: {get_resource: SwiftStorage} actions: {get_param: NetworkDeploymentActions} + SwiftStorageUpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + + # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty + # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first + SwiftStorageUpgradeInitDeployment: + type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment + properties: + name: SwiftStorageUpgradeInitDeployment + server: {get_resource: SwiftStorage} + config: {get_resource: SwiftStorageUpgradeInitConfig} + SwiftStorageHieraConfig: type: OS::Heat::StructuredConfig properties: @@ -354,7 +393,7 @@ resources: SwiftStorageHieraDeploy: type: OS::Heat::StructuredDeployment - depends_on: NetworkDeployment + depends_on: SwiftStorageUpgradeInitDeployment properties: name: SwiftStorageHieraDeploy server: {get_resource: SwiftStorage} diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml index 9726d978..1f432773 100644 --- a/puppet/role.role.j2.yaml +++ b/puppet/role.role.j2.yaml @@ -83,11 +83,20 @@ parameters: description: > The DNS domain used for the hosts. This should match the dhcp_domain configured in the Undercloud neutron. Defaults to localdomain. + {{role}}ServerMetadata: + default: {} + description: > + Extra properties or metadata passed to Nova for the created nodes in + the overcloud. It's accessible via the Nova metadata API. This option is + role-specific and is merged with the values given to the ServerMetadata + parameter. + type: json ServerMetadata: default: {} description: > Extra properties or metadata passed to Nova for the created nodes in - the overcloud. It's accessible via the Nova metadata API. + the overcloud. It's accessible via the Nova metadata API. This applies to + all roles and is merged with a role-specific metadata parameter. 
type: json {{role}}SchedulerHints: type: json @@ -115,6 +124,13 @@ parameters: LoggingGroups: type: comma_delimited_list default: [] + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. + default: '' + resources: {{role}}: @@ -136,7 +152,10 @@ resources: template: {get_param: Hostname} params: {get_param: HostnameMap} software_config_transport: {get_param: SoftwareConfigTransport} - metadata: {get_param: ServerMetadata} + metadata: + map_merge: + - {get_param: ServerMetadata} + - {get_param: {{role}}ServerMetadata} scheduler_hints: {get_param: {{role}}SchedulerHints} # Combine the NodeAdminUserData and NodeUserData mime archives @@ -333,9 +352,30 @@ resources: server: {get_resource: {{role}}} actions: {get_param: NetworkDeploymentActions} + {{role}}UpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + + # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty + # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first + {{role}}UpgradeInitDeployment: + type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment + properties: + name: {{role}}UpgradeInitDeployment + server: {get_resource: {{role}}} + config: {get_resource: {{role}}UpgradeInitConfig} + {{role}}Deployment: type: OS::Heat::StructuredDeployment - depends_on: NetworkDeployment + depends_on: {{role}}UpgradeInitDeployment properties: name: {{role}}Deployment config: {get_resource: {{role}}Config} diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml index 060ae32d..24c71cbb 100644 --- a/puppet/services/ceilometer-base.yaml +++ b/puppet/services/ceilometer-base.yaml @@ -50,6 +50,14 @@ parameters: default: false description: Whether to store events in ceilometer. type: boolean + EnableLegacyCeilometerApi: + default: false + description: Enable legacy ceilometer Api service if needed. + type: boolean + EventPipelinePublishers: + default: ['notifier://?topic=alarm.all'] + description: A list of publishers to put in event_pipeline.yaml. + type: comma_delimited_list Debug: default: '' description: Set to True to enable debugging on all services. 
@@ -93,6 +101,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/ceilometer' + enable_legacy_ceilometer_api: {get_param: EnableLegacyCeilometerApi} ceilometer_backend: {get_param: CeilometerBackend} ceilometer::metering_secret: {get_param: CeilometerMeteringSecret} # we include db_sync class in puppet-tripleo @@ -104,6 +113,7 @@ outputs: ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword} ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents} + ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers} ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion} ceilometer::agent::auth::auth_tenant_name: 'service' ceilometer::agent::auth::auth_endpoint_type: 'internalURL' diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml index 8eaf4044..e59dc202 100644 --- a/puppet/services/horizon.yaml +++ b/puppet/services/horizon.yaml @@ -58,6 +58,7 @@ outputs: dport: - 80 - 443 + horizon::enable_secure_proxy_ssl_header: true horizon::disable_password_reveal: true horizon::enforce_password_check: true horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml index 44f7f242..0ab066d7 100644 --- a/puppet/services/neutron-sriov-agent.yaml +++ b/puppet/services/neutron-sriov-agent.yaml @@ -25,6 +25,7 @@ parameters: All physical networks listed in network_vlan_ranges on the server should have mappings to appropriate interfaces on each agent. + Example "tenant0:ens2f0,tenant1:ens2f1" type: comma_delimited_list default: "" NeutronExcludeDevices: @@ -40,8 +41,8 @@ parameters: NeutronSriovNumVFs: description: > Provide the list of VFs to be reserved for each SR-IOV interface. 
- Format "<interface_name1>:<numvfs1>","<interface_name2>:<numvfs2>" - Example "eth1:4096","eth2:128" + Format "<interface_name1>:<numvfs1>,<interface_name2>:<numvfs2>" + Example "eth1:4096,eth2:128" type: comma_delimited_list default: "" diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml index 32754a55..af9c5353 100644 --- a/puppet/services/panko-base.yaml +++ b/puppet/services/panko-base.yaml @@ -37,7 +37,6 @@ outputs: value: service_name: panko_base config_settings: - panko_redis_password: {get_param: RedisPassword} panko::db::database_connection: list_join: - '' diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml index b77e0a91..08f3f6bc 100644 --- a/puppet/services/rabbitmq.yaml +++ b/puppet/services/rabbitmq.yaml @@ -73,6 +73,8 @@ outputs: rabbitmq::repos_ensure: false rabbitmq::tcp_keepalive: true rabbitmq_environment: + NODE_PORT: '' + NODE_IP_ADDRESS: '' RABBITMQ_NODENAME: "rabbit@%{::hostname}" RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' rabbitmq_kernel_variables: @@ -95,7 +97,7 @@ outputs: # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - rabbitmq::node_ip_address: {get_param: [ServiceNetMap, RabbitmqNetwork]} + rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]} rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues} step_config: | include ::tripleo::profile::base::rabbitmq diff --git a/scripts/hosts-config.sh b/scripts/hosts-config.sh index 4826d615..f456b316 100755 --- a/scripts/hosts-config.sh +++ b/scripts/hosts-config.sh @@ -30,17 +30,9 @@ write_entries() { } if [ ! -z "$hosts" ]; then - # cloud-init files are /etc/cloud/templates/hosts.OSNAME.tmpl - DIST=$(lsb_release -is | tr -s [A-Z] [a-z]) - case $DIST in - fedora|redhatenterpriseserver) - name="redhat" - ;; - *) - name="$DIST" - ;; - esac - write_entries "/etc/cloud/templates/hosts.${name}.tmpl" "$hosts" + for tmpl in /etc/cloud/templates/hosts.*.tmpl ; do + write_entries "$tmpl" "$hosts" + done write_entries "/etc/hosts" "$hosts" else echo "No hosts in Heat, nothing written." |