23 files changed, 357 insertions, 148 deletions
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 4532549f..09d0e9fd 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -28,7 +28,7 @@ parameters:
     default: "/etc/libvirt/libvirtd.conf"
   NovaConfig:
     type: string
-    default: "/etc/nova/nova.conf"
+    default: "/etc/nova/nova.conf,/etc/nova/rootwrap.conf"
   NeutronOpenvswitchAgentConfig:
     type: string
     default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
@@ -259,6 +259,8 @@ resources:
             volumes:
              - /run:/run
              - /lib/modules:/lib/modules:ro
+             - /dev:/dev
+             - /lib/udev:/lib/udev
              - /sys/fs/cgroup:/sys/fs/cgroup
              - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
              - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
@@ -316,8 +318,12 @@ resources:
             volumes:
              - /run:/run
              - /lib/modules:/lib/modules:ro
+             - /dev:/dev
+             - /lib/udev:/lib/udev
+             - /etc/iscsi:/etc/iscsi
              - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
              - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
+             - /var/lib/etc-data/nova/rootwrap.conf:/var/lib/kolla/config_files/rootwrap.conf:ro
             environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
             volumes_from:
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index bb458a68..027aed40 100644
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -48,23 +48,13 @@ fi
 /sbin/setenforce 0
 /sbin/modprobe ebtables
 
+# CentOS sets ptmx to 000. Without it being 666, we can't use Cinder volumes
+chmod 666 /dev/pts/ptmx
+
 # We need hostname -f to return in a centos container for the puppet hook
 HOSTNAME=$(hostname)
 echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
 
-# Another hack.. we need a different docker version
-# (should obviously be dropped once the atomic image contains docker 1.8.2)
-/usr/bin/systemctl stop docker.service
-/bin/curl -o /tmp/docker https://get.docker.com/builds/Linux/x86_64/docker-1.8.2
-/bin/mount -o remount,rw /usr
-/bin/rm /bin/docker
-/bin/cp /tmp/docker /bin/docker
-/bin/chmod 755 /bin/docker
-
-# enable and start docker
-/usr/bin/systemctl enable docker.service
-/usr/bin/systemctl restart --no-block docker.service
-
 # enable and start heat-docker-agents
 chmod 0640 /etc/systemd/system/heat-docker-agents.service
 /usr/bin/systemctl enable heat-docker-agents.service
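To sanity-check the ptmx permission fix and the new /dev, /lib/udev and /etc/iscsi bind mounts on a deployed compute node, something along these lines can be used. A rough sketch only; the container name novacompute is an assumption, not defined by this change:

    # ptmx must be 666 after the firstboot script runs, otherwise Cinder
    # volume attach from inside the container fails.
    stat -c '%a %n' /dev/pts/ptmx

    # The iSCSI initiator config, udev rules and device nodes should now be
    # visible inside the nova-compute container via the added bind mounts.
    docker exec novacompute ls /etc/iscsi /lib/udev /dev/pts/ptmx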
diff --git a/environments/neutron-plumgrid.yaml b/environments/neutron-plumgrid.yaml
new file mode 100755
index 00000000..b8d66015
--- /dev/null
+++ b/environments/neutron-plumgrid.yaml
@@ -0,0 +1,29 @@
+# A Heat environment file which can be used to enable PLUMgrid
+# extensions, configured via puppet
+resource_registry:
+  OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml
+
+parameter_defaults:
+  NeutronCorePlugin: networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2
+  PLUMgridDirectorServer: 127.0.0.1
+  PLUMgridDirectorServerPort: 443
+  PLUMgridUsername: username
+  PLUMgridPassword: password
+  PLUMgridServerTimeOut: 99
+  PLUMgridNovaMetadataIP: 169.254.169.254
+  PLUMgridNovaMetadataPort: 8775
+  PLUMgridL2GatewayVendor: vendor
+  PLUMgridL2GatewayUsername: username
+  PLUMgridL2GatewayPassword: password
+  PLUMgridIdentityVersion: v2.0
+  PLUMgridConnectorType: distributed
+
+  #Optional Parameters
+  #PLUMgridNeutronPluginVersion: present
+  #PLUMgridPlumlibVersion: present
+
+  # PLUMgrid doesn't require dhcp, l3, ovs and metadata agents
+  NeutronEnableDHCPAgent: false
+  NeutronEnableL3Agent: false
+  NeutronEnableMetadataAgent: false
+  NeutronEnableOVSAgent: false
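The environment file above is meant to be passed at deploy time; a minimal usage sketch (the plumgrid-credentials.yaml file holding operator-specific parameter_defaults is hypothetical, not part of this change):

    # Enable the PLUMgrid core plugin and disable the default agents.
    openstack overcloud deploy --templates \
      -e environments/neutron-plumgrid.yaml \
      -e plumgrid-credentials.yaml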
diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
index 70437a8a..c388358a 100644
--- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
@@ -20,3 +20,4 @@ parameter_defaults:
   rhel_reg_user: ""
   rhel_reg_type: ""
   rhel_reg_method: ""
+  rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index a884bdae..7c65bd8b 100644
--- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
@@ -43,6 +43,8 @@ parameters:
     type: string
   rhel_reg_method:
     type: string
+  rhel_reg_sat_repo:
+    type: string
 
 resources:
 
@@ -68,6 +70,7 @@ resources:
         - name: REG_USER
         - name: REG_TYPE
         - name: REG_METHOD
+        - name: REG_SAT_REPO
       config: {get_file: scripts/rhel-registration}
 
   RHELRegistrationDeployment:
@@ -95,6 +98,7 @@ resources:
         REG_USER: {get_param: rhel_reg_user}
         REG_TYPE: {get_param: rhel_reg_type}
         REG_METHOD: {get_param: rhel_reg_method}
+        REG_SAT_REPO: {get_param: rhel_reg_sat_repo}
 
   RHELUnregistration:
     type: OS::Heat::SoftwareConfig
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 242819b0..1c9acd2b 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -15,7 +15,7 @@ opts=
 attach_opts=
 sat5_opts=
 repos="repos --enable rhel-7-server-rpms"
-satellite_repo="rhel-7-server-rh-common-rpms"
+satellite_repo=${REG_SAT_REPO}
 
 if [ -n "${REG_AUTO_ATTACH:-}" ]; then
   opts="$opts --auto-attach"
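With the new parameter plumbed through to the registration script, the Satellite tools repository can now be overridden per deployment instead of being hard-coded. A sketch of an operator-side environment file; the file name and the 6.2 repository id are illustrative values only:

    # my-rhel-registration.yaml (hypothetical file name)
    parameter_defaults:
      rhel_reg_method: "satellite"
      # Overrides the default rhel-7-server-satellite-tools-6.1-rpms repo
      rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.2-rpms"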
diff --git a/extraconfig/tasks/pacemaker_maintenance_mode.sh b/extraconfig/tasks/pacemaker_maintenance_mode.sh
new file mode 100755
index 00000000..ddc84ad2
--- /dev/null
+++ b/extraconfig/tasks/pacemaker_maintenance_mode.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -x
+
+# On initial deployment, the pacemaker service is disabled and is-active exits
+# 3 in that case, so allow this to fail gracefully.
+pacemaker_status=$(systemctl is-active pacemaker || :)
+
+if [ "$pacemaker_status" = "active" ]; then
+    pcs property set maintenance-mode=true
+fi
+
+# We need to reload haproxy in case the certificate changed because
+# puppet doesn't know the contents of the cert file. We shouldn't
+# reload it if it wasn't already active (such as if using external
+# loadbalancer or on initial deployment).
+haproxy_status=$(systemctl is-active haproxy || :)
+if [ "$haproxy_status" = "active" ]; then
+    systemctl reload haproxy
+fi
diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml
index 2cfe92a7..82546588 100644
--- a/extraconfig/tasks/pre_puppet_pacemaker.yaml
+++ b/extraconfig/tasks/pre_puppet_pacemaker.yaml
@@ -14,13 +14,8 @@ resources:
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
-      config: |
-        #!/bin/bash
-        pacemaker_status=$(systemctl is-active pacemaker)
-
-        if [ "$pacemaker_status" = "active" ]; then
-            pcs property set maintenance-mode=true
-        fi
+      config:
+        get_file: pacemaker_maintenance_mode.sh
 
   ControllerPrePuppetMaintenanceModeDeployment:
     type: OS::Heat::SoftwareDeployments
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index a74d75da..edcfaca1 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -123,17 +123,14 @@ Nova:
     vip_param: NovaApi
     uri_suffixes:
       '': /v2.1/%(tenant_id)s
-      V3: /v3
   Public:
     vip_param: Public
     uri_suffixes:
       '': /v2.1/%(tenant_id)s
-      V3: /v3
   Admin:
     vip_param: NovaApi
     uri_suffixes:
       '': /v2.1/%(tenant_id)s
-      V3: /v3
   port: 8774
 
 NovaEC2:
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index 956fb0ba..07266f98 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -1307,123 +1307,6 @@ outputs:
                 IP_ADDRESS: {get_param: PublicVirtualIP}
           - ':'
           - get_param: [EndpointMap, NovaPublic, port]
-    NovaV3Admin:
-      host:
-        str_replace:
-          template:
-            get_param: [EndpointMap, NovaAdmin, host]
-          params:
-            CLOUDNAME: {get_param: CloudName}
-            IP_ADDRESS: {get_param: NovaApiVirtualIP}
-      port:
-        get_param: [EndpointMap, NovaAdmin, port]
-      protocol:
-        get_param: [EndpointMap, NovaAdmin, protocol]
-      uri:
-        list_join:
-        - ''
-        - - get_param: [EndpointMap, NovaAdmin, protocol]
-          - ://
-          - str_replace:
-              template:
-                get_param: [EndpointMap, NovaAdmin, host]
-              params:
-                CLOUDNAME: {get_param: CloudName}
-                IP_ADDRESS: {get_param: NovaApiVirtualIP}
-          - ':'
-          - get_param: [EndpointMap, NovaAdmin, port]
-          - /v3
-      uri_no_suffix:
-        list_join:
-        - ''
-        - - get_param: [EndpointMap, NovaAdmin, protocol]
-          - ://
-          - str_replace:
-              template:
-                get_param: [EndpointMap, NovaAdmin, host]
-              params:
-                CLOUDNAME: {get_param: CloudName}
-                IP_ADDRESS: {get_param: NovaApiVirtualIP}
-          - ':'
-          - get_param: [EndpointMap, NovaAdmin, port]
-    NovaV3Internal:
-      host:
-        str_replace:
-          template:
-            get_param: [EndpointMap, NovaInternal, host]
-          params:
-            CLOUDNAME: {get_param: CloudName}
-            IP_ADDRESS: {get_param: NovaApiVirtualIP}
-      port:
-        get_param: [EndpointMap, NovaInternal, port]
-      protocol:
-        get_param: [EndpointMap, NovaInternal, protocol]
-      uri:
-        list_join:
-        - ''
-        - - get_param: [EndpointMap, NovaInternal, protocol]
-          - ://
-          - str_replace:
-              template:
-                get_param: [EndpointMap, NovaInternal, host]
-              params:
-                CLOUDNAME: {get_param: CloudName}
-                IP_ADDRESS: {get_param: NovaApiVirtualIP}
-          - ':'
-          - get_param: [EndpointMap, NovaInternal, port]
-          - /v3
-      uri_no_suffix:
-        list_join:
-        - ''
-        - - get_param: [EndpointMap, NovaInternal, protocol]
-          - ://
-          - str_replace:
-              template:
-                get_param: [EndpointMap, NovaInternal, host]
-              params:
-                CLOUDNAME: {get_param: CloudName}
-                IP_ADDRESS: {get_param: NovaApiVirtualIP}
-          - ':'
-          - get_param: [EndpointMap, NovaInternal, port]
-    NovaV3Public:
-      host:
-        str_replace:
-          template:
-            get_param: [EndpointMap, NovaPublic, host]
-          params:
-            CLOUDNAME: {get_param: CloudName}
-            IP_ADDRESS: {get_param: PublicVirtualIP}
-      port:
-        get_param: [EndpointMap, NovaPublic, port]
-      protocol:
-        get_param: [EndpointMap, NovaPublic, protocol]
-      uri:
-        list_join:
-        - ''
-        - - get_param: [EndpointMap, NovaPublic, protocol]
-          - ://
-          - str_replace:
-              template:
-                get_param: [EndpointMap, NovaPublic, host]
-              params:
-                CLOUDNAME: {get_param: CloudName}
-                IP_ADDRESS: {get_param: PublicVirtualIP}
-          - ':'
-          - get_param: [EndpointMap, NovaPublic, port]
-          - /v3
-      uri_no_suffix:
-        list_join:
-        - ''
-        - - get_param: [EndpointMap, NovaPublic, protocol]
-          - ://
-          - str_replace:
-              template:
-                get_param: [EndpointMap, NovaPublic, host]
-              params:
-                CLOUDNAME: {get_param: CloudName}
-                IP_ADDRESS: {get_param: PublicVirtualIP}
-          - ':'
-          - get_param: [EndpointMap, NovaPublic, port]
     NovaEC2Admin:
       host:
         str_replace:
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 54074d12..bc3b7241 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -122,6 +122,9 @@ resource_registry:
   # validation resources
   OS::TripleO::AllNodes::Validation: all-nodes-validation.yaml
 
+  # services
+  OS::TripleO::Services: puppet/services/services.yaml
+
 parameter_defaults:
   EnablePackageInstall: false
   SoftwareConfigTransport: POLL_TEMP_URL
diff --git a/overcloud.yaml b/overcloud.yaml
index 19e847a5..cd724b3e 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2016-04-08
 
 description: >
   Deploy an OpenStack environment, consisting of several node types (roles),
@@ -711,6 +711,13 @@ parameters:
       via parameter_defaults in the resource registry.
     type: json
 
+  ControllerServices:
+    default: []
+    description: A list of service resources (configured in the Heat
+                 resource_registry) which represent nested stacks
+                 for each service that should get installed on the Controllers.
+    type: comma_delimited_list
+
   # Block storage specific parameters
   BlockStorageCount:
     type: number
@@ -892,6 +899,13 @@ resources:
       SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
       PublicVirtualIP: {get_attr: [VipMap, net_ip_uri_map, external]}
 
+  ControllerServiceChain:
+    type: OS::TripleO::Services
+    properties:
+      Services: {get_param: ControllerServices}
+      EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+      MysqlVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
+
   Controller:
     type: OS::Heat::ResourceGroup
     depends_on: Networks
@@ -1043,6 +1057,7 @@ resources:
           NodeIndex: '%index%'
           ServerMetadata: {get_param: ServerMetadata}
           SchedulerHints: {get_param: ControllerSchedulerHints}
+          ServiceConfigSettings: {get_attr: [ControllerServiceChain, config_settings]}
 
   Compute:
     type: OS::Heat::ResourceGroup
@@ -1611,6 +1626,7 @@ resources:
         allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
         controller_config: {get_attr: [Controller, attributes, config_identifier]}
         deployment_identifier: {get_param: DeployIdentifier}
+        StepConfig: {get_attr: [ControllerServiceChain, step_config]}
 
   ComputeNodesPostDeployment:
     type: OS::TripleO::ComputePostDeployment
diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml
index 21db825a..dfebcf82 100644
--- a/puppet/controller-config-pacemaker.yaml
+++ b/puppet/controller-config-pacemaker.yaml
@@ -8,6 +8,10 @@ parameters:
     default: false
     description: Whether to run config management (e.g. Puppet) in debug mode.
     type: boolean
+  StepConfig:
+    type: string
+    description: Config manifests that will be used to step through the deployment.
+    default: ''
 
 resources:
 
@@ -26,6 +30,7 @@ resources:
           - ''
          - - get_file: manifests/overcloud_controller_pacemaker.pp
             - get_file: manifests/ringbuilder.pp
+            - {get_param: StepConfig}
 
 outputs:
   OS::stack_id:
diff --git a/puppet/controller-config.yaml b/puppet/controller-config.yaml
index f7a6a56d..458aff32 100644
--- a/puppet/controller-config.yaml
+++ b/puppet/controller-config.yaml
@@ -8,6 +8,10 @@ parameters:
     default: false
     description: Whether to run config management (e.g. Puppet) in debug mode.
    type: boolean
+  StepConfig:
+    type: string
+    description: Config manifests that will be used to step through the deployment.
+    default: ''
 
 resources:
 
@@ -26,6 +30,7 @@ resources:
           - ''
           - - get_file: manifests/overcloud_controller.pp
             - get_file: manifests/ringbuilder.pp
+            - {get_param: StepConfig}
 
 outputs:
   OS::stack_id:
diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml
index 04f20b61..80b08a06 100644
--- a/puppet/controller-post.yaml
+++ b/puppet/controller-post.yaml
@@ -13,7 +13,10 @@ parameters:
   NodeConfigIdentifiers:
     type: json
     description: Value which changes if the node configuration may need to be re-applied
-
+  StepConfig:
+    type: string
+    description: Config manifests that will be used to step through the deployment.
+    default: ''
 
 resources:
 
@@ -35,6 +38,8 @@ resources:
 
   ControllerPuppetConfig:
     type: OS::TripleO::ControllerConfig
+    properties:
+      StepConfig: {get_param: StepConfig}
 
   # Step through a series of Puppet runs using the same manifest.
   # NOTE: To enable stepping through the deployments via heat hooks,
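Taken together, the changes above let a service be plugged in through the resource registry and the new ControllerServices list, with its hieradata and step manifest merged into the controller role. A hedged sketch of what an operator-side environment could look like; the OS::TripleO::Services::Example mapping and example.yaml template are hypothetical placeholders, not part of this change:

    resource_registry:
      # Hypothetical service template written against the puppet/services interface
      OS::TripleO::Services::Example: puppet/services/example.yaml

    parameter_defaults:
      # Resolved via OS::Heat::ResourceChain in puppet/services/services.yaml;
      # each entry's config_settings and step_config are merged into the role.
      ControllerServices:
        - OS::TripleO::Services::Example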
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 9b362a85..d966700f 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -790,6 +790,9 @@ parameters:
     type: json
     description: Optional scheduler hints to pass to nova
     default: {}
+  ServiceConfigSettings:
+    type: json
+    default: {}
 
 resources:
 
@@ -1294,6 +1297,7 @@ resources:
                 - heat_config_%{::deploy_config_name}
                 - controller_extraconfig
                 - extraconfig
+                - service_configs
                 - controller
                 - database
                 - object
@@ -1314,7 +1318,10 @@ resources:
                 - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre
                 - midonet_data #Optionally provided by AllNodesExtraConfig
                 - neutron_opencontrail_data # Optionally provided by ControllerExtraConfigPre
+                - neutron_plumgrid_data # Optionally provided by ControllerExtraConfigPre
               datafiles:
+                service_configs:
+                  mapped_data: {get_param: ServiceConfigSettings}
                 controller_extraconfig:
                   mapped_data: {get_param: ControllerExtraConfig}
                 extraconfig:
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml
new file mode 100755
index 00000000..7c0a7ad2
--- /dev/null
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-plumgrid.yaml
@@ -0,0 +1,113 @@
+heat_template_version: 2015-04-30
+
+description: Controller hieradata for Neutron PLUMgrid configuration
+
+parameters:
+  server:
+    description: ID of the controller node to apply this config to
+    type: string
+  PLUMgridDirectorServer:
+    description: IP address of the PLUMgrid Director Server
+    type: string
+    default: 127.0.0.1
+  PLUMgridDirectorServerPort:
+    description: Port of the PLUMgrid Director Server
+    type: string
+    default: 443
+  PLUMgridUsername:
+    description: Username for PLUMgrid platform
+    type: string
+  PLUMgridPassword:
+    description: Password for PLUMgrid platform
+    type: string
+    hidden: true
+  PLUMgridServerTimeOut:
+    description: Request timeout duration (seconds) to PLUMgrid platform
+    type: string
+    default: 99
+  PLUMgridNovaMetadataIP:
+    description: IP address of Nova Metadata
+    type: string
+    default: 169.254.169.254
+  PLUMgridNovaMetadataPort:
+    description: Port of Nova Metadata
+    type: string
+    default: 8775
+  PLUMgridL2GatewayVendor:
+    description: Vendor for L2 Gateway Switch
+    type: string
+    default: vendor
+  PLUMgridL2GatewayUsername:
+    description: Username for L2 Gateway Switch
+    type: string
+    default: username
+  PLUMgridL2GatewayPassword:
+    description: Password for L2 Gateway Switch
+    type: string
+    hidden: true
+  PLUMgridIdentityVersion:
+    description: Keystone Identity version
+    type: string
+    default: v2.0
+  PLUMgridConnectorType:
+    description: Neutron Network Connector Type
+    type: string
+    default: distributed
+  PLUMgridNeutronPluginVersion:
+    description: PLUMgrid Neutron Plugin version
+    type: string
+    default: present
+  PLUMgridPlumlibVersion:
+    description: PLUMgrid Plumlib version
+    type: string
+    default: present
+
+
+resources:
+  ControllerPLUMgridConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        hiera:
+          datafiles:
+            neutron_plumgrid_data:
+              mapped_data:
+                neutron::plugins::plumgrid::director_server: {get_input: plumgrid_director_server}
+                neutron::plugins::plumgrid::director_server_port: {get_input: plumgrid_director_server_port}
+                neutron::plugins::plumgrid::username: {get_input: plumgrid_username}
+                neutron::plugins::plumgrid::password: {get_input: plumgrid_password}
+                neutron::plugins::plumgrid::nova_metadata_ip: {get_input: plumgrid_nova_metadata_ip}
+                neutron::plugins::plumgrid::nova_metadata_port: {get_input: plumgrid_nova_metadata_port}
+                neutron::plugins::plumgrid::l2gateway_vendor: {get_input: plumgrid_l2gateway_vendor}
+                neutron::plugins::plumgrid::l2gateway_sw_username: {get_input: plumgrid_l2gateway_sw_username}
+                neutron::plugins::plumgrid::l2gateway_sw_password: {get_input: plumgrid_l2gateway_sw_password}
+                neutron::plugins::plumgrid::connector_type: {get_input: plumgrid_connector_type}
+                neutron::plugins::plumgrid::identity_version: {get_input: plumgrid_identity_version}
+                neutron::plugins::plumgrid::package_ensure: {get_input: plumgrid_neutron_plugin_version}
+                neutron::plugins::plumgrid::plumlib_package_ensure: {get_input: plumgrid_plumlib_version}
+
+  ControllerPLUMgridDeployment:
+    type: OS::Heat::StructuredDeployment
+    properties:
+      config: {get_resource: ControllerPLUMgridConfig}
+      server: {get_param: server}
+      input_values:
+        plumgrid_director_server: {get_param: PLUMgridDirectorServer}
+        plumgrid_director_server_port: {get_param: PLUMgridDirectorServerPort}
+        plumgrid_username: {get_param: PLUMgridUsername}
+        plumgrid_password: {get_param: PLUMgridPassword}
+        plumgrid_nova_metadata_ip: {get_param: PLUMgridNovaMetadataIP}
+        plumgrid_nova_metadata_port: {get_param: PLUMgridNovaMetadataPort}
+        plumgrid_l2gateway_vendor: {get_param: PLUMgridL2GatewayVendor}
+        plumgrid_l2gateway_sw_username: {get_param: PLUMgridL2GatewayUsername}
+        plumgrid_l2gateway_sw_password: {get_param: PLUMgridL2GatewayPassword}
+        plumgrid_identity_version: {get_param: PLUMgridIdentityVersion}
+        plumgrid_connector_type: {get_param: PLUMgridConnectorType}
+        plumgrid_neutron_plugin_version: {get_param: PLUMgridNeutronPluginVersion}
+        plumgrid_plumlib_version: {get_param: PLUMgridPlumlibVersion}
+
+outputs:
+  deploy_stdout:
+    description: Deployment reference, used to trigger puppet apply on changes
+    value: {get_attr: [ControllerPLUMgridDeployment, deploy_stdout]}
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 3e523f1b..5e87793a 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -90,7 +90,6 @@ neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
 # nova
 nova::notify_on_state_change: 'vm_and_task_state'
 nova::api::default_floating_pool: 'public'
-nova::api::osapi_v3: true
 nova::api::sync_db_api: true
 nova::scheduler::filter::ram_allocation_ratio: '1.0'
 nova::cron::archive_deleted_rows::hour: '*/12'
agent + # can be gracefully restarted. See https://review.openstack.org/#/c/297211 + # In the meantime, it's safe to restart the agent on each change in neutron.conf, + # because Puppet changes are supposed to be done during bootstrap and upgrades. + # Some resource managed by Neutron_config (like messaging and logging options) require + # a restart of OVS agent. This code does it. + # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code + # from here and fix it in puppet-neutron. + Neutron_config<||> ~> Service['neutron-ovs-agent-service'] + include ::neutron::plugins::ml2 include ::neutron::agents::ml2::ovs diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index ff2264cf..c6667ae6 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -344,13 +344,21 @@ if hiera('step') >= 4 { include ::neutron::server include ::neutron::server::notifications - # If the value of core plugin is set to 'nuage' or 'opencontrail', - # include nuage or opencontrail core plugins, and it does not - # need the l3, dhcp and metadata agents + # If the value of core plugin is set to 'nuage' or'opencontrail' or 'plumgrid', + # include nuage or opencontrail or plumgrid core plugins + # else use the default value of 'ml2' if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { include ::neutron::plugins::nuage } elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { include ::neutron::plugins::opencontrail + } + elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { + class { '::neutron::plugins::plumgrid' : + connection => hiera('neutron::server::database_connection'), + controller_priv_host => hiera('keystone_admin_api_vip'), + admin_password => hiera('admin_password'), + metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'), + } } else { include ::neutron::agents::l3 include ::neutron::agents::dhcp diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index b271114f..f1092321 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -185,8 +185,10 @@ if hiera('step') >= 1 { 'bind-address' => $::hostname, 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', + 'wsrep_on' => 'ON', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', 'wsrep_cluster_name' => 'galera_cluster', + 'wsrep_cluster_address' => "gcomm://${galera_nodes}", 'wsrep_slave_threads' => '1', 'wsrep_certify_nonPK' => '1', 'wsrep_max_ws_rows' => '131072', @@ -792,6 +794,14 @@ if hiera('step') >= 4 { keystone_password => hiera('neutron::server::auth_password') } } + if hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { + class { '::neutron::plugins::plumgrid' : + connection => hiera('neutron::server::database_connection'), + controller_priv_host => hiera('keystone_admin_api_vip'), + admin_password => hiera('admin_password'), + metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'), + } + } if hiera('neutron::enable_dhcp_agent',true) { class { '::neutron::agents::dhcp' : manage_service => false, diff --git a/puppet/services/README.rst b/puppet/services/README.rst new file mode 100644 index 
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
new file mode 100644
index 00000000..38d2ac64
--- /dev/null
+++ b/puppet/services/README.rst
@@ -0,0 +1,50 @@
+========
+services
+========
+
+A TripleO nested stack Heat template that encapsulates generic configuration
+data to configure a specific service. This generally includes everything
+needed to configure the service excluding the local bind ports which
+are still managed in the per-node role templates directly (controller.yaml,
+compute.yaml, etc.). All other (global) service settings go into
+the puppet/services templates.
+
+Input Parameters
+----------------
+
+Each service may define its own input parameters and defaults.
+Operators will use the parameter_defaults section of any Heat
+environment to set per service parameters.
+
+Config Settings
+---------------
+
+Each service may define a config_settings output variable which returns
+Hiera settings to be configured.
+
+Steps
+-----
+
+Each service may define an output variable which returns a puppet manifest
+snippet that will run at each of the following steps. Earlier manifests
+are re-asserted when applying later ones.
+
+ * config_settings: Custom hiera settings for this service.
+
+ * step_config: A puppet manifest that is used to step through the deployment
+   sequence. Each sequence is given a "step" (via hiera('step')) that provides
+   information about when puppet classes should activate themselves.
+
+   Steps correlate to the following:
+
+   1) Load Balancer configuration
+
+   2) Core Services (Database/Rabbit/NTP/etc.)
+
+   3) Early OpenStack Service setup (Ringbuilder, etc.)
+
+   4) General OpenStack Services
+
+   5) Service activation (Pacemaker)
+
+   6) Fencing (Pacemaker)
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
new file mode 100644
index 00000000..f9681634
--- /dev/null
+++ b/puppet/services/services.yaml
@@ -0,0 +1,40 @@
+heat_template_version: 2016-04-08
+
+description: >
+  Utility stack to convert an array of services into a set of combined
+  role configs.
+
+parameters:
+  Services:
+    default: []
+    description: |
+      List of nested stack service templates.
+    type: comma_delimited_list
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MysqlVirtualIPUri:
+    default: ''
+    type: string
+    description: The URI virtual IP for the MySQL service.
+
+resources:
+
+  ServiceChain:
+    type: OS::Heat::ResourceChain
+    properties:
+      resources: {get_param: Services}
+      concurrent: true
+      resource_properties:
+        EndpointMap: {get_param: EndpointMap}
+        MysqlVirtualIPUri: {get_param: MysqlVirtualIPUri}
+
+outputs:
+  config_settings:
+    description: Configuration settings.
+    value: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
+  step_config:
+    description: Step configuration.
+    value: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
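For reference, a minimal service template matching the interface that services.yaml consumes could look like the sketch below. The ExamplePassword parameter, the hiera keys and the ::tripleo::profile class name are illustrative and not provided by this change, but the EndpointMap/MysqlVirtualIPUri parameters and the role_data output with config_settings and step_config keys follow what the ResourceChain above passes in and reads back:

    heat_template_version: 2016-04-08

    description: >
      Example service template (sketch) implementing the puppet/services interface.

    parameters:
      EndpointMap:
        default: {}
        description: Mapping of service endpoint -> protocol.
        type: json
      MysqlVirtualIPUri:
        default: ''
        description: The URI virtual IP for the MySQL service.
        type: string
      ExamplePassword:
        # Illustrative per-service parameter, set via parameter_defaults.
        type: string
        hidden: true

    outputs:
      role_data:
        description: Role data for the example service.
        value:
          config_settings:
            # Placeholder hiera keys merged into the controller's service_configs.
            example::db_connection:
              list_join: ['', ['mysql://example:', {get_param: ExamplePassword}, '@', {get_param: MysqlVirtualIPUri}, '/example']]
          step_config: |
            include ::tripleo::profile::base::example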