199 files changed, 4118 insertions, 3709 deletions
diff --git a/deployed-server/README.rst b/deployed-server/README.rst
new file mode 100644
index 00000000..ce74e77b
--- /dev/null
+++ b/deployed-server/README.rst
@@ -0,0 +1,129 @@
+TripleO with Deployed Servers
+=============================
+
+The deployed-server set of templates can be used to deploy TripleO via
+tripleo-heat-templates to servers that are already installed with a base
+operating system.
+
+When OS::TripleO::Server is mapped to the deployed-server.yaml template via the
+provided deployed-server-environment.yaml resource registry, Nova and Ironic
+are not used to create any server instances. Heat continues to create the
+SoftwareDeployment resources, and they are made available to the already
+deployed and running servers.
+
+Template Usage
+--------------
+To use these templates pass the included environment file to the deployment
+command::
+
+    -e deployed-server/deployed-server-environment.yaml
+
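For orientation, a complete deployment command using this environment file
might look roughly like the following sketch; the template checkout path and
any additional -e environment files are illustrative and will vary by setup::

    source ~/stackrc
    openstack overcloud deploy \
      --templates /home/stack/deployed-server/tripleo-heat-templates \
      -e /home/stack/deployed-server/tripleo-heat-templates/deployed-server/deployed-server-environment.yaml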
+Deployed Server configuration
+-----------------------------
+It is currently assumed that the deployed servers being used have the required
+set of software and packages already installed on them. These exact
+requirements must match how such a server would look if it were deployed the
+standard way via Ironic using the TripleO overcloud-full image.
+
+An easy way to get this set up for development is to use an overcloud-full
+image from an already existing TripleO setup. Create the VMs for the already
+deployed servers, and use the overcloud-full image as their disk.
+
+Each server must have an FQDN set that resolves to an IP address on a routable
+network (e.g., the hostname should not resolve to 127.0.0.1). The hostname
+will be detected on each server via the hostnamectl --static command.
+
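A minimal sketch of setting and verifying the hostname on one server (the FQDN
below is only a placeholder)::

    sudo hostnamectl set-hostname overcloud-controller-0.example.com
    hostnamectl --static                    # the name TripleO will detect
    getent hosts "$(hostnamectl --static)"  # should resolve to a routable IP, not 127.0.0.1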
+Each server must also have a route to the configured IP address on the
+undercloud where the OpenStack services are listening. This is the value of
+local_ip in undercloud.conf.
+
+It is recommended that each server have at least two NICs: one used for
+external management such as ssh, and one used for the OpenStack deployment
+itself. Since the overcloud deployment will reconfigure networking on the
+configured NIC to be used by OpenStack, the external management NIC is needed
+as a fallback so that connectivity is not lost entirely in case of a
+configuration error. Be sure to use the correct NIC config templates, since
+the nodes will not receive DHCP from the undercloud neutron-dhcp-agent
+service.
+
+For example, the net-config-static-bridge.yaml template could be used for
+controllers, and the net-config-static.yaml template could be used for
+computes by specifying:
+
+resource_registry:
+  OS::TripleO::Controller::Net::SoftwareConfig: /home/stack/deployed-server/tripleo-heat-templates/net-config-static-bridge.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: /home/stack/deployed-server/tripleo-heat-templates/net-config-static.yaml
+
+In a setup where the first NIC on the servers is used for external management,
+set the NICs to be used for OpenStack to nic2:
+
+parameter_defaults:
+  NeutronPublicInterface: nic2
+  HypervisorNeutronPublicInterface: nic2
+
+The above NIC config templates also require a route to the ctlplane network to
+be defined. Define the needed parameters as necessary for your environment,
+for example:
+
+parameter_defaults:
+  ControlPlaneDefaultRoute: 192.168.122.130
+  ControlPlaneSubnetCidr: "24"
+  EC2MetadataIp: "192.0.2.1"
+
+In this example, 192.168.122.130 is the external management IP of the
+undercloud, and it is therefore the default route for the configured local_ip
+value of 192.0.2.1.
+
+
+os-collect-config
+-----------------
+os-collect-config on each deployed server must be manually configured to poll
+the Heat API for the available SoftwareDeployments. An example configuration
+for /etc/os-collect-config.conf looks like:
+
+    [DEFAULT]
+    collectors=heat
+    command=os-refresh-config
+
+    [heat]
+    # you can get these values from stackrc on the undercloud
+    user_id=<a user that can connect to heat> # note this must be the ID, not the username
+    password=<a password>
+    auth_url=<keystone url>
+    project_id=<project_id> # note, this must be the ID, not the project name
+    stack_id=<stack_id>
+    resource_name=<resource_name>
+
+Note that the stack_id value is the ID of the nested stack containing the
+resource (identified by resource_name) implemented by the deployed-server.yaml
+templates.
+
+Once the configuration for os-collect-config has been defined, the service
+needs to be restarted. It will then start polling Heat and applying the
+SoftwareDeployments.
+
+A sample script at deployed-server/scripts/get-occ-config.sh is included that
+will automatically generate the os-collect-config configuration needed on each
+server, ssh to each server, copy the configuration, and restart the
+os-collect-config service.
+
+.. warning::
+   The get-occ-config.sh script is not intended for production use, as it
+   copies admin credentials to each of the deployed nodes.
+
+The script can only be used once the stack IDs of the nested deployed-server
+stacks have been created via Heat. This usually takes only a couple of minutes
+after the deployment command has been started. When the following output is
+seen from the deployment command, the script should be ready to run:
+
+    [Controller]: CREATE_IN_PROGRESS state changed
+    [NovaCompute]: CREATE_IN_PROGRESS state changed
+
+The user running the script must be able to ssh as root to each server. Define
+the hostnames of the deployed servers you intend to use for each role type::
+
+    export controller_hosts="controller0 controller1 controller2"
+    export compute_hosts="compute0"
+
+Then run the script on the undercloud with a stackrc file sourced, and the
+script will copy the needed os-collect-config.conf configuration to each
+server and restart the os-collect-config service.
diff --git a/deployed-server/deployed-server-config.yaml b/deployed-server/deployed-server-config.yaml
new file mode 100644
index 00000000..8c59dc72
--- /dev/null
+++ b/deployed-server/deployed-server-config.yaml
@@ -0,0 +1,22 @@
+heat_template_version: 2014-10-16
+parameters:
+  user_data_format:
+    type: string
+    default: SOFTWARE_CONFIG
+
+resources:
+  # We just need something which returns a unique ID, but we can't
+  # use RandomString because RefId returns the value, not the physical
+  # resource ID, SoftwareConfig should work as it returns a UUID
+  deployed-server-config:
+    type: OS::Heat::SoftwareConfig
+
+outputs:
+  # FIXME(shardy) this is needed because TemplateResource returns an
+  # ARN not a UUID, which overflows the Deployment server_id column..
+  user_data_format:
+    value: SOFTWARE_CONFIG
+  OS::stack_id:
+    value: {get_resource: deployed-server-config}
+
+
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
new file mode 100644
index 00000000..81941047
--- /dev/null
+++ b/deployed-server/deployed-server.yaml
@@ -0,0 +1,122 @@
+heat_template_version: 2014-10-16
+parameters:
+  image:
+    type: string
+    default: unused
+  flavor:
+    type: string
+    default: unused
+  key_name:
+    type: string
+    default: unused
+  security_groups:
+    type: json
+    default: []
+  # Require this so we can validate the parent passes the
+  # correct value
+  user_data_format:
+    type: string
+  user_data:
+    type: string
+    default: ''
+  name:
+    type: string
+    default: ''
+  image_update_policy:
+    type: string
+    default: ''
+  networks:
+    type: comma_delimited_list
+    default: ''
+  metadata:
+    type: json
+    default: {}
+  software_config_transport:
+    default: POLL_SERVER_CFN
+    type: string
+  scheduler_hints:
+    type: json
+    description: Optional scheduler hints to pass to nova
+    default: {}
+
+resources:
+  # We just need something which returns a unique ID, but we can't
+  # use RandomString because RefId returns the value, not the physical
+  # resource ID, SoftwareConfig should work as it returns a UUID
+  deployed-server:
+    type: OS::TripleO::DeployedServerConfig
+    properties:
+      user_data_format: SOFTWARE_CONFIG
+
+  InstanceIdConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        instance-id: {get_attr: [deployed-server, "OS::stack_id"]}
+
+  InstanceIdDeployment:
+    type: OS::Heat::StructuredDeployment
+    properties:
+      config: {get_resource: InstanceIdConfig}
+      server: {get_resource: deployed-server}
+
+  HostsEntryConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        set -eux
+        mkdir -p $heat_outputs_path
+        host=$(hostnamectl --static)
+        echo -n "$host " > $heat_outputs_path.hosts_entry
+        host_ip=$(python -c "import socket; print socket.gethostbyname(\"$host\")")
+        echo -n "$host_ip " >> $heat_outputs_path.hosts_entry
+        echo >> $heat_outputs_path.hosts_entry
+        cat $heat_outputs_path.hosts_entry
+        echo -n $host_ip > $heat_outputs_path.ip_address
+        cat $heat_outputs_path.ip_address
+        echo -n $host > $heat_outputs_path.hostname
+        cat $heat_outputs_path.hostname
+      outputs:
+        - name: hosts_entry
+          description: hosts_entry
+        - name: ip_address
+          description: ip_address
+        - name: hostname
+          description: hostname
+
+  HostsEntryDeployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config: {get_resource: HostsEntryConfig}
+      server: {get_resource: deployed-server}
+
+  ControlPlanePort:
+    type: OS::Neutron::Port
+    properties:
+      network: ctlplane
+      name:
+        list_join:
+          - '-'
+          - - {get_attr: [HostsEntryDeployment, hostname]}
+            - ctlplane
+            - port
+      replacement_policy: AUTO
+
+outputs:
+  # FIXME(shardy) this is needed because TemplateResource returns an
+  # ARN not a UUID, which overflows the Deployment server_id column..
+ OS::stack_id: + value: {get_attr: [deployed-server, "OS::stack_id"]} + networks: + value: + ctlplane: + - {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]} + name: + value: {get_attr: [HostsEntryDeployment, hostname]} + hosts_entry: + value: {get_attr: [HostsEntryDeployment, hosts_entry]} + ip_address: + value: {get_attr: [HostsEntryDeployment, ip_address]} diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh new file mode 100755 index 00000000..2c01174e --- /dev/null +++ b/deployed-server/scripts/get-occ-config.sh @@ -0,0 +1,113 @@ +#!/bin/bash + +set -eux + +SLEEP_TIME=5 + +CONTROLLER_HOSTS=${CONTROLLER_HOSTS:-""} +COMPUTE_HOSTS=${COMPUTE_HOSTS:-""} +BLOCKSTORAGE_HOSTS=${BLOCKSTORAGE_HOSTS:-""} +OBJECTSTORAGE_HOSTS=${OBJECTSTORAGE_HOSTS:-""} +CEPHSTORAGE_HOSTS=${CEPHSTORAGE_HOSTS:-""} +SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"~/.ssh/id_rsa"} +SSH_OPTIONS="-tt -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32" + +read -a Controller_hosts_a <<< $CONTROLLER_HOSTS +read -a Compute_hosts_a <<< $COMPUTE_HOSTS +read -a BlockStorage_hosts_a <<< $BLOCKSTORAGE_HOSTS +read -a ObjectStorage_hosts_a <<< $OBJECTSTORAGE_HOSTS +read -a CephStorage_hosts_a <<< $CEPHSTORAGE_HOSTS + +roles="Controller Compute BlockStorage ObjectStorage CephStorage" +admin_user_id=$(openstack user show admin -c id -f value) +admin_project_id=$(openstack project show admin -c id -f value) + +function check_stack { + local stack_to_check=$1 + + if [ "$stack_to_check" = "|" ]; then + echo Stack not created + return 1 + fi + + echo Checking if $1 stack is created + set +e + heat resource-list $stack_to_check + rc=$? + set -e + + if [ ! "$rc" = "0" ]; then + echo Stack $1 not yet created + fi + + return $rc +} + + +for role in $roles; do + while ! check_stack overcloud; do + sleep $SLEEP_TIME + done + + rg_stack=$(heat resource-list overcloud | grep " $role " | awk '{print $4}') + while ! check_stack $rg_stack; do + sleep $SLEEP_TIME + rg_stack=$(heat resource-list overcloud | grep " $role " | awk '{print $4}') + done + + stacks=$(heat resource-list $rg_stack | grep OS::TripleO::$role | awk '{print $4}') + + i=0 + + for stack in $stacks; do + server_resource_name=$role + if [ "$server_resource_name" = "Compute" ]; then + server_resource_name="NovaCompute" + fi + + server_stack=$(heat resource-list $stack | grep " $server_resource_name " | awk '{print $4}') + while ! 
check_stack $server_stack; do + sleep $SLEEP_TIME + server_stack=$(heat resource-list $stack | grep " $server_resource_name " | awk '{print $4}') + done + + deployed_server_stack=$(heat resource-list $server_stack | grep "deployed-server" | awk '{print $4}') + + echo "======================" + echo "$role$i os-collect-config.conf configuration:" + + config=" +[DEFAULT] +collectors=heat +command=os-refresh-config +polling_interval=30 + +[heat] +user_id=$admin_user_id +password=$OS_PASSWORD +auth_url=$OS_AUTH_URL +project_id=$admin_project_id +stack_id=$deployed_server_stack +resource_name=deployed-server-config" + + echo "$config" + echo "======================" + echo + + + host= + eval host=\${${role}_hosts_a[i]} + if [ -n "$host" ]; then + # Delete the os-collect-config.conf template so our file won't get + # overwritten + ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo /bin/rm -f /usr/libexec/os-apply-config/templates/etc/os-collect-config.conf + ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host "echo \"$config\" > os-collect-config.conf" + ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo cp os-collect-config.conf /etc/os-collect-config.conf + ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl restart os-collect-config + fi + + let i+=1 + + done + +done diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml index 3c4a9413..3fc07561 100644 --- a/docker/compute-post.yaml +++ b/docker/compute-post.yaml @@ -5,8 +5,8 @@ description: > parameters: servers: type: json - NodeConfigIdentifiers: - type: json + DeployIdentifier: + type: string description: Value which changes if the node configuration may need to be re-applied DockerNamespace: type: string @@ -38,6 +38,11 @@ parameters: NeutronOpenvswitchAgentOvsVolume: type: string default: " " + StepConfig: + type: string + description: Config manifests that will be used to step through the deployment. + default: '' + resources: @@ -56,7 +61,11 @@ resources: outputs: - name: result config: - get_file: ../puppet/manifests/overcloud_compute.pp + list_join: + - '' + - - get_file: ../puppet/manifests/overcloud_compute.pp + - {get_param: StepConfig} + ComputePuppetDeployment: type: OS::Heat::SoftwareDeployments @@ -65,7 +74,7 @@ resources: servers: {get_param: servers} config: {get_resource: ComputePuppetConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} tripleo::packages::enable_install: True CopyEtcConfig: diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml new file mode 100644 index 00000000..3c9e3459 --- /dev/null +++ b/environments/deployed-server-environment.yaml @@ -0,0 +1,3 @@ +resource_registry: + OS::TripleO::Server: ../deployed-server/deployed-server.yaml + OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml diff --git a/environments/docker.yaml b/environments/docker.yaml index a7e2504c..c03d8511 100644 --- a/environments/docker.yaml +++ b/environments/docker.yaml @@ -5,8 +5,6 @@ resource_registry: parameter_defaults: NovaImage: atomic-image - -parameter_defaults: # Defaults to 'tripleoupstream'. 
Specify a local docker registry # Example: 192.0.2.1:8787/tripleoupstream DockerNamespace: tripleoupstream diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml index b4c3f08b..289ec2e3 100644 --- a/environments/enable-tls.yaml +++ b/environments/enable-tls.yaml @@ -28,10 +28,14 @@ parameter_defaults: HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'} HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'} HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'} + IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'} + IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'} + IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'} KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'} KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'} KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'} MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'} + MysqlNoBracketsInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'} NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'} diff --git a/environments/ips-from-pool-all.yaml b/environments/ips-from-pool-all.yaml index f660d501..87563753 100644 --- a/environments/ips-from-pool-all.yaml +++ b/environments/ips-from-pool-all.yaml @@ -5,30 +5,36 @@ resource_registry: OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml + # Management network is optional and disabled by default + #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management_from_pool.yaml OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_from_pool.yaml OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml + #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management_from_pool.yaml OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml + #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management_from_pool.yaml OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml + #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management_from_pool.yaml OS::TripleO::BlockStorage::Ports::ExternalPort: 
../network/ports/noop.yaml OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml + #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management_from_pool.yaml parameter_defaults: ControllerIPs: @@ -43,6 +49,8 @@ parameter_defaults: - 172.16.3.251 tenant: - 172.16.0.251 + #management: + #- 172.16.4.251 NovaComputeIPs: # Each compute will get an IP from the lists below, first compute, first IP internal_api: @@ -51,12 +59,16 @@ parameter_defaults: - 172.16.1.252 tenant: - 172.16.0.252 + #management: + #- 172.16.4.252 CephStorageIPs: # Each ceph node will get an IP from the lists below, first node, first IP storage: - 172.16.1.253 storage_mgmt: - 172.16.3.253 + #management: + #- 172.16.4.253 SwiftStorageIPs: # Each swift node will get an IP from the lists below, first node, first IP internal_api: @@ -65,6 +77,8 @@ parameter_defaults: - 172.16.1.254 storage_mgmt: - 172.16.3.254 + #management: + #- 172.16.4.254 BlockStorageIPs: # Each cinder node will get an IP from the lists below, first node, first IP internal_api: @@ -73,3 +87,5 @@ parameter_defaults: - 172.16.1.250 storage_mgmt: - 172.16.3.250 + #management: + #- 172.16.4.250 diff --git a/environments/major-upgrade-pacemaker-init.yaml b/environments/major-upgrade-pacemaker-init.yaml index fbad0406..d97f8fc1 100644 --- a/environments/major-upgrade-pacemaker-init.yaml +++ b/environments/major-upgrade-pacemaker-init.yaml @@ -3,7 +3,6 @@ parameter_defaults: resource_registry: OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml - OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml OS::TripleO::ControllerPostDeployment: OS::Heat::None OS::TripleO::ComputePostDeployment: OS::Heat::None OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None diff --git a/environments/major-upgrade-pacemaker.yaml b/environments/major-upgrade-pacemaker.yaml index 763ca67e..95f09666 100644 --- a/environments/major-upgrade-pacemaker.yaml +++ b/environments/major-upgrade-pacemaker.yaml @@ -3,7 +3,6 @@ parameter_defaults: resource_registry: OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml - OS::TripleO::Tasks::PackageUpdate: ../extraconfig/tasks/yum_update_noop.yaml OS::TripleO::ControllerPostDeployment: OS::Heat::None OS::TripleO::ComputePostDeployment: OS::Heat::None OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml index c0420c5c..737d7d36 100644 --- a/environments/network-isolation.yaml +++ b/environments/network-isolation.yaml @@ -1,16 +1,15 @@ # Enable the creation of Neutron networks for isolated Overcloud # traffic and configure each role to assign ports (related # to that role) on these networks. -# Many networks are disabled by default because they are not used -# in a typical configuration. Override via parameter_defaults. 
resource_registry: OS::TripleO::Network::External: ../network/external.yaml OS::TripleO::Network::InternalApi: ../network/internal_api.yaml OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml OS::TripleO::Network::Storage: ../network/storage.yaml OS::TripleO::Network::Tenant: ../network/tenant.yaml - # Management network is optional and disabled by default - OS::TripleO::Network::Management: OS::Heat::None + # Management network is optional and disabled by default. + # To enable it, include environments/network-management.yaml + #OS::TripleO::Network::Management: ../network/management.yaml # Port assignments for the VIPs OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml @@ -19,13 +18,15 @@ resource_registry: OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml + # Port assignments for service virtual IPs for the controller role + OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml # Port assignments for the controller role OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml - OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/noop.yaml + #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml # Port assignments for the compute role OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml @@ -33,7 +34,7 @@ resource_registry: OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml - OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/noop.yaml + #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml # Port assignments for the ceph storage role OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml @@ -41,7 +42,7 @@ resource_registry: OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml - OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/noop.yaml + #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml # Port assignments for the swift storage role OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml @@ -49,7 +50,7 @@ resource_registry: OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml - OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/noop.yaml + #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml # Port assignments for the block storage role OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml @@ -57,7 +58,5 @@ resource_registry: OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml 
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml - OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/noop.yaml + #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml - # Port assignments for service virtual IPs for the controller role - OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml diff --git a/environments/network-management-v6.yaml b/environments/network-management-v6.yaml new file mode 100644 index 00000000..812e84f3 --- /dev/null +++ b/environments/network-management-v6.yaml @@ -0,0 +1,25 @@ +# Enable the creation of an IPv6 system management network. This +# creates a Neutron network for isolated Overcloud +# system management traffic and configures each role to +# assign a port (related to that role) on that network. +# Note that the basic sample NIC configuration templates +# do not include the management network, see the +# comments in the sample network config templates in +# network/config/ for an example. +resource_registry: + OS::TripleO::Network::Management: ../network/management_v6.yaml + + # Port assignments for the controller role + OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management_v6.yaml + + # Port assignments for the compute role + OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management_v6.yaml + + # Port assignments for the ceph storage role + OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management_v6.yaml + + # Port assignments for the swift storage role + OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management_v6.yaml + + # Port assignments for the block storage role + OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management_v6.yaml diff --git a/environments/network-management.yaml b/environments/network-management.yaml index 2f0cff8b..041617be 100644 --- a/environments/network-management.yaml +++ b/environments/network-management.yaml @@ -4,7 +4,8 @@ # assign a port (related to that role) on that network. # Note that the basic sample NIC configuration templates # do not include the management network, see the -# single-nic-vlans-mgmt templates for an example. +# comments in the sample network config templates in +# network/config/ for an example. resource_registry: OS::TripleO::Network::Management: ../network/management.yaml diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml index 7f50f15b..c120d0b3 100644 --- a/environments/neutron-midonet.yaml +++ b/environments/neutron-midonet.yaml @@ -4,6 +4,10 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-linux-bridge.yaml # We have to avoid any ovs bridge. 
MidoNet is incompatible with its datapath OS::TripleO::Services::NeutronL3Agent: OS::Heat::None OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None + # Override the NeutronCorePlugin to use Nuage + OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet + OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-midonet.yaml parameter_defaults: EnableZookeeperOnController: true diff --git a/environments/neutron-nuage-config.yaml b/environments/neutron-nuage-config.yaml index bf5036bf..e157ae35 100644 --- a/environments/neutron-nuage-config.yaml +++ b/environments/neutron-nuage-config.yaml @@ -1,10 +1,13 @@ # A Heat environment file which can be used to enable a # a Neutron Nuage backend on the controller, configured via puppet resource_registry: - OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml OS::TripleO::Services::NeutronL3Agent: OS::Heat::None OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None + # Override the NeutronCorePlugin to use Nuage + OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginNuage + OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-nuage.yaml parameter_defaults: NeutronNuageOSControllerIp: '0.0.0.0' diff --git a/environments/neutron-opencontrail.yaml b/environments/neutron-opencontrail.yaml index b17c9c0a..4895287e 100644 --- a/environments/neutron-opencontrail.yaml +++ b/environments/neutron-opencontrail.yaml @@ -1,12 +1,15 @@ # A Heat environment file which can be used to enable OpenContrail # extensions, configured via puppet resource_registry: - OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None OS::TripleO::Services::NeutronL3Agent: OS::Heat::None OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None + # Override the NeutronCorePlugin to use Nuage + OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginOpencontrail + OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-opencontrail.yaml parameter_defaults: NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2 diff --git a/environments/neutron-plumgrid.yaml b/environments/neutron-plumgrid.yaml index 08ace103..87946211 100755 --- a/environments/neutron-plumgrid.yaml +++ b/environments/neutron-plumgrid.yaml @@ -6,8 +6,10 @@ resource_registry: OS::TripleO::Services::NeutronL3Agent: OS::Heat::None OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None # Override the Neutron core plugin to use PLUMgrid OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginPlumgrid + OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-plumgrid.yaml parameter_defaults: NeutronCorePlugin: 
networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2 diff --git a/environments/puppet-ceph-devel.yaml b/environments/puppet-ceph-devel.yaml index a2d1100f..a9e459df 100644 --- a/environments/puppet-ceph-devel.yaml +++ b/environments/puppet-ceph-devel.yaml @@ -1,6 +1,11 @@ # A Heat environment file which can be used to enable a Ceph # storage cluster using the controller and ceph nodes. # Rbd backends are enabled for Cinder, Glance, Gnocchi and Nova. +resource_registry: + OS::TripleO::Services::CephMon: ../puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: ../puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephClient: ../puppet/services/ceph-client.yaml + parameter_defaults: #NOTE: These ID's and keys should be regenerated for # a production deployment. What is here is suitable for @@ -8,9 +13,9 @@ parameter_defaults: CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ==' CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' + CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' NovaEnableRbdBackend: true CinderEnableRbdBackend: true GlanceBackend: rbd GnocchiBackend: rbd CinderEnableIscsiBackend: false - ControllerEnableCephStorage: true diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml index 865e0b98..d5e0b1b5 100644 --- a/environments/puppet-ceph-external.yaml +++ b/environments/puppet-ceph-external.yaml @@ -1,7 +1,7 @@ # A Heat environment file which can be used to enable the # use of an externally managed Ceph cluster. resource_registry: - OS::TripleO::CephClusterConfig::SoftwareConfig: ../puppet/extraconfig/ceph/ceph-external-config.yaml + OS::TripleO::Services::CephExternal: ../puppet/services/ceph-external.yaml parameter_defaults: # NOTE: These example parameters are required when using Ceph External diff --git a/environments/puppet-pacemaker-no-restart.yaml b/environments/puppet-pacemaker-no-restart.yaml new file mode 100644 index 00000000..67d8692d --- /dev/null +++ b/environments/puppet-pacemaker-no-restart.yaml @@ -0,0 +1,3 @@ +# use this file *in addition* to puppet-pacemaker.yaml +resource_registry: + OS::TripleO::Tasks::ControllerPostPuppetRestart: OS::Heat::None diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml index 52a94d80..4622bc4d 100644 --- a/environments/puppet-pacemaker.yaml +++ b/environments/puppet-pacemaker.yaml @@ -4,6 +4,7 @@ resource_registry: OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml # custom pacemaker services # NOTE: For now we will need to specify overrides to all services @@ -12,6 +13,7 @@ resource_registry: OS::TripleO::Services::CinderApi: ../puppet/services/pacemaker/cinder-api.yaml OS::TripleO::Services::CinderScheduler: ../puppet/services/pacemaker/cinder-scheduler.yaml OS::TripleO::Services::CinderVolume: ../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Core: ../puppet/services/pacemaker/core.yaml OS::TripleO::Services::Keystone: ../puppet/services/pacemaker/keystone.yaml OS::TripleO::Services::GlanceApi: ../puppet/services/pacemaker/glance-api.yaml OS::TripleO::Services::GlanceRegistry: ../puppet/services/pacemaker/glance-registry.yaml @@ 
-26,11 +28,14 @@ resource_registry: OS::TripleO::Services::NeutronCorePlugin: ../puppet/services/pacemaker/neutron-plugin-ml2.yaml # Neutron Core Plugin Vendors (these typically override NeutronCorePlugin) OS::TripleO::Services::NeutronCorePluginPlumgrid: ../puppet/services/pacemaker/neutron-plugin-plumgrid.yaml - + OS::TripleO::Services::NeutronCorePluginNuage: ../puppet/services/pacemaker/neutron-plugin-nuage.yaml + OS::TripleO::Services::NeutronCorePluginOpencontrail: ../puppet/services/pacemaker/neutron-plugin-opencontrail.yaml + OS::TripleO::Services::NeutronCorePluginMidonet: ../puppet/services/pacemaker/neutron-midonet.yaml OS::TripleO::Services::NeutronOvsAgent: ../puppet/services/pacemaker/neutron-ovs-agent.yaml OS::TripleO::Services::RabbitMQ: ../puppet/services/pacemaker/rabbitmq.yaml OS::TripleO::Services::HAproxy: ../puppet/services/pacemaker/haproxy.yaml OS::TripleO::Services::Memcached: ../puppet/services/pacemaker/memcached.yaml + OS::TripleO::Services::Pacemaker: ../puppet/services/pacemaker.yaml OS::TripleO::Services::Redis: ../puppet/services/pacemaker/database/redis.yaml OS::TripleO::Services::NovaConductor: ../puppet/services/pacemaker/nova-conductor.yaml OS::TripleO::Services::MongoDb: ../puppet/services/pacemaker/database/mongodb.yaml @@ -38,3 +43,13 @@ resource_registry: OS::TripleO::Services::NovaScheduler: ../puppet/services/pacemaker/nova-scheduler.yaml OS::TripleO::Services::NovaConsoleauth: ../puppet/services/pacemaker/nova-consoleauth.yaml OS::TripleO::Services::NovaVncproxy: ../puppet/services/pacemaker/nova-vncproxy.yaml + OS::TripleO::Services::CeilometerApi: ../puppet/services/pacemaker/ceilometer-api.yaml + OS::TripleO::Services::CeilometerCollector: ../puppet/services/pacemaker/ceilometer-collector.yaml + OS::TripleO::Services::CeilometerAgentCentral: ../puppet/services/pacemaker/ceilometer-agent-central.yaml + OS::TripleO::Services::CeilometerAgentNotification: ../puppet/services/pacemaker/ceilometer-agent-notification.yaml + #Gnocchi services + OS::TripleO::Services::GnocchiApi: ../puppet/services/pacemaker/gnocchi-api.yaml + OS::TripleO::Services::GnocchiMetricd: ../puppet/services/pacemaker/gnocchi-metricd.yaml + OS::TripleO::Services::GnocchiStatsd: ../puppet/services/pacemaker/gnocchi-statsd.yaml + OS::TripleO::Services::MySQL: ../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::Horizon: ../puppet/services/pacemaker/horizon.yaml diff --git a/environments/storage-environment.yaml b/environments/storage-environment.yaml index da33acfd..0128cabd 100644 --- a/environments/storage-environment.yaml +++ b/environments/storage-environment.yaml @@ -1,6 +1,11 @@ ## A Heat environment file which can be used to set up storage ## backends. Defaults to Ceph used as a backend for Cinder, Glance and ## Nova ephemeral storage. +resource_registry: + OS::TripleO::Services::CephMon: ../puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: ../puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephClient: ../puppet/services/ceph-client.yaml + parameter_defaults: #### BACKEND SELECTION #### @@ -43,10 +48,6 @@ parameter_defaults: #### CEPH SETTINGS #### - ## Whether to deploy Ceph OSDs on the controller nodes. By default - ## OSDs are deployed on dedicated ceph-storage nodes only. - # ControllerEnableCephStorage: false - ## When deploying Ceph Nodes through the oscplugin CLI, the following ## parameters are set automatically by the CLI. 
When deploying via ## heat stack-create or ceph on the controller nodes only, @@ -60,3 +61,5 @@ parameter_defaults: # CephMonKey: '' ## Ceph admin key, e.g. 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' # CephAdminKey: '' + ## Ceph client key, e.g 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' + # CephClientKey: '' diff --git a/extraconfig/all_nodes/default.yaml b/extraconfig/all_nodes/default.yaml deleted file mode 100644 index 68f9eadd..00000000 --- a/extraconfig/all_nodes/default.yaml +++ /dev/null @@ -1,27 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Noop extra config for allnodes extra cluster config - -# Parameters passed from the parent template - note if you maintain -# out-of-tree templates they may require additional parameters if the -# in-tree templates add a new role. -parameters: - controller_servers: - type: json - compute_servers: - type: json - blockstorage_servers: - type: json - objectstorage_servers: - type: json - cephstorage_servers: - type: json -# Note extra parameters can be defined, then passed data via the -# environment parameter_defaults, without modifying the parent template - -outputs: - # This value should change if the configuration data has changed - # It is used to e.g re-apply puppet after hieradata values change. - config_identifier: - value: none diff --git a/extraconfig/all_nodes/mac_hostname.yaml b/extraconfig/all_nodes/mac_hostname.yaml index 5883e06a..7d8704e3 100644 --- a/extraconfig/all_nodes/mac_hostname.yaml +++ b/extraconfig/all_nodes/mac_hostname.yaml @@ -113,10 +113,3 @@ resources: objectstorage_mappings: {get_attr: [CollectMacDeploymentsObjectStorage, deploy_stdouts]} cephstorage_mappings: {get_attr: [CollectMacDeploymentsCephStorage, deploy_stdouts]} actions: ['CREATE'] # Only do this on CREATE - -outputs: - # This value should change if the configuration data has changed - # It is used to e.g re-apply puppet after hieradata values change. - config_identifier: - value: {get_attr: [DistributeMacDeploymentsController, deploy_stdouts]} - diff --git a/extraconfig/all_nodes/random_string.yaml b/extraconfig/all_nodes/random_string.yaml index 49d2d8b6..d38701e2 100644 --- a/extraconfig/all_nodes/random_string.yaml +++ b/extraconfig/all_nodes/random_string.yaml @@ -57,9 +57,3 @@ resources: actions: ['CREATE'] # Only do this on CREATE input_values: random_value: {get_attr: [Random, value]} - -outputs: - # This value should change if the configuration data has changed - # It is used to e.g re-apply puppet after hieradata values change. 
- config_identifier: - value: {get_attr: [Random, value]} diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml index 89a2adb0..e6fa9eca 100644 --- a/extraconfig/all_nodes/swap-partition.yaml +++ b/extraconfig/all_nodes/swap-partition.yaml @@ -84,7 +84,3 @@ resources: input_values: swap_partition_label: {get_param: swap_partition_label} actions: ["CREATE"] - -outputs: - config_identifier: - value: none diff --git a/extraconfig/all_nodes/swap.yaml b/extraconfig/all_nodes/swap.yaml index 374b1e5d..5383ffc9 100644 --- a/extraconfig/all_nodes/swap.yaml +++ b/extraconfig/all_nodes/swap.yaml @@ -102,7 +102,3 @@ resources: swap_size_megabytes: {get_param: swap_size_megabytes} swap_path: {get_param: swap_path} actions: ["CREATE"] - -outputs: - config_identifier: - value: none diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh index f5399222..36d85444 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh @@ -2,14 +2,96 @@ set -eu -cluster_sync_timeout=600 +cluster_sync_timeout=1800 if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then echo_error "ERROR: upgrade cannot start with some cluster nodes being offline" exit 1 fi + +# We want to disable fencing during the cluster --stop as it might fence +# nodes where a service fails to stop, which could be fatal during an upgrade +# procedure. So we remember the stonith state. If it was enabled we reenable it +# at the end of this script +STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }') +pcs property set stonith-enabled=false + +# If for some reason rpm-python are missing we want to error out early enough +if [ ! rpm -q rpm-python &> /dev/null ]; then + echo_error "ERROR: upgrade cannot start without rpm-python installed" + exit 1 +fi + +# In case the mysql package is updated, the database on disk must be +# upgraded as well. This typically needs to happen during major +# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...) +# +# Because in-place upgrades are not supported across 2+ major versions +# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle +# https://bugzilla.redhat.com/show_bug.cgi?id=1341968 +# +# The default is to determine automatically if upgrade is needed based +# on mysql package versionning, but this can be overriden manually +# to support specific upgrade scenario + +# Where to backup current database if mysql need to be upgraded +MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp +MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup +# Spare disk ratio for extra safety +MYSQL_BACKUP_SIZE_RATIO=1.2 + +# Shall we upgrade mysql data directory during the stack upgrade? +if [ "$mariadb_do_major_upgrade" = "auto" ]; then + ret=$(is_mysql_upgrade_needed) + if [ $ret = "1" ]; then + DO_MYSQL_UPGRADE=1 + else + DO_MYSQL_UPGRADE=0 + fi + echo "mysql upgrade required: $DO_MYSQL_UPGRADE" +elif [ "$mariadb_do_major_upgrade" = 0 ]; then + DO_MYSQL_UPGRADE=0 +else + DO_MYSQL_UPGRADE=1 +fi + if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + if [ -d "$MYSQL_BACKUP_DIR" ]; then + echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously" + exit 1 + fi + mkdir "$MYSQL_BACKUP_DIR" + if [ $? 
-ne 0 ]; then + echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR" + exit 1 + fi + + # the /root/.my.cnf is needed because we set the mysql root + # password from liberty onwards + backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" + # While not ideal, this step allows us to calculate exactly how much space the dump + # will need. Our main goal here is avoiding any chance of corruption due to disk space + # exhaustion + backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c) + database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }') + free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1) + + # we need at least space for a new mysql database + dump of the existing one, + # times a small factor for additional safety room + # note: bash doesn't do floating point math or floats in if statements, + # so use python to apply the ratio and cast it back to integer + required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))") + if [ $required_space -ge $free_space ]; then + echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)" + exit 1 + fi + + mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" + cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" + fi + pcs resource disable httpd check_resource httpd stopped 1800 pcs resource disable openstack-core @@ -46,9 +128,69 @@ while systemctl is-active pacemaker; do fi done +# The reason we do an sql dump *and* we move the old dir out of +# the way is because it gives us an extra level of safety in case +# something goes wrong during the upgrade. Once the restore is +# successful we go ahead and remove it. If the directory exists +# we bail out as it means the upgrade process had issues in the last +# run. +if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then + echo_error "ERROR: mysql backup dir already exist" + exit 1 + fi + mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR +fi + yum -y install python-zaqarclient # needed for os-collect-config yum -y -q update +# We need to ensure at least those two configuration settings, otherwise +# mariadb 10.1+ won't activate galera replication. +# wsrep_cluster_address must only be set though, its value does not +# matter because it's overriden by the galera resource agent. +cat >> /etc/my.cnf.d/galera.cnf <<EOF +[mysqld] +wsrep_on = ON +wsrep_cluster_address = gcomm://localhost +EOF + +if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then + if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + # Scripts run via heat have no HOME variable set and this confuses + # mysqladmin + export HOME=/root + mkdir /var/lib/mysql || /bin/true + chown mysql:mysql /var/lib/mysql + chmod 0755 /var/lib/mysql + restorecon -R /var/lib/mysql/ + mysql_install_db --datadir=/var/lib/mysql --user=mysql + chown -R mysql:mysql /var/lib/mysql/ + mysqld_safe --wsrep-new-cluster & + # We have a populated /root/.my.cnf with root/password here so + # we need to temporarily rename it because the newly created + # db is empty and no root password is set + mv /root/.my.cnf /root/.my.cnf.temporary + timeout 60 sh -c 'while ! 
mysql -e "" &> /dev/null; do sleep 1; done' + mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql" + mv /root/.my.cnf.temporary /root/.my.cnf + mysqladmin -u root shutdown + # The import was successful so we may remove the folder + rm -r "$MYSQL_BACKUP_DIR" + fi +fi + +# If we reached here without error we can safely blow away the origin +# mysql dir from every controller +if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR +fi + +# Let's reset the stonith back to true if it was true, before starting the cluster +if [ $STONITH_STATE == "true" ]; then + pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true +fi + # Pin messages sent to compute nodes to kilo, these will be upgraded later crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute" # https://bugzilla.redhat.com/show_bug.cgi?id=1284047 diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml index 4af3186c..c70a954f 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -20,6 +20,12 @@ parameters: type: string description: Nova Compute upgrade level default: '' + MySqlMajorUpgrade: + type: string + description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade + constraints: + - allowed_values: ['auto', 'yes', 'no'] + default: 'auto' resources: # TODO(jistr): for Mitaka->Newton upgrades and further we can use @@ -39,6 +45,12 @@ resources: upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' params: UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} + - str_replace: + template: | + #!/bin/bash + mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE' + params: + MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade} - get_file: pacemaker_common_functions.sh - get_file: major_upgrade_pacemaker_migrations.sh - get_file: major_upgrade_controller_pacemaker_1.sh diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh index b63198db..164269dc 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh +++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh @@ -13,6 +13,50 @@ # been already applied, it should be possible to call the function # again without damaging the deployment or failing the upgrade. +# If the major version of mysql is going to change after the major +# upgrade, the database must be upgraded on disk to avoid failures +# due to internal incompatibilities between major mysql versions +# https://bugs.launchpad.net/tripleo/+bug/1587449 +# This function detects whether a database upgrade is required +# after a mysql package upgrade. It returns 0 when no major upgrade +# has to take place, 1 otherwise. +function is_mysql_upgrade_needed { + # The name of the package which provides mysql might differ + # after the upgrade. Consider the generic package name, which + # should capture the major version change (e.g. 5.5 -> 10.1) + local name="mariadb" + local output + local ret + set +e + output=$(yum -q check-update $name) + ret=$? 
+ set -e + if [ $ret -ne 100 ]; then + # no updates so we exit + echo "0" + return + fi + + local currentepoch=$(rpm -q --qf "%{epoch}" $name) + local currentversion=$(rpm -q --qf "%{version}" $name) + local currentrelease=$(rpm -q --qf "%{release}" $name) + local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name) + local newepoch=$(echo "$newoutput" | awk '{ print $1 }') + local newversion=$(echo "$newoutput" | awk '{ print $2 }') + local newrelease=$(echo "$newoutput" | awk '{ print $3 }') + + # With this we trigger the dump restore/path if we change either epoch or + # version in the package If only the release tag changes we do not do it + # FIXME: we could refine this by trying to parse the mariadb version + # into X.Y.Z and trigger the update only if X and/or Y change. + output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc") + if [ "$output" != "-1" ]; then + echo "0" + return + fi + echo "1" +} + function add_missing_openstack_core_constraints { # The CIBs are saved under /root as they might contain sensitive data CIB="/root/migration.cib" diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml index fbed9ce5..b62502f8 100644 --- a/extraconfig/tasks/post_puppet_pacemaker.yaml +++ b/extraconfig/tasks/post_puppet_pacemaker.yaml @@ -29,20 +29,9 @@ resources: config: {get_resource: ControllerPostPuppetMaintenanceModeConfig} input_values: {get_param: input_values} - ControllerPostPuppetRestartConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: pacemaker_resource_restart.sh - - ControllerPostPuppetRestartDeployment: - type: OS::Heat::SoftwareDeployments + ControllerPostPuppetRestart: + type: OS::TripleO::Tasks::ControllerPostPuppetRestart depends_on: ControllerPostPuppetMaintenanceModeDeployment properties: servers: {get_param: servers} - config: {get_resource: ControllerPostPuppetRestartConfig} input_values: {get_param: input_values} diff --git a/extraconfig/tasks/post_puppet_pacemaker_restart.yaml b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml new file mode 100644 index 00000000..52760c87 --- /dev/null +++ b/extraconfig/tasks/post_puppet_pacemaker_restart.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2014-10-16 +description: 'Post-Puppet restart config for Pacemaker deployments' + +parameters: + servers: + type: json + input_values: + type: json + description: input values for the software deployments + +resources: + + ControllerPostPuppetRestartConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - get_file: pacemaker_common_functions.sh + - get_file: pacemaker_resource_restart.sh + + ControllerPostPuppetRestartDeployment: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: servers} + config: {get_resource: ControllerPostPuppetRestartConfig} + input_values: {get_param: input_values} diff --git a/firstboot/os-net-config-mappings.yaml b/firstboot/os-net-config-mappings.yaml new file mode 100644 index 00000000..833c3bc2 --- /dev/null +++ b/firstboot/os-net-config-mappings.yaml @@ -0,0 +1,65 @@ +heat_template_version: 2015-10-15 + +description: > + Configure os-net-config mappings for specific nodes + Your environment file needs to look like: + parameter_defaults: + NetConfigDataLookup: + node1: + nic1: 
"00:c8:7c:e6:f0:2e" + node2: + nic1: "00:18:7d:99:0c:b6" + This will result in the first nodeN entry where a mac matches a + local device being written as a mapping file for os-net-config in + /etc/os-net-config/mapping.yaml + +parameters: + # Note this requires a liberty heat or newer in the undercloud due to + # the 2015-10-15 (which is required to enable str_replace serializing + # the json parameter to json, another approch with a string parameter + # will be required for older heat versions) + NetConfigDataLookup: + type: json + default: {} + description: per-node configuration map + +resources: + userdata: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: OsNetConfigMappings} + + OsNetConfigMappings: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: | + #!/bin/sh + eth_addr=$(/sbin/ifconfig eth0 | grep ether | awk '{print $2}') + mkdir -p /etc/os-net-config + + # Create an os-net-config mapping file, note this defaults to + # /etc/os-net-config/mapping.yaml, so we use that name despite + # rendering the result as json + echo '$node_lookup' | python -c " + import json + import sys + import yaml + input = sys.stdin.readline() or '{}' + data = json.loads(input) + for node in data: + if '${eth_addr}' in data[node].values(): + interface_mapping = {'interface_mapping': data[node]} + with open('/etc/os-net-config/mapping.yaml', 'w') as f: + yaml.safe_dump(interface_mapping, f, default_flow_style=False) + break + " + params: + $node_lookup: {get_param: NetConfigDataLookup} + +outputs: + OS::stack_id: + value: {get_resource: userdata} diff --git a/net-config-static-bridge-with-external-dhcp.yaml b/net-config-static-bridge-with-external-dhcp.yaml new file mode 100644 index 00000000..6dbe5982 --- /dev/null +++ b/net-config-static-bridge-with-external-dhcp.yaml @@ -0,0 +1,99 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config for a simple bridge configured + with a static IP address for the ctlplane network. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ManagementIpSubnet: + default: '' + description: IP address/subnet on the management network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: ovs_bridge + name: {get_input: bridge_name} + use_dhcp: true + members: + - + type: interface + name: {get_input: interface_name} + # force the MAC address of the bridge to this interface + primary: true + - + type: interface + # would like to do the following, but can't b/c of: + # https://bugs.launchpad.net/heat/+bug/1344284 + # name: + # list_join: + # - '/' + # - - {get_input: bridge_name} + # - ':0' + # So, just hardcode to br-ex:0 for now, br-ex is hardcoded in + # controller.yaml anyway. + name: br-ex:0 + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml index c76e1360..5afcf5de 100644 --- a/network/endpoints/endpoint_data.yaml +++ b/network/endpoints/endpoint_data.yaml @@ -66,6 +66,12 @@ Mysql: protocol: mysql+pymysql port: 3306 +MysqlNoBrackets: + Internal: + vip_param: MysqlNoBrackets + protocol: mysql+pymysql + port: 3306 + Heat: Internal: vip_param: HeatApi @@ -140,15 +146,15 @@ Nova: Internal: vip_param: NovaApi uri_suffixes: - '': /v2.1/%(tenant_id)s + '': /v2.1 Public: vip_param: Public uri_suffixes: - '': /v2.1/%(tenant_id)s + '': /v2.1 Admin: vip_param: NovaApi uri_suffixes: - '': /v2.1/%(tenant_id)s + '': /v2.1 port: 8774 NovaVNCProxy: @@ -192,3 +198,18 @@ Sahara: uri_suffixes: '': /v1.1/%(tenant_id)s port: 8386 + +Ironic: + Internal: + vip_param: IronicApi + uri_suffixes: + '': /v1 + Public: + vip_param: Public + uri_suffixes: + '': /v1 + Admin: + vip_param: IronicApi + uri_suffixes: + '': /v1 + port: 6385 diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml index fbfa0a15..e1b8984f 100644 --- a/network/endpoints/endpoint_map.yaml +++ b/network/endpoints/endpoint_map.yaml @@ -15,8 +15,10 @@ parameters: GlanceRegistryVirtualIP: {type: string, default: ''} GnocchiApiVirtualIP: {type: string, default: ''} HeatApiVirtualIP: {type: string, default: ''} + IronicApiVirtualIP: {type: string, default: ''} KeystoneAdminApiVirtualIP: {type: string, default: ''} KeystonePublicApiVirtualIP: {type: string, default: ''} + MysqlNoBracketsVirtualIP: {type: string, default: ''} MysqlVirtualIP: {type: string, default: ''} NeutronApiVirtualIP: {type: string, default: ''} NovaApiVirtualIP: {type: string, default: ''} @@ -49,10 +51,15 @@ parameters: HeatCfnInternal: {protocol: http, port: '8000', host: IP_ADDRESS} HeatCfnPublic: {protocol: http, port: '8000', host: IP_ADDRESS} HorizonPublic: {protocol: http, port: '80', host: IP_ADDRESS} + IronicAdmin: {protocol: http, port: '6385', host: IP_ADDRESS} + IronicInternal: {protocol: http, port: '6385', host: IP_ADDRESS} + IronicPublic: {protocol: http, port: '6385', host: IP_ADDRESS} KeystoneAdmin: {protocol: http, port: '35357', host: IP_ADDRESS} KeystoneInternal: {protocol: http, port: '5000', host: IP_ADDRESS} KeystonePublic: {protocol: http, port: '5000', host: IP_ADDRESS} MysqlInternal: {protocol: mysql+pymysql, port: '3306', host: IP_ADDRESS} + MysqlNoBracketsInternal: {protocol: mysql+pymysql, port: 
'3306', + host: IP_ADDRESS} NeutronAdmin: {protocol: http, port: '9696', host: IP_ADDRESS} NeutronInternal: {protocol: http, port: '9696', host: IP_ADDRESS} NeutronPublic: {protocol: http, port: '9696', host: IP_ADDRESS} @@ -1076,6 +1083,123 @@ outputs: IP_ADDRESS: {get_param: PublicVirtualIP} - ':' - get_param: [EndpointMap, HorizonPublic, port] + IronicAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, IronicAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: IronicApiVirtualIP} + port: + get_param: [EndpointMap, IronicAdmin, port] + protocol: + get_param: [EndpointMap, IronicAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, IronicAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, IronicAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: IronicApiVirtualIP} + - ':' + - get_param: [EndpointMap, IronicAdmin, port] + - /v1 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, IronicAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, IronicAdmin, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: IronicApiVirtualIP} + - ':' + - get_param: [EndpointMap, IronicAdmin, port] + IronicInternal: + host: + str_replace: + template: + get_param: [EndpointMap, IronicInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: IronicApiVirtualIP} + port: + get_param: [EndpointMap, IronicInternal, port] + protocol: + get_param: [EndpointMap, IronicInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, IronicInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, IronicInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: IronicApiVirtualIP} + - ':' + - get_param: [EndpointMap, IronicInternal, port] + - /v1 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, IronicInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, IronicInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: IronicApiVirtualIP} + - ':' + - get_param: [EndpointMap, IronicInternal, port] + IronicPublic: + host: + str_replace: + template: + get_param: [EndpointMap, IronicPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + port: + get_param: [EndpointMap, IronicPublic, port] + protocol: + get_param: [EndpointMap, IronicPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, IronicPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, IronicPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, IronicPublic, port] + - /v1 + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, IronicPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, IronicPublic, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: PublicVirtualIP} + - ':' + - get_param: [EndpointMap, IronicPublic, port] KeystoneAdmin: host: str_replace: @@ -1387,6 +1511,44 @@ outputs: IP_ADDRESS: {get_param: MysqlVirtualIP} - ':' - get_param: [EndpointMap, MysqlInternal, port] + MysqlNoBracketsInternal: + host: + str_replace: + template: + get_param: [EndpointMap, MysqlNoBracketsInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + 
IP_ADDRESS: {get_param: MysqlNoBracketsVirtualIP} + port: + get_param: [EndpointMap, MysqlNoBracketsInternal, port] + protocol: + get_param: [EndpointMap, MysqlNoBracketsInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, MysqlNoBracketsInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, MysqlNoBracketsInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: MysqlNoBracketsVirtualIP} + - ':' + - get_param: [EndpointMap, MysqlNoBracketsInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, MysqlNoBracketsInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, MysqlNoBracketsInternal, host] + params: + CLOUDNAME: {get_param: CloudName} + IP_ADDRESS: {get_param: MysqlNoBracketsVirtualIP} + - ':' + - get_param: [EndpointMap, MysqlNoBracketsInternal, port] NeutronAdmin: host: str_replace: @@ -1526,7 +1688,7 @@ outputs: IP_ADDRESS: {get_param: NovaApiVirtualIP} - ':' - get_param: [EndpointMap, NovaAdmin, port] - - /v2.1/%(tenant_id)s + - /v2.1 uri_no_suffix: list_join: - '' @@ -1565,7 +1727,7 @@ outputs: IP_ADDRESS: {get_param: NovaApiVirtualIP} - ':' - get_param: [EndpointMap, NovaInternal, port] - - /v2.1/%(tenant_id)s + - /v2.1 uri_no_suffix: list_join: - '' @@ -1604,7 +1766,7 @@ outputs: IP_ADDRESS: {get_param: PublicVirtualIP} - ':' - get_param: [EndpointMap, NovaPublic, port] - - /v2.1/%(tenant_id)s + - /v2.1 uri_no_suffix: list_join: - '' diff --git a/network/management_v6.yaml b/network/management_v6.yaml new file mode 100644 index 00000000..a5e70667 --- /dev/null +++ b/network/management_v6.yaml @@ -0,0 +1,69 @@ +heat_template_version: 2015-04-30 + +description: > + Management network. System administration, SSH, DNS, NTP, etc. This network + would usually be the default gateway for the non-controller nodes. + +parameters: + # the defaults here work for static IP assignment (IPAM) only + ManagementNetCidr: + default: 'fd00:fd00:fd00:6000::/64' + description: Cidr for the management network. + type: string + ManagementNetValueSpecs: + default: {'provider:physical_network': 'management', 'provider:network_type': 'flat'} + description: Value specs for the management network. + type: json + ManagementNetAdminStateUp: + default: false + description: This admin state of of the network. + type: boolean + ManagementNetShared: + default: false + description: Whether this network is shared across all tenants. + type: boolean + ManagementNetName: + default: management + description: The name of the management network. + type: string + ManagementSubnetName: + default: management_subnet + description: The name of the management subnet in Neutron. + type: string + ManagementAllocationPools: + default: [{'start': 'fd00:fd00:fd00:6000::10', 'end': 'fd00:fd00:fd00:6000:ffff:ffff:ffff:fffe'}] + description: Ip allocation pool range for the management network. 
+ type: json + IPv6AddressMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 address mode + type: string + IPv6RAMode: + default: dhcpv6-stateful + description: Neutron subnet IPv6 router advertisement mode + type: string + +resources: + ManagementNetwork: + type: OS::Neutron::Net + properties: + admin_state_up: {get_param: ManagementNetAdminStateUp} + name: {get_param: ManagementNetName} + shared: {get_param: ManagementNetShared} + value_specs: {get_param: ManagementNetValueSpecs} + + ManagementSubnet: + type: OS::Neutron::Subnet + properties: + ip_version: 6 + ipv6_address_mode: {get_param: IPv6AddressMode} + ipv6_ra_mode: {get_param: IPv6RAMode} + cidr: {get_param: ManagementNetCidr} + name: {get_param: ManagementSubnetName} + network: {get_resource: ManagementNetwork} + allocation_pools: {get_param: ManagementAllocationPools} + +outputs: + OS::stack_id: + description: Neutron management network + value: {get_resource: ManagementNetwork} diff --git a/network/networks.yaml b/network/networks.yaml index ab50ae11..d3ae482b 100644 --- a/network/networks.yaml +++ b/network/networks.yaml @@ -21,3 +21,6 @@ resources: ManagementNetwork: type: OS::TripleO::Network::Management + + NetworkExtraConfig: + type: OS::TripleO::Network::ExtraConfig diff --git a/network/ports/management_from_pool_v6.yaml b/network/ports/management_from_pool_v6.yaml new file mode 100644 index 00000000..d9ac6046 --- /dev/null +++ b/network/ports/management_from_pool_v6.yaml @@ -0,0 +1,52 @@ +heat_template_version: 2015-10-15 + +description: > + Returns an IP from a network mapped list of IPs. This version is for IPv6 + addresses. The ip_address_uri output will have brackets for use in URLs. + +parameters: + ManagementNetName: + description: Name of the management network + default: management + type: string + PortName: + description: Name of the port + default: '' + type: string + ControlPlaneIP: # Here for compatability with noop.yaml + description: IP address on the control plane + default: '' + type: string + IPPool: + default: {} + description: A network mapped list of IPs + type: json + NodeIndex: + default: 0 + description: Index of the IP to get from Pool + type: number + ManagementNetCidr: + default: 'fd00:fd00:fd00:6000::/64' + description: Cidr for the management network. 
+ type: string + +outputs: + ip_address: + description: management network IP + value: {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]} + ip_address_uri: + description: management network IP (for compatibility with management_v6.yaml) + value: + list_join: + - '' + - - '[' + - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]} + - ']' + ip_subnet: + description: IP/Subnet CIDR for the management network IP + value: + list_join: + - '' + - - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]} + - '/' + - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]} diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml index 4fab2466..c71ced2d 100644 --- a/overcloud-resource-registry-puppet.yaml +++ b/overcloud-resource-registry-puppet.yaml @@ -18,9 +18,7 @@ resource_registry: OS::TripleO::BlockStoragePostDeployment: puppet/cinder-storage-post.yaml OS::TripleO::CephStoragePostDeployment: puppet/ceph-storage-post.yaml OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: puppet/swift-devices-and-proxy-config.yaml - OS::TripleO::CephClusterConfig::SoftwareConfig: puppet/ceph-cluster-config.yaml OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml - OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml # Tasks (for internal TripleO usage) OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None @@ -28,6 +26,9 @@ resource_registry: OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None + + OS::TripleO::Server: OS::Nova::Server + # This creates the "heat-admin" user for all OS images by default # To disable, replace with firstboot/userdata_default.yaml OS::TripleO::NodeAdminUserData: firstboot/userdata_heat_admin.yaml @@ -50,7 +51,8 @@ resource_registry: # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when # configuration with knowledge of all nodes in the cluster is required vs single # node configuration in the pre_deploy step. 
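  # Illustrative sketch (hypothetical, not part of this change): with the
  # default mapping now pointing at OS::Heat::None, an all-nodes extra config
  # can be re-enabled from a custom environment file, e.g.:
  #
  #   resource_registry:
  #     OS::TripleO::AllNodesExtraConfig: /home/stack/templates/my_all_nodes_config.yaml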
- OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml + # See extraconfig/all_nodes/* for examples + OS::TripleO::AllNodesExtraConfig: OS::Heat::None # TripleO overcloud networks OS::TripleO::Network: network/networks.yaml @@ -63,6 +65,8 @@ resource_registry: OS::TripleO::Network::Tenant: OS::Heat::None OS::TripleO::Network::Management: OS::Heat::None + OS::TripleO::Network::ExtraConfig: OS::Heat::None + OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml @@ -122,9 +126,14 @@ resource_registry: # services OS::TripleO::Services: puppet/services/services.yaml + OS::TripleO::Services::CephMon: OS::Heat::None + OS::TripleO::Services::CephOSD: OS::Heat::None + OS::TripleO::Services::CephClient: OS::Heat::None + OS::TripleO::Services::CephExternal: OS::Heat::None OS::TripleO::Services::CinderApi: puppet/services/cinder-api.yaml OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml + OS::TripleO::Services::Core: OS::Heat::None OS::TripleO::Services::Keystone: puppet/services/keystone.yaml OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml OS::TripleO::Services::GlanceRegistry: puppet/services/glance-registry.yaml @@ -132,20 +141,30 @@ resource_registry: OS::TripleO::Services::HeatApiCfn: puppet/services/heat-api-cfn.yaml OS::TripleO::Services::HeatApiCloudwatch: puppet/services/heat-api-cloudwatch.yaml OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml + OS::TripleO::Services::IronicApi: puppet/services/ironic-api.yaml + OS::TripleO::Services::IronicConductor: puppet/services/ironic-conductor.yaml + OS::TripleO::Services::Kernel: puppet/services/kernel.yaml + OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml OS::TripleO::Services::NeutronServer: puppet/services/neutron-server.yaml OS::TripleO::Services::NeutronCorePlugin: puppet/services/neutron-plugin-ml2.yaml + # can be the same as NeutronCorePlugin but some vendors install different + # things where VMs run + OS::TripleO::Services::ComputeNeutronCorePlugin: puppet/services/neutron-plugin-ml2.yaml # Neutron Core Plugin Vendors (these typically override NeutronCorePlugin) OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml - + OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml + OS::TripleO::Services::NeutronCorePluginOpencontrail: puppet/services/neutron-plugin-opencontrail.yaml + OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml + OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml + OS::TripleO::Services::Pacemaker: OS::Heat::None OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml OS::TripleO::Services::Keepalived: puppet/services/keepalived.yaml OS::TripleO::Services::Memcached: puppet/services/memcached.yaml - OS::TripleO::Services::SwiftProxy: puppet/services/swift-proxy.yaml 
OS::TripleO::Services::SaharaApi: puppet/services/sahara-api.yaml OS::TripleO::Services::SaharaEngine: puppet/services/sahara-engine.yaml OS::TripleO::Services::Redis: puppet/services/database/redis.yaml @@ -156,6 +175,24 @@ resource_registry: OS::TripleO::Services::NovaConsoleauth: puppet/services/nova-consoleauth.yaml OS::TripleO::Services::NovaVncproxy: puppet/services/nova-vncproxy.yaml OS::TripleO::Services::NovaCompute: puppet/services/nova-compute.yaml + OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml + OS::TripleO::Services::Ntp: puppet/services/time/ntp.yaml + OS::TripleO::Services::SwiftProxy: puppet/services/swift-proxy.yaml + OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml + OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml + OS::TripleO::Services::Snmp: puppet/services/snmp.yaml + OS::TripleO::Services::Timezone: puppet/services/time/timezone.yaml + OS::TripleO::Services::CeilometerApi: puppet/services/ceilometer-api.yaml + OS::TripleO::Services::CeilometerCollector: puppet/services/ceilometer-collector.yaml + OS::TripleO::Services::CeilometerExpirer: puppet/services/ceilometer-expirer.yaml + OS::TripleO::Services::CeilometerAgentCentral: puppet/services/ceilometer-agent-central.yaml + OS::TripleO::Services::CeilometerAgentNotification: puppet/services/ceilometer-agent-notification.yaml + OS::TripleO::Services::ComputeCeilometerAgent: puppet/services/ceilometer-agent-compute.yaml + OS::TripleO::Services::Horizon: puppet/services/horizon.yaml + #Gnocchi services + OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml + OS::TripleO::Services::GnocchiMetricd: puppet/services/gnocchi-metricd.yaml + OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml parameter_defaults: EnablePackageInstall: false diff --git a/overcloud.yaml b/overcloud.yaml index d8955b9e..077ffacc 100644 --- a/overcloud.yaml +++ b/overcloud.yaml @@ -20,56 +20,10 @@ parameters: description: The password for the aodh services. type: string hidden: true - CeilometerBackend: - default: 'mongodb' - description: The ceilometer backend type. - type: string - CeilometerMeteringSecret: - description: Secret shared by the ceilometer services. - type: string - hidden: true - CeilometerPassword: - description: The password for the ceilometer service account. - type: string - hidden: true - CeilometerMeterDispatcher: - default: 'database' - description: Dispatcher to process meter data - type: string - constraints: - - allowed_values: ['gnocchi', 'database'] - # This has to be an UUID so for now we generate it outside the template - CephClusterFSID: - default: '' - type: string - description: The Ceph cluster FSID. Must be a UUID. - CephMonKey: - default: '' - description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key. - type: string - hidden: true - CephAdminKey: - default: '' - description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key. - type: string - hidden: true CinderEnableNfsBackend: default: false description: Whether to enable or not the NFS backend for Cinder type: boolean - CephClientKey: - default: '' - description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring. - type: string - hidden: true - CephExternalMonHost: - default: '' - type: string - description: List of externally managed Ceph Mon Host IPs. 
Only used for external Ceph deployments. - CinderEnableIscsiBackend: - default: true - description: Whether to enable or not the Iscsi backend for Cinder - type: boolean CinderEnableRbdBackend: default: false description: Whether to enable or not the Rbd backend for Cinder @@ -94,10 +48,6 @@ parameters: default: /dev/log description: Syslog address where HAproxy will send its log type: string - HorizonAllowedHosts: - default: '*' - description: A list of IP/Hostname allowed to connect to horizon - type: comma_delimited_list ImageUpdatePolicy: default: 'REBUILD_PRESERVE_EPHEMERAL' description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. @@ -118,38 +68,10 @@ parameters: default: false description: Enable IPv6 features in Memcached. type: boolean - NeutronBridgeMappings: - description: > - The OVS logical->physical bridge mappings to use. See the Neutron - documentation for details. Defaults to mapping br-ex - the external - bridge on hosts - to a physical name 'datacentre' which can be used - to create provider networks (and we use this for the default floating - network) - if changing this either use different post-install network - scripts or be sure to keep 'datacentre' as a mapping network name. - type: comma_delimited_list - default: "datacentre:br-ex" NeutronControlPlaneID: default: 'ctlplane' type: string description: Neutron ID or name for ctlplane network. - NeutronEnableTunnelling: - type: string - default: "True" - NeutronEnableL2Pop: - type: string - description: > - Enable/disable the L2 population feature in the Neutron agents. - default: "False" - NeutronFlatNetworks: - type: comma_delimited_list - default: 'datacentre' - description: > - If set, flat networks to configure in neutron plugins. Defaults to - 'datacentre' to permit external network creation. - NeutronNetworkType: - default: 'vxlan' - description: The tenant network type for Neutron. - type: comma_delimited_list NeutronPassword: description: The password for the neutron service account, used by neutron agents. type: string @@ -158,61 +80,6 @@ parameters: default: nic1 description: What interface to bridge onto br-ex for network nodes. type: string - NeutronMetadataProxySharedSecret: - description: Shared secret to prevent spoofing - type: string - hidden: true - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This value will be used to construct the NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. - default: "1400" - type: string - NeutronTunnelTypes: - default: 'vxlan' - description: | - The tunnel types for the Neutron tenant network. - type: comma_delimited_list - type: comma_delimited_list - NeutronCorePlugin: - default: 'ml2' - description: | - The core plugin for Neutron. The value should be the entrypoint to be loaded - from neutron.core_plugins namespace. - type: string - NeutronServicePlugins: - default: "router,qos" - description: | - Comma-separated list of service plugin entrypoints to be loaded from the - neutron.service_plugins namespace. - type: comma_delimited_list - NeutronTypeDrivers: - default: "vxlan,vlan,flat,gre" - description: | - Comma-separated list of network type driver entrypoints to be loaded. 
- type: comma_delimited_list - NeutronMechanismDrivers: - default: 'openvswitch' - description: | - The mechanism drivers for the Neutron tenant network. - type: comma_delimited_list - type: comma_delimited_list - NeutronAgentExtensions: - default: "qos" - description: | - Comma-separated list of extensions enabled for the Neutron agents. - type: comma_delimited_list - NeutronAllowL3AgentFailover: - default: 'False' - description: Allow automatic l3-agent failover - type: string - NeutronL3HA: - default: 'False' - description: Whether to enable l3-agent HA - type: string NovaIPv6: default: false description: Enable IPv6 features in Nova @@ -221,10 +88,6 @@ parameters: description: The password for the nova service account, used by nova-api. type: string hidden: true - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list PublicVirtualFixedIPs: default: [] description: > @@ -257,14 +120,6 @@ parameters: description: The password for Redis type: string hidden: true - SnmpdReadonlyUserName: - default: ro_snmp_user - description: The user name for SNMPd with readonly rights running on all Overcloud nodes - type: string - SnmpdReadonlyUserPassword: - description: The user password for SNMPd with readonly rights running on all Overcloud nodes - type: string - hidden: true StorageVirtualFixedIPs: default: [] description: > @@ -277,10 +132,6 @@ parameters: Control the IP allocation for the StorageMgmgVirtualInterface port. E.g. [{'ip_address':'1.2.3.4'}] type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on nodes. - type: string CloudDomain: default: 'localdomain' type: string @@ -295,23 +146,9 @@ parameters: type: json # Controller-specific params - CinderLVMLoopDeviceSize: - default: 10280 - description: The size of the loopback file used by the cinder LVM driver. - type: number - CinderPassword: - description: The password for the cinder service account, used by cinder-api. - type: string - hidden: true - CinderISCSIHelper: - default: lioadm - description: The iSCSI helper to use with cinder. - type: string ControllerCount: type: number default: 1 - constraints: - - range: {min: 1} controllerExtraConfig: default: {} description: | @@ -340,14 +177,6 @@ parameters: default: true description: Whether to use Galera instead of regular MariaDB. type: boolean - ControllerEnableCephStorage: - default: false - description: Whether to deploy Ceph Storage (OSD) on the Controller - type: boolean - ControllerEnableSwiftStorage: - default: true - description: Whether to enable Swift Storage on the Controller - type: boolean ControllerSchedulerHints: type: json description: Optional scheduler hints to pass to nova @@ -391,19 +220,6 @@ parameters: ] } type: json - GnocchiBackend: - default: file - description: The short name of the Gnocchi backend to use. Should be one - of swift, rbd or file - type: string - constraints: - - allowed_values: ['swift', 'file', 'rbd'] - GnocchiIndexerBackend: - default: 'mysql' - description: The short name of the Gnocchi indexer backend to use. - type: string - GnocchiPassword: - description: The password for the gnocchi service account. type: string hidden: true InstanceNameTemplate: @@ -411,7 +227,7 @@ parameters: description: Template string to be used to generate instance names type: string ManageFirewall: - default: false + default: true description: Whether to manage IPtables rules. 
type: boolean PurgeFirewallRules: @@ -439,30 +255,8 @@ parameters: description: A random string to be used as a salt when hashing to determine mappings in the ring. type: string hidden: true - SwiftMountCheck: - default: 'false' - description: Value of mount_check in Swift account/container/object -server.conf - type: boolean - SwiftMinPartHours: - type: number - default: 1 - description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance. - SwiftPartPower: - default: 10 - description: Partition Power to use when building Swift rings - type: number - SwiftReplicas: - type: number - default: 3 - description: How many replicas to use in the swift rings. # Compute-specific params - CeilometerComputeAgent: - description: Indicates whether the Compute agent is present and expects nova-compute to be configured accordingly - type: string - default: '' - constraints: - - allowed_values: ['', Present] ComputeCount: type: number default: 1 @@ -477,13 +271,6 @@ parameters: default: nic1 description: What interface to add to the HypervisorNeutronPhysicalBridge. type: string - NeutronNetworkVLANRanges: - default: 'datacentre:1:1000' - description: > - The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the - Neutron documentation for permitted values. Defaults to permitting any - VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). - type: comma_delimited_list NovaComputeDriver: type: string default: libvirt.LibvirtDriver @@ -538,6 +325,7 @@ parameters: CinderIscsiNetwork: storage GlanceApiNetwork: storage GlanceRegistryNetwork: internal_api + IronicApiNetwork: internal_api KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints KeystonePublicApiNetwork: internal_api NeutronApiNetwork: internal_api @@ -566,9 +354,13 @@ parameters: ControllerServices: default: + - OS::TripleO::Services::CephMon + - OS::TripleO::Services::CephExternal - OS::TripleO::Services::CinderApi - OS::TripleO::Services::CinderScheduler - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::Core + - OS::TripleO::Services::Kernel - OS::TripleO::Services::Keystone - OS::TripleO::Services::GlanceApi - OS::TripleO::Services::GlanceRegistry @@ -576,6 +368,7 @@ parameters: - OS::TripleO::Services::HeatApiCfn - OS::TripleO::Services::HeatApiCloudwatch - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::MySQL - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -586,7 +379,7 @@ parameters: - OS::TripleO::Services::HAproxy - OS::TripleO::Services::Keepalived - OS::TripleO::Services::Memcached - - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::Pacemaker - OS::TripleO::Services::Redis - OS::TripleO::Services::NovaConductor - OS::TripleO::Services::MongoDb @@ -594,6 +387,22 @@ parameters: - OS::TripleO::Services::NovaScheduler - OS::TripleO::Services::NovaConsoleauth - OS::TripleO::Services::NovaVncproxy + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::CeilometerApi + - OS::TripleO::Services::CeilometerCollector + - OS::TripleO::Services::CeilometerExpirer + - OS::TripleO::Services::CeilometerAgentCentral + - OS::TripleO::Services::CeilometerAgentNotification + - OS::TripleO::Services::Horizon + - OS::TripleO::Services::GnocchiApi + - 
OS::TripleO::Services::GnocchiMetricd + - OS::TripleO::Services::GnocchiStatsd + description: A list of service resources (configured in the Heat resource_registry) which represent nested stacks for each service that should get installed on the Controllers. @@ -601,7 +410,17 @@ parameters: ComputeServices: default: + - OS::TripleO::Services::CephClient + - OS::TripleO::Services::CephExternal + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Snmp - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::ComputeNeutronCorePlugin + - OS::TripleO::Services::ComputeNeutronOvsAgent + - OS::TripleO::Services::ComputeCeilometerAgent description: A list of service resources (configured in the Heat resource_registry) which represent nested stacks for each service that should get installed on the Compute Nodes. @@ -630,7 +449,17 @@ parameters: type: json description: Optional scheduler hints to pass to nova default: {} - + BlockStorageServices: + default: + - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Snmp + description: A list of service resources (configured in the Heat + resource_registry) which represent nested stacks + for each service that should get installed on the BlockStorage nodes. + type: comma_delimited_list # Object storage specific parameters ObjectStorageCount: @@ -656,7 +485,13 @@ parameters: description: Optional scheduler hints to pass to nova default: {} ObjectStorageServices: - default: [] + default: + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Timezone description: A list of service resources (configured in the Heat resource_registry) which represent nested stacks for each service that should get installed on the ObjectStorage nodes. @@ -688,7 +523,11 @@ parameters: description: Optional scheduler hints to pass to nova default: {} CephStorageServices: - default: [] + default: + - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone description: A list of service resources (configured in the Heat resource_registry) which represent nested stacks for each service that should get installed on the CephStorage nodes. 
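  # Illustrative sketch (assumption, not part of this diff): the per-role
  # *Services parameters above are ordinary Heat parameters, so a role's
  # service list can be overridden from an environment file passed with -e.
  # A hypothetical environment file trimming the CephStorage role to its
  # storage-related services could contain:
  #
  #   parameter_defaults:
  #     CephStorageServices:
  #       - OS::TripleO::Services::CephOSD
  #       - OS::TripleO::Services::Kernel
  #       - OS::TripleO::Services::Ntp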
@@ -800,9 +639,11 @@ resources: GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} GnocchiApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]} HeatApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} + IronicApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]} KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} MysqlVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + MysqlNoBracketsVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} NovaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} @@ -811,7 +652,6 @@ resources: ControllerServiceChain: type: OS::TripleO::Services - depends_on: Networks properties: Services: {get_param: ControllerServices} EndpointMap: {get_attr: [EndpointMap, endpoint_map]} @@ -827,10 +667,6 @@ resources: properties: AdminPassword: {get_param: AdminPassword} AodhPassword: {get_param: AodhPassword} - CeilometerBackend: {get_param: CeilometerBackend} - CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret} - CeilometerPassword: {get_param: CeilometerPassword} - CeilometerMeterDispatcher: {get_param: CeilometerMeterDispatcher} CloudDomain: {get_param: CloudDomain} ControlVirtualInterface: {get_param: ControlVirtualInterface} controllerExtraConfig: {get_param: controllerExtraConfig} @@ -840,17 +676,11 @@ resources: ManageFirewall: {get_param: ManageFirewall} PurgeFirewallRules: {get_param: PurgeFirewallRules} EnableGalera: {get_param: EnableGalera} - EnableCephStorage: {get_param: ControllerEnableCephStorage} - EnableSwiftStorage: {get_param: ControllerEnableSwiftStorage} ExtraConfig: {get_param: ExtraConfig} FencingConfig: {get_param: FencingConfig} Flavor: {get_param: OvercloudControlFlavor} - GnocchiPassword: {get_param: GnocchiPassword} - GnocchiBackend: {get_param: GnocchiBackend} - GnocchiIndexerBackend: {get_param: GnocchiIndexerBackend} HAProxySyslogAddress: {get_param: HAProxySyslogAddress} HeatAuthEncryptionKey: {get_resource: HeatAuthEncryptionKey} - HorizonAllowedHosts: {get_param: HorizonAllowedHosts} HorizonSecret: {get_resource: HorizonSecret} Image: {get_param: controllerImage} ImageUpdatePolicy: {get_param: ImageUpdatePolicy} @@ -861,13 +691,10 @@ resources: MysqlInnodbBufferPoolSize: {get_param: MysqlInnodbBufferPoolSize} MysqlMaxConnections: {get_param: MysqlMaxConnections} MysqlRootPassword: {get_attr: [MysqlRootPassword, value]} - NeutronTenantMtu: {get_param: NeutronTenantMtu} NeutronPublicInterface: {get_param: NeutronPublicInterface} NeutronPassword: {get_param: NeutronPassword} - NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret} NovaIPv6: {get_param: NovaIPv6} NovaPassword: {get_param: NovaPassword} - NtpServer: {get_param: NtpServer} PcsdPassword: {get_resource: PcsdPassword} PublicVirtualInterface: {get_param: PublicVirtualInterface} RabbitPassword: {get_param: RabbitPassword} @@ -876,32 +703,12 
@@ resources: RabbitClientUseSSL: {get_param: RabbitClientUseSSL} RabbitClientPort: {get_param: RabbitClientPort} RedisPassword: {get_param: RedisPassword} - SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName} - SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword} RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]} RedisVirtualIPUri: {get_attr: [RedisVirtualIP, ip_address_uri]} SwiftHashSuffix: {get_param: SwiftHashSuffix} - SwiftMountCheck: {get_param: SwiftMountCheck} - SwiftMinPartHours: {get_param: SwiftMinPartHours} - SwiftPartPower: {get_param: SwiftPartPower} - SwiftReplicas: { get_param: SwiftReplicas} - TimeZone: {get_param: TimeZone} - VirtualIP: {get_attr: [VipMap, net_ip_map, ctlplane]} # deprecated. Use per service VIP settings instead now. - PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]} ServiceNetMap: {get_param: ServiceNetMap} EndpointMap: {get_attr: [EndpointMap, endpoint_map]} - CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} - AodhApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]} - GnocchiApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GnocchiApiNetwork]}]} - CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} - HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} - HeatApiVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} - NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} - SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} - NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} - NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} - SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} UpdateIdentifier: {get_param: UpdateIdentifier} Hostname: str_replace: @@ -911,9 +718,11 @@ resources: NodeIndex: '%index%' ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: ControllerSchedulerHints} - ServiceConfigSettings: {get_attr: [ControllerServiceChain, config_settings]} + ServiceConfigSettings: {get_attr: [ControllerServiceChain, role_data, config_settings]} + ServiceNames: {get_attr: [ControllerServiceChain, role_data, service_names]} ComputeServiceChain: + type: OS::TripleO::Services properties: Services: {get_param: ComputeServices} @@ -929,9 +738,6 @@ resources: type: OS::TripleO::Compute properties: AdminPassword: {get_param: AdminPassword} - CeilometerComputeAgent: {get_param: CeilometerComputeAgent} - CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret} - CeilometerPassword: {get_param: CeilometerPassword} CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend} CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend} Debug: {get_param: Debug} @@ -943,27 +749,10 @@ resources: KeyName: {get_param: KeyName} KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} - NeutronBridgeMappings: {get_param: NeutronBridgeMappings} - 
NeutronTenantMtu: {get_param: NeutronTenantMtu} - NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling} - NeutronEnableL2Pop : {get_param: NeutronEnableL2Pop} - NeutronFlatNetworks: {get_param: NeutronFlatNetworks} - NeutronHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} - NeutronNetworkType: {get_param: NeutronNetworkType} - NeutronTunnelTypes: {get_param: NeutronTunnelTypes} - NeutronNetworkVLANRanges: {get_param: NeutronNetworkVLANRanges} NeutronPassword: {get_param: NeutronPassword} NeutronPhysicalBridge: {get_param: HypervisorNeutronPhysicalBridge} NeutronPublicInterface: {get_param: HypervisorNeutronPublicInterface} - NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret} - NeutronCorePlugin: {get_param: NeutronCorePlugin} - NeutronServicePlugins: {get_param: NeutronServicePlugins} - NeutronTypeDrivers: {get_param: NeutronTypeDrivers} - NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers} - NeutronAgentExtensions: {get_param: NeutronAgentExtensions} # L3 HA and Failover is not relevant for Computes, should be removed - NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover} - NeutronL3HA: {get_param: NeutronL3HA} NovaApiHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} NovaComputeDriver: {get_param: NovaComputeDriver} NovaComputeExtraConfig: {get_param: NovaComputeExtraConfig} @@ -975,16 +764,12 @@ resources: NovaPassword: {get_param: NovaPassword} NovaOVSBridge: {get_param: NovaOVSBridge} NovaSecurityGroupAPI: {get_param: NovaSecurityGroupAPI} - NtpServer: {get_param: NtpServer} RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} RabbitPassword: {get_param: RabbitPassword} RabbitUserName: {get_param: RabbitUserName} RabbitClientUseSSL: {get_param: RabbitClientUseSSL} RabbitClientPort: {get_param: RabbitClientPort} - SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName} - SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword} ServiceNetMap: {get_param: ServiceNetMap} - TimeZone: {get_param: TimeZone} EndpointMap: {get_attr: [EndpointMap, endpoint_map]} UpdateIdentifier: {get_param: UpdateIdentifier} Hostname: @@ -996,8 +781,14 @@ resources: ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: NovaComputeSchedulerHints} NodeIndex: '%index%' - ServiceConfigSettings: {get_attr: [ComputeServiceChain, config_settings]} + ServiceConfigSettings: {get_attr: [ComputeServiceChain, role_data, config_settings]} + ServiceNames: {get_attr: [ComputeServiceChain, role_data, service_names]} + BlockStorageServiceChain: + type: OS::TripleO::Services + properties: + Services: {get_param: BlockStorageServices} + EndpointMap: {get_attr: [EndpointMap, endpoint_map]} BlockStorage: type: OS::Heat::ResourceGroup @@ -1008,23 +799,9 @@ resources: resource_def: type: OS::TripleO::BlockStorage properties: - Debug: {get_param: Debug} Image: {get_param: BlockStorageImage} - CinderISCSIHelper: {get_param: CinderISCSIHelper} - CinderLVMLoopDeviceSize: {get_param: CinderLVMLoopDeviceSize} - # Purpose of the dedicated BlockStorage nodes should be to use their local LVM - CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend} - CinderPassword: {get_param: CinderPassword} KeyName: {get_param: KeyName} Flavor: {get_param: OvercloudBlockStorageFlavor} - VirtualIP: {get_attr: [VipMap, net_ip_map, ctlplane]} - GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} - 
RabbitPassword: {get_param: RabbitPassword} - RabbitUserName: {get_param: RabbitUserName} - RabbitClientUseSSL: {get_param: RabbitClientUseSSL} - RabbitClientPort: {get_param: RabbitClientPort} - TimeZone: {get_param: TimeZone} - NtpServer: {get_param: NtpServer} UpdateIdentifier: {get_param: UpdateIdentifier} Hostname: str_replace: @@ -1032,13 +809,14 @@ resources: params: '%stackname%': {get_param: 'OS::stack_name'} ServiceNetMap: {get_param: ServiceNetMap} - EndpointMap: {get_attr: [EndpointMap, endpoint_map]} ExtraConfig: {get_param: ExtraConfig} BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig} CloudDomain: {get_param: CloudDomain} ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: BlockStorageSchedulerHints} NodeIndex: '%index%' + ServiceConfigSettings: {get_attr: [BlockStorageServiceChain, role_data, config_settings]} + ServiceNames: {get_attr: [BlockStorageServiceChain, role_data, service_names]} ObjectStorageServiceChain: type: OS::TripleO::Services @@ -1058,13 +836,7 @@ resources: KeyName: {get_param: KeyName} Flavor: {get_param: OvercloudSwiftStorageFlavor} HashSuffix: {get_param: SwiftHashSuffix} - MountCheck: {get_param: SwiftMountCheck} - MinPartHours: {get_param: SwiftMinPartHours} - PartPower: {get_param: SwiftPartPower} Image: {get_param: SwiftStorageImage} - Replicas: { get_param: SwiftReplicas} - TimeZone: {get_param: TimeZone} - NtpServer: {get_param: NtpServer} UpdateIdentifier: {get_param: UpdateIdentifier} ServiceNetMap: {get_param: ServiceNetMap} Hostname: @@ -1078,7 +850,8 @@ resources: ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: ObjectStorageSchedulerHints} NodeIndex: '%index%' - ServiceConfigSettings: {get_attr: [ObjectStorageServiceChain, config_settings]} + ServiceConfigSettings: {get_attr: [ObjectStorageServiceChain, role_data, config_settings]} + ServiceNames: {get_attr: [ObjectStorageServiceChain, role_data, service_names]} CephStorageServiceChain: type: OS::TripleO::Services @@ -1098,9 +871,7 @@ resources: Image: {get_param: CephStorageImage} KeyName: {get_param: KeyName} Flavor: {get_param: OvercloudCephStorageFlavor} - NtpServer: {get_param: NtpServer} ServiceNetMap: {get_param: ServiceNetMap} - TimeZone: {get_param: TimeZone} UpdateIdentifier: {get_param: UpdateIdentifier} Hostname: str_replace: @@ -1113,7 +884,8 @@ resources: ServerMetadata: {get_param: ServerMetadata} SchedulerHints: {get_param: CephStorageSchedulerHints} NodeIndex: '%index%' - ServiceConfigSettings: {get_attr: [CephStorageServiceChain, config_settings]} + ServiceConfigSettings: {get_attr: [CephStorageServiceChain, role_data, config_settings]} + ServiceNames: {get_attr: [CephStorageServiceChain, role_data, service_names]} ControllerIpListMap: type: OS::TripleO::Network::Ports::NetIpListMap @@ -1156,6 +928,9 @@ resources: keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} sahara_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} + ironic_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]} + ceph_mon_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ceph_mon_node_names: {get_attr: [Controller, hostname]} DeployIdentifier: {get_param: DeployIdentifier} 
UpdateIdentifier: {get_param: UpdateIdentifier} @@ -1279,22 +1054,10 @@ resources: public_virtual_ip: {get_attr: [VipMap, net_ip_map, external]} internal_api_virtual_ip: {get_attr: [VipMap, net_ip_map, internal_api]} sahara_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]} + ironic_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]} storage_virtual_ip: {get_attr: [VipMap, net_ip_map, storage]} storage_mgmt_virtual_ip: {get_attr: [VipMap, net_ip_map, storage_mgmt]} - ControllerBootstrapNodeConfig: - type: OS::TripleO::BootstrapNode::SoftwareConfig - properties: - bootstrap_nodeid: {get_attr: [Controller, resource.0.hostname]} - bootstrap_nodeid_ip: {get_attr: [Controller, resource.0.ip_address]} - - ControllerBootstrapNodeDeployment: - type: OS::Heat::StructuredDeployments - properties: - name: ControllerBootstrapNodeDeployment - config: {get_attr: [ControllerBootstrapNodeConfig, config_id]} - servers: {get_attr: [Controller, attributes, nova_server_resource]} - ControllerSwiftDeployment: type: OS::Heat::StructuredDeployments properties: @@ -1316,45 +1079,15 @@ resources: object_store_swift_devices: {get_attr: [ObjectStorage, swift_device]} controller_swift_proxy_memcaches: {get_attr: [Controller, swift_proxy_memcache]} - ComputeCephDeployment: - type: OS::Heat::StructuredDeployments - properties: - name: ComputeCephDeployment - config: {get_attr: [CephClusterConfig, config_id]} - servers: {get_attr: [Compute, attributes, nova_server_resource]} - - ControllerCephDeployment: - type: OS::Heat::StructuredDeployments - properties: - name: ControllerCephDeployment - config: {get_attr: [CephClusterConfig, config_id]} - servers: {get_attr: [Controller, attributes, nova_server_resource]} - - CephStorageCephDeployment: - type: OS::Heat::StructuredDeployments - properties: - name: CephStorageCephDeployment - config: {get_attr: [CephClusterConfig, config_id]} - servers: {get_attr: [CephStorage, attributes, nova_server_resource]} - - CephClusterConfig: - type: OS::TripleO::CephClusterConfig::SoftwareConfig - properties: - ceph_storage_count: {get_param: CephStorageCount} - ceph_fsid: {get_param: CephClusterFSID} - ceph_mon_key: {get_param: CephMonKey} - ceph_admin_key: {get_param: CephAdminKey} - ceph_client_key: {get_param: CephClientKey} - ceph_external_mon_ips: {get_param: CephExternalMonHost} - ceph_mon_names: {get_attr: [Controller, hostname]} - ceph_mon_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} - ControllerAllNodesDeployment: type: OS::Heat::StructuredDeployments properties: name: ControllerAllNodesDeployment config: {get_attr: [allNodesConfig, config_id]} servers: {get_attr: [Controller, attributes, nova_server_resource]} + input_values: + bootstrap_nodeid: {get_attr: [Controller, resource.0.hostname]} + bootstrap_nodeid_ip: {get_attr: [Controller, resource.0.ip_address]} ComputeAllNodesDeployment: type: OS::Heat::StructuredDeployments @@ -1362,6 +1095,9 @@ resources: name: ComputeAllNodesDeployment config: {get_attr: [allNodesConfig, config_id]} servers: {get_attr: [Compute, attributes, nova_server_resource]} + input_values: + bootstrap_nodeid: {get_attr: [Compute, resource.0.hostname]} + bootstrap_nodeid_ip: {get_attr: [Compute, resource.0.ip_address]} BlockStorageAllNodesDeployment: type: OS::Heat::StructuredDeployments @@ -1369,6 +1105,9 @@ resources: name: BlockStorageAllNodesDeployment config: {get_attr: [allNodesConfig, config_id]} servers: {get_attr: 
[BlockStorage, attributes, nova_server_resource]} + input_values: + bootstrap_nodeid: {get_attr: [BlockStorage, resource.0.hostname]} + bootstrap_nodeid_ip: {get_attr: [BlockStorage, resource.0.ip_address]} ObjectStorageAllNodesDeployment: type: OS::Heat::StructuredDeployments @@ -1376,6 +1115,9 @@ resources: name: ObjectStorageAllNodesDeployment config: {get_attr: [allNodesConfig, config_id]} servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]} + input_values: + bootstrap_nodeid: {get_attr: [ObjectStorage, resource.0.hostname]} + bootstrap_nodeid_ip: {get_attr: [ObjectStorage, resource.0.ip_address]} CephStorageAllNodesDeployment: type: OS::Heat::StructuredDeployments @@ -1383,6 +1125,9 @@ resources: name: CephStorageAllNodesDeployment config: {get_attr: [allNodesConfig, config_id]} servers: {get_attr: [CephStorage, attributes, nova_server_resource]} + input_values: + bootstrap_nodeid: {get_attr: [CephStorage, resource.0.hostname]} + bootstrap_nodeid_ip: {get_attr: [CephStorage, resource.0.ip_address]} # All Nodes Validations AllNodesValidationConfig: @@ -1470,57 +1215,38 @@ resources: # Nested stack deployment runs after all other controller deployments ControllerNodesPostDeployment: type: OS::TripleO::ControllerPostDeployment - depends_on: [ControllerBootstrapNodeDeployment, ControllerAllNodesDeployment, ControllerSwiftDeployment, ControllerCephDeployment] + depends_on: [ControllerAllNodesDeployment, ControllerSwiftDeployment] properties: servers: {get_attr: [Controller, attributes, nova_server_resource]} - NodeConfigIdentifiers: - allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} - controller_config: {get_attr: [Controller, attributes, config_identifier]} - deployment_identifier: {get_param: DeployIdentifier} - StepConfig: {get_attr: [ControllerServiceChain, step_config]} + RoleData: {get_attr: [ControllerServiceChain, role_data]} ComputeNodesPostDeployment: type: OS::TripleO::ComputePostDeployment - depends_on: [ComputeAllNodesDeployment, ComputeCephDeployment] + depends_on: [ComputeAllNodesDeployment] properties: servers: {get_attr: [Compute, attributes, nova_server_resource]} - NodeConfigIdentifiers: - allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} - compute_config: {get_attr: [Compute, attributes, config_identifier]} - deployment_identifier: {get_param: DeployIdentifier} - StepConfig: {get_attr: [ComputeServiceChain, step_config]} + RoleData: {get_attr: [ComputeServiceChain, role_data]} ObjectStorageNodesPostDeployment: type: OS::TripleO::ObjectStoragePostDeployment depends_on: [ObjectStorageSwiftDeployment, ObjectStorageAllNodesDeployment] properties: servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]} - NodeConfigIdentifiers: - allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} - objectstorage_config: {get_attr: [ObjectStorage, attributes, config_identifier]} - deployment_identifier: {get_param: DeployIdentifier} - StepConfig: {get_attr: [ObjectStorageServiceChain, step_config]} + RoleData: {get_attr: [ObjectStorageServiceChain, role_data]} BlockStorageNodesPostDeployment: type: OS::TripleO::BlockStoragePostDeployment depends_on: [ControllerNodesPostDeployment, BlockStorageAllNodesDeployment] properties: servers: {get_attr: [BlockStorage, attributes, nova_server_resource]} - NodeConfigIdentifiers: - allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} - blockstorage_config: {get_attr: [BlockStorage, attributes, config_identifier]} - deployment_identifier: {get_param: 
DeployIdentifier} + RoleData: {get_attr: [BlockStorageServiceChain, role_data]} CephStorageNodesPostDeployment: type: OS::TripleO::CephStoragePostDeployment - depends_on: [ControllerNodesPostDeployment, CephStorageCephDeployment, CephStorageAllNodesDeployment] + depends_on: [ControllerNodesPostDeployment, CephStorageAllNodesDeployment] properties: servers: {get_attr: [CephStorage, attributes, nova_server_resource]} - NodeConfigIdentifiers: - allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} - cephstorage_config: {get_attr: [CephStorage, attributes, config_identifier]} - deployment_identifier: {get_param: DeployIdentifier} - StepConfig: {get_attr: [CephStorageServiceChain, step_config]} + RoleData: {get_attr: [CephStorageServiceChain, role_data]} outputs: KeystoneURL: @@ -1550,6 +1276,9 @@ outputs: HeatInternalVip: description: VIP for Heat API internal endpoint value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} + IronicInternalVip: + description: VIP for Ironic API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]} KeystoneInternalVip: description: VIP for Keystone API internal endpoint value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index b065ddd2..803a5d49 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -56,7 +56,12 @@ parameters: type: comma_delimited_list sahara_api_node_ips: type: comma_delimited_list - + ironic_api_node_ips: + type: comma_delimited_list + ceph_mon_node_ips: + type: comma_delimited_list + ceph_mon_node_names: + type: comma_delimited_list DeployIdentifier: type: string description: > @@ -101,8 +106,10 @@ resources: - {get_param: ceph_storage_hosts} hiera: datafiles: - RedHat: - raw_data: {get_file: hieradata/RedHat.yaml} + bootstrap_node: + mapped_data: + bootstrap_nodeid: {get_input: bootstrap_nodeid} + bootstrap_nodeid_ip: {get_input: bootstrap_nodeid_ip} all_nodes: mapped_data: controller_node_ips: @@ -294,7 +301,31 @@ resources: list_join: - "','" - {get_param: sahara_api_node_ips} + ironic_api_node_ips: + str_replace: + template: "['SERVERS_LIST']" + params: + SERVERS_LIST: + list_join: + - "','" + - {get_param: ironic_api_node_ips} + tripleo::profile::base::ceph::ceph_mon_initial_members: + list_join: + - ',' + - {get_param: ceph_mon_node_names} + tripleo::profile::base::ceph::ceph_mon_host: + list_join: + - ',' + - {get_param: ceph_mon_node_ips} + tripleo::profile::base::ceph::ceph_mon_host_v6: + str_replace: + template: "'[IPS_LIST]'" + params: + IPS_LIST: + list_join: + - '],[' + - {get_param: ceph_mon_node_ips} # NOTE(gfidente): interpolation with %{} in the # hieradata file can't be used as it returns string ceilometer::rabbit_hosts: *rabbit_nodes_array @@ -306,6 +337,7 @@ resources: nova::rabbit_hosts: *rabbit_nodes_array keystone::rabbit_hosts: *rabbit_nodes_array sahara::rabbit_hosts: *rabbit_nodes_array + ironic::rabbit_hosts: *rabbit_nodes_array deploy_identifier: {get_param: DeployIdentifier} update_identifier: {get_param: UpdateIdentifier} diff --git a/puppet/bootstrap-config.yaml b/puppet/bootstrap-config.yaml deleted file mode 100644 index d88eebdf..00000000 --- a/puppet/bootstrap-config.yaml +++ /dev/null @@ -1,28 +0,0 @@ -heat_template_version: 2015-04-30 -description: 'Bootstrap Config Puppet' - -parameters: - bootstrap_nodeid: - type: string - bootstrap_nodeid_ip: - type: string - 
-resources: - - BootstrapNodeConfigImpl: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - bootstrap_node: - mapped_data: - bootstrap_nodeid: {get_param: bootstrap_nodeid} - bootstrap_nodeid_ip: {get_param: bootstrap_nodeid_ip} - -outputs: - config_id: - description: The ID of the BootstrapNodeConfigImpl resource. - value: - {get_resource: BootstrapNodeConfigImpl} diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml deleted file mode 100644 index 245710f2..00000000 --- a/puppet/ceph-cluster-config.yaml +++ /dev/null @@ -1,141 +0,0 @@ -heat_template_version: 2015-04-30 -description: 'Ceph Cluster config data for Puppet' - -parameters: - ceph_storage_count: - default: 0 - type: number - description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation. - ceph_external_mon_ips: - default: '' - type: string - description: List of external Ceph Mon host IPs. - ceph_client_key: - default: '' - type: string - description: Ceph key used to create the client user keyring. - ceph_fsid: - default: '' - type: string - ceph_admin_key: - default: '' - type: string - ceph_mon_key: - default: '' - type: string - ceph_mon_names: - type: comma_delimited_list - ceph_mon_ips: - type: comma_delimited_list - NovaRbdPoolName: - default: vms - type: string - CinderRbdPoolName: - default: volumes - type: string - GlanceRbdPoolName: - default: images - type: string - GnocchiRbdPoolName: - default: metrics - type: string - CephClientUserName: - default: openstack - type: string - CephIPv6: - default: False - type: boolean - -resources: - CephClusterConfigImpl: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - ceph_cluster: - mapped_data: - ceph_ipv6: {get_param: CephIPv6} - ceph_storage_count: {get_param: ceph_storage_count} - ceph_mon_initial_members: - list_join: - - ',' - - {get_param: ceph_mon_names} - ceph_mon_host: - list_join: - - ',' - - {get_param: ceph_mon_ips} - ceph_mon_host_v6: - str_replace: - template: "'[IPS_LIST]'" - params: - IPS_LIST: - list_join: - - '],[' - - {get_param: ceph_mon_ips} - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} - ceph::profile::params::fsid: {get_param: ceph_fsid} - ceph::profile::params::mon_key: {get_param: ceph_mon_key} - # We should use a separated key for the non-admin clients - ceph::profile::params::client_keys: - str_replace: - template: "{ - client.admin: { - secret: 'ADMIN_KEY', - mode: '0600', - cap_mon: 'allow *', - cap_osd: 'allow *', - cap_mds: 'allow *' - }, - client.bootstrap-osd: { - secret: 'ADMIN_KEY', - keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring', - cap_mon: 'allow profile bootstrap-osd' - }, - client.CLIENT_USER: { - secret: 'CLIENT_KEY', - mode: '0644', - cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' - } - }" - params: - CLIENT_USER: {get_param: CephClientUserName} - CLIENT_KEY: {get_param: ceph_client_key} - ADMIN_KEY: {get_param: ceph_admin_key} - NOVA_POOL: {get_param: NovaRbdPoolName} - CINDER_POOL: {get_param: CinderRbdPoolName} - GLANCE_POOL: {get_param: GlanceRbdPoolName} - GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} - nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} - tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName} - 
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} - gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName} - gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName} - nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} - glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} - nova::compute::rbd::rbd_keyring: - list_join: - - '.' - - - 'client' - - {get_param: CephClientUserName} - gnocchi::storage::ceph::ceph_keyring: - list_join: - - '.' - - - '/etc/ceph/ceph' - - 'client' - - {get_param: CephClientUserName} - - 'keyring' - tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName} - ceph_pools: - - {get_param: CinderRbdPoolName} - - {get_param: NovaRbdPoolName} - - {get_param: GlanceRbdPoolName} - - {get_param: GnocchiRbdPoolName} - -outputs: - config_id: - description: The ID of the CephClusterConfigImpl resource. - value: - {get_resource: CephClusterConfigImpl} diff --git a/puppet/ceph-storage-post.yaml b/puppet/ceph-storage-post.yaml index 2b9ae751..70baeb6e 100644 --- a/puppet/ceph-storage-post.yaml +++ b/puppet/ceph-storage-post.yaml @@ -10,13 +10,12 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json + RoleData: + type: json + default: {} + DeployIdentifier: + type: string description: Value which changes if the node configuration may need to be re-applied - StepConfig: - type: string - description: Config manifests that will be used to step through the deployment. - default: '' resources: @@ -29,7 +28,7 @@ resources: servers: {get_param: servers} config: {get_resource: CephStorageArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} CephStoragePuppetConfig: type: OS::Heat::SoftwareConfig @@ -47,7 +46,7 @@ resources: list_join: - '' - - get_file: manifests/overcloud_cephstorage.pp - - {get_param: StepConfig} + - {get_param: [RoleData, step_config]} CephStorageDeployment_Step2: type: OS::Heat::StructuredDeployments @@ -58,7 +57,7 @@ resources: config: {get_resource: CephStoragePuppetConfig} input_values: step: 2 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} CephStorageDeployment_Step3: type: OS::Heat::StructuredDeployments @@ -69,7 +68,7 @@ resources: config: {get_resource: CephStoragePuppetConfig} input_values: step: 3 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml index eedb35e4..77ca4dc6 100644 --- a/puppet/ceph-storage.yaml +++ b/puppet/ceph-storage.yaml @@ -21,10 +21,6 @@ parameters: default: default constraints: - custom_constraint: nova.keypair - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -34,10 +30,6 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on Ceph nodes. 
- type: string UpdateIdentifier: default: '' type: string @@ -99,10 +91,20 @@ parameters: ServiceConfigSettings: type: json default: {} + ServiceNames: + type: comma_delimited_list + default: [] + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: CephStorage: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} image_update_policy: {get_param: ImageUpdatePolicy} @@ -232,8 +234,6 @@ resources: config: {get_resource: CephStorageConfig} server: {get_resource: CephStorage} input_values: - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} @@ -250,14 +250,19 @@ resources: - heat_config_%{::deploy_config_name} - ceph_extraconfig - extraconfig + - service_names - service_configs - ceph_cluster # provided by CephClusterConfig - ceph + - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common - network merge_behavior: deeper datafiles: + service_names: + mapped_data: + service_names: {get_param: ServiceNames} service_configs: mapped_data: {get_param: ServiceConfigSettings} common: @@ -274,8 +279,6 @@ resources: ceph: raw_data: {get_file: hieradata/ceph.yaml} mapped_data: - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} ceph::profile::params::cluster_network: {get_input: ceph_cluster_network} @@ -316,6 +319,12 @@ resources: get_param: UpdateIdentifier outputs: + ip_address: + description: IP address of the server in the ctlplane network + value: {get_attr: [CephStorage, networks, ctlplane, 0]} + hostname: + description: Hostname of the server + value: {get_attr: [CephStorage, name]} hosts_entry: value: str_replace: @@ -389,12 +398,3 @@ outputs: management_ip_address: description: IP address of the server in the management network value: {get_attr: [ManagementPort, ip_address]} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [CephStorageDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_attr: [CephStorageExtraConfigPre, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml index f470203f..c3dd403e 100644 --- a/puppet/cinder-storage-post.yaml +++ b/puppet/cinder-storage-post.yaml @@ -8,9 +8,12 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json + DeployIdentifier: + type: string description: Value which changes if the node configuration may need to be re-applied + RoleData: + type: json + default: {} resources: @@ -23,7 +26,7 @@ resources: servers: {get_param: servers} config: {get_resource: VolumeArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} VolumePuppetConfig: type: OS::Heat::SoftwareConfig @@ -32,22 +35,55 @@ resources: group: puppet options: enable_debug: {get_param: ConfigDebug} + 
enable_hiera: True + enable_facter: False + inputs: + - name: step outputs: - name: result config: - get_file: manifests/overcloud_volume.pp + list_join: + - '' + - - get_file: manifests/overcloud_volume.pp + - {get_param: [RoleData, step_config]} + + VolumeDeployment_Step2: + type: OS::Heat::StructuredDeployments + depends_on: VolumeArtifactsDeploy + properties: + name: VolumeDeployment_Step2 + servers: {get_param: servers} + config: {get_resource: VolumePuppetConfig} + input_values: + step: 2 + update_identifier: {get_param: DeployIdentifier} - VolumeDeployment_Step1: + VolumeDeployment_Step3: type: OS::Heat::StructuredDeployments + depends_on: VolumeDeployment_Step2 properties: - name: VolumeDeployment_Step1 + name: VolumeDeployment_Step3 servers: {get_param: servers} config: {get_resource: VolumePuppetConfig} + input_values: + step: 3 + update_identifier: {get_param: DeployIdentifier} + + VolumeDeployment_Step4: + type: OS::Heat::StructuredDeployments + depends_on: VolumeDeployment_Step3 + properties: + name: VolumeDeployment_Step4 + servers: {get_param: servers} + config: {get_resource: VolumePuppetConfig} + input_values: + step: 4 + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: - depends_on: VolumeDeployment_Step1 + depends_on: VolumeDeployment_Step4 type: OS::TripleO::NodeExtraConfigPost properties: servers: {get_param: servers} diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml index d760de5e..edd81b9d 100644 --- a/puppet/cinder-storage.yaml +++ b/puppet/cinder-storage.yaml @@ -4,29 +4,6 @@ parameters: Image: default: overcloud-cinder-volume type: string - CinderEnableIscsiBackend: - default: true - description: Whether to enable or not the Iscsi backend for Cinder - type: boolean - CinderISCSIHelper: - default: lioadm - description: The iSCSI helper to use with cinder. - type: string - CinderLVMLoopDeviceSize: - default: 10280 - description: The size of the loopback file used by the cinder LVM driver. - type: number - CinderPassword: - description: The password for the cinder service and db account, used by cinder-api. - type: string - hidden: true - Debug: - default: '' - description: Set to True to enable debugging on all services. - type: string - VirtualIP: # deprecated. Use per service VIPs instead. - default: '' - type: string ExtraConfig: default: {} description: | @@ -50,22 +27,6 @@ parameters: default: default description: Name of an existing Nova key pair to enable SSH access to the instances type: string - RabbitPassword: - type: string - hidden: true - RabbitUserName: - default: 'guest' - type: string - RabbitClientUseSSL: - default: false - description: > - Rabbit client subscriber parameter to specify - an SSL connection to the RabbitMQ host. 
- type: string - RabbitClientPort: - default: 5672 - description: Set rabbit subscriber port, change this if using SSL - type: number SnmpdReadonlyUserName: default: ro_snmp_user description: The user name for SNMPd with readonly rights running on all Overcloud nodes @@ -74,10 +35,6 @@ parameters: description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -100,18 +57,6 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on Cinder nodes. - type: string - GlanceApiVirtualIP: - type: string - default: '' NetworkDeploymentActions: type: comma_delimited_list description: > @@ -143,11 +88,23 @@ parameters: NodeIndex: type: number default: 0 - + ServiceConfigSettings: + type: json + default: {} + ServiceNames: + type: comma_delimited_list + default: [] + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: BlockStorage: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} @@ -277,37 +234,13 @@ resources: server: {get_resource: BlockStorage} config: {get_resource: BlockStorageConfig} input_values: - debug: {get_param: Debug} - cinder_dsn: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://cinder:' - - {get_param: CinderPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/cinder' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} - cinder_lvm_loop_device_size: - str_replace: - template: sizeM - params: - size: {get_param: CinderLVMLoopDeviceSize} - cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} - cinder_iscsi_helper: {get_param: CinderISCSIHelper} cinder_iscsi_ip_address: str_replace: template: "'IP'" params: IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} - glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} - rabbit_username: {get_param: RabbitUserName} - rabbit_password: {get_param: RabbitPassword} - rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} - rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} @@ -323,6 +256,8 @@ resources: - heat_config_%{::deploy_config_name} - volume_extraconfig - extraconfig + - service_names + - service_configs - volume - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' @@ -330,6 +265,11 @@ resources: - network merge_behavior: deeper datafiles: + service_names: + mapped_data: + service_names: {get_param: ServiceNames} + service_configs: + mapped_data: {get_param: ServiceConfigSettings} common: raw_data: {get_file: 
hieradata/common.yaml} network: @@ -345,19 +285,7 @@ resources: raw_data: {get_file: hieradata/volume.yaml} mapped_data: # Cinder - cinder::debug: {get_input: debug} - cinder::setup_test_volume::size: {get_input: cinder_lvm_loop_device_size} - cinder_iscsi_helper: {get_input: cinder_iscsi_helper} - cinder::database_connection: {get_input: cinder_dsn} - cinder::rabbit_userid: {get_input: rabbit_username} - cinder::rabbit_password: {get_input: rabbit_password} - cinder::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - cinder::rabbit_port: {get_input: rabbit_client_port} - cinder_enable_iscsi_backend: {get_input: cinder_enable_iscsi_backend} - cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address} - cinder::glance::glance_api_servers: {get_input: glance_api_servers} - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} + tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} @@ -392,6 +320,12 @@ resources: get_param: UpdateIdentifier outputs: + ip_address: + description: IP address of the server in the ctlplane network + value: {get_attr: [BlockStorage, networks, ctlplane, 0]} + hostname: + description: Hostname of the server + value: {get_attr: [BlockStorage, name]} hosts_entry: value: str_replace: @@ -465,11 +399,3 @@ outputs: management_ip_address: description: IP address of the server in the management network value: {get_attr: [ManagementPort, ip_address]} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - '' - - - {get_attr: [BlockStorageDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/compute-post.yaml b/puppet/compute-post.yaml index 698cadba..c1b37772 100644 --- a/puppet/compute-post.yaml +++ b/puppet/compute-post.yaml @@ -10,13 +10,12 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json - description: Value which changes if the node configuration may need to be re-applied - StepConfig: + RoleData: + type: json + default: {} + DeployIdentifier: type: string - description: Config manifests that will be used to step through the deployment. 
- default: '' + description: Value which changes if the node configuration may need to be re-applied resources: @@ -29,7 +28,7 @@ resources: servers: {get_param: servers} config: {get_resource: ComputeArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ComputePuppetConfig: type: OS::Heat::SoftwareConfig @@ -47,7 +46,7 @@ resources: list_join: - '' - - get_file: manifests/overcloud_compute.pp - - {get_param: StepConfig} + - {get_param: [RoleData, step_config]} ComputeServicesBaseDeployment_Step2: type: OS::Heat::StructuredDeployments @@ -58,7 +57,7 @@ resources: config: {get_resource: ComputePuppetConfig} input_values: step: 2 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ComputeOvercloudServicesDeployment_Step3: type: OS::Heat::StructuredDeployments @@ -69,7 +68,7 @@ resources: config: {get_resource: ComputePuppetConfig} input_values: step: 3 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ComputeOvercloudServicesDeployment_Step4: type: OS::Heat::StructuredDeployments @@ -80,7 +79,7 @@ resources: config: {get_resource: ComputePuppetConfig} input_values: step: 4 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. diff --git a/puppet/compute.yaml b/puppet/compute.yaml index d3b17784..0e029162 100644 --- a/puppet/compute.yaml +++ b/puppet/compute.yaml @@ -69,43 +69,6 @@ parameters: KeystonePublicApiVirtualIP: type: string default: '' - NeutronBridgeMappings: - description: > - The OVS logical->physical bridge mappings to use. See the Neutron - documentation for details. Defaults to mapping br-ex - the external - bridge on hosts - to a physical name 'datacentre' which can be used - to create provider networks (and we use this for the default floating - network) - if changing this either use different post-install network - scripts or be sure to keep 'datacentre' as a mapping network name. - type: comma_delimited_list - default: "datacentre:br-ex" - NeutronEnableTunnelling: - type: string - default: "True" - NeutronEnableL2Pop: - type: string - description: > - Enable/disable the L2 population feature in the Neutron agents. - default: "False" - NeutronFlatNetworks: - type: comma_delimited_list - default: 'datacentre' - description: > - If set, flat networks to configure in neutron plugins. - NeutronHost: - type: string - default: '' # Has to be here because of the ignored empty value bug - NeutronNetworkType: - type: comma_delimited_list - description: The tenant network type for Neutron. - default: 'vxlan' - NeutronNetworkVLANRanges: - default: 'datacentre:1:1000' - description: > - The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the - Neutron documentation for permitted values. Defaults to permitting any - VLAN on the 'datacentre' physical network (See NeutronBridgeMappings). - type: comma_delimited_list NeutronPassword: description: The password for the neutron service account, used by neutron agents. type: string @@ -118,73 +81,6 @@ parameters: default: nic1 description: A port to add to the NeutronPhysicalBridge. type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. 
This - value will be used to set the MTU on the virtual Ethernet device. - This number is related to the value of NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. - default: 1400 - type: number - NeutronTunnelTypes: - type: comma_delimited_list - description: | - The tunnel types for the Neutron tenant network. - default: 'vxlan' - NeutronTunnelIdRanges: - description: | - Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges - of GRE tunnel IDs that are available for tenant network allocation - default: ["1:4094", ] - type: comma_delimited_list - NeutronVniRanges: - description: | - Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges - of VXLAN VNI IDs that are available for tenant network allocation - default: ["1:4094", ] - type: comma_delimited_list - NeutronMetadataProxySharedSecret: - description: Shared secret to prevent spoofing - type: string - hidden: true - NeutronCorePlugin: - default: 'ml2' - description: | - The core plugin for Neutron. The value should be the entrypoint to be loaded - from neutron.core_plugins namespace. - type: string - NeutronServicePlugins: - default: "router,qos" - description: | - Comma-separated list of service plugin entrypoints to be loaded from the - neutron.service_plugins namespace. - type: comma_delimited_list - NeutronTypeDrivers: - default: "vxlan,vlan,flat,gre" - description: | - Comma-separated list of network type driver entrypoints to be loaded. - type: comma_delimited_list - NeutronMechanismDrivers: - default: 'openvswitch' - description: | - The mechanism drivers for the Neutron tenant network. - type: comma_delimited_list - NeutronAgentExtensions: - default: "qos" - description: | - Comma-separated list of extensions enabled for the Neutron agents. - type: comma_delimited_list - # Not relevant for Computes, should be removed - NeutronAllowL3AgentFailover: - default: 'True' - description: Allow automatic l3-agent failover - type: string - # Not relevant for Computes, should be removed - NeutronL3HA: - default: 'False' - description: Whether to enable l3-agent HA - type: string NodeIndex: type: number default: 0 @@ -233,10 +129,6 @@ parameters: default: 'neutron' description: The full class name of the security API class type: string - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list RabbitHost: type: string default: '' # Has to be here because of the ignored empty value bug @@ -258,14 +150,6 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number - SnmpdReadonlyUserName: - default: ro_snmp_user - description: The user name for SNMPd with readonly rights running on all Overcloud nodes - type: string - SnmpdReadonlyUserPassword: - description: The user password for SNMPd with readonly rights running on all Overcloud nodes - type: string - hidden: true UpgradeLevelNovaCompute: type: string description: Nova Compute upgrade level @@ -284,10 +168,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on compute nodes. 
- type: string UpdateIdentifier: default: '' type: string @@ -332,11 +212,21 @@ parameters: ServiceConfigSettings: type: json default: {} + ServiceNames: + type: comma_delimited_list + default: [] + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: NovaCompute: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} @@ -474,6 +364,7 @@ resources: - heat_config_%{::deploy_config_name} - compute_extraconfig - extraconfig + - service_names - service_configs - compute - ceph_cluster # provided by CephClusterConfig @@ -489,6 +380,9 @@ resources: - neutron_opencontrail_data # Optionally provided by ComputeExtraConfigPre merge_behavior: deeper datafiles: + service_names: + mapped_data: + service_names: {get_param: ServiceNames} service_configs: mapped_data: {get_param: ServiceConfigSettings} compute_extraconfig: @@ -516,6 +410,10 @@ resources: nova::rabbit_port: {get_input: rabbit_client_port} nova::upgrade_level_compute: {get_input: upgrade_level_nova_compute} nova_compute_driver: {get_input: nova_compute_driver} + # TODO(emilien): move libvirt & migration parameters in libvirt profile + # used to deploy libvirt/kvm dependencies: + nova::compute::libvirt::services::libvirt_virt_type: {get_input: nova_compute_libvirt_type} + # used to configured nova.conf: nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type} nova::compute::neutron::libvirt_vif_driver: {get_input: nova_compute_libvirt_vif_driver} nova_api_host: {get_input: nova_api_host} @@ -529,7 +427,6 @@ resources: nova::migration::live_migration_tunnelled: {get_input: nova_enable_rbd_backend} rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} - nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} nova::vncproxy::common::vncproxy_protocol: {get_input: nova_vncproxy_protocol} nova::vncproxy::common::vncproxy_host: {get_input: nova_vncproxy_host} @@ -544,43 +441,19 @@ resources: ceilometer::telemetry_secret: {get_input: ceilometer_metering_secret} ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} ceilometer::agent::auth::auth_url: {get_input: ceilometer_agent_auth_url} - ceilometer_compute_agent: {get_input: ceilometer_compute_agent} - snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} - snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} nova::glance_api_servers: {get_input: glance_api_servers} neutron::debug: {get_input: debug} neutron::rabbit_password: {get_input: rabbit_password} neutron::rabbit_user: {get_input: rabbit_username} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} neutron::rabbit_port: {get_input: rabbit_client_port} - neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks} - neutron_host: {get_input: neutron_host} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} - neutron::network_device_mtu: {get_input: neutron_tenant_mtu} - neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types} - neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types} - neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions} - neutron::plugins::ml2::network_vlan_ranges: {get_input: 
neutron_network_vlan_ranges} - neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} - neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} - neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings} - neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} - neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop} - neutron_physical_bridge: {get_input: neutron_physical_bridge} - neutron_public_interface: {get_input: neutron_public_interface} nova::network::neutron::neutron_password: {get_input: neutron_password} nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} - neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} - neutron::core_plugin: {get_input: neutron_core_plugin} - neutron::service_plugins: {get_input: neutron_service_plugins} - neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} - neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers} keystone_public_api_virtual_ip: {get_input: keystone_vip} admin_password: {get_input: admin_password} - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} tripleo::packages::enable_install: {get_input: enable_package_install} tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} @@ -619,76 +492,10 @@ resources: upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} - ceilometer_compute_agent: {get_param: CeilometerComputeAgent} - ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]} - snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} - snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} - neutron_flat_networks: - str_replace: - template: NETWORKS - params: - NETWORKS: {get_param: NeutronFlatNetworks} - neutron_host: {get_param: NeutronHost} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} - neutron_tunnel_id_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronTunnelIdRanges} - neutron_vni_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronVniRanges} - neutron_tenant_network_types: - str_replace: - template: TYPES - params: - TYPES: {get_param: NeutronNetworkType} - neutron_tunnel_types: - str_replace: - template: TYPES - params: - TYPES: {get_param: NeutronTunnelTypes} - neutron_network_vlan_ranges: - str_replace: - template: RANGES - params: - RANGES: {get_param: NeutronNetworkVLANRanges} - neutron_bridge_mappings: - str_replace: - template: MAPPINGS - params: - MAPPINGS: {get_param: NeutronBridgeMappings} - neutron_tenant_mtu: {get_param: NeutronTenantMtu} - neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} - neutron_enable_l2pop: {get_param: NeutronEnableL2Pop} - neutron_physical_bridge: {get_param: NeutronPhysicalBridge} - neutron_public_interface: {get_param: NeutronPublicInterface} neutron_password: {get_param: NeutronPassword} - neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} - neutron_core_plugin: {get_param: 
NeutronCorePlugin} - neutron_service_plugins: - str_replace: - template: PLUGINS - params: - PLUGINS: {get_param: NeutronServicePlugins} - neutron_type_drivers: - str_replace: - template: DRIVERS - params: - DRIVERS: {get_param: NeutronTypeDrivers} - neutron_mechanism_drivers: - str_replace: - template: MECHANISMS - params: - MECHANISMS: {get_param: NeutronMechanismDrivers} - neutron_agent_extensions: - str_replace: - template: AGENT_EXTENSIONS - params: - AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions} neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} neutron_auth_url: {get_param: [EndpointMap, KeystoneV3Admin, uri]} keystone_vip: {get_param: KeystonePublicApiVirtualIP} @@ -697,8 +504,6 @@ resources: rabbit_password: {get_param: RabbitPassword} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} @@ -819,12 +624,3 @@ outputs: description: Heat resource handle for the Nova compute server value: {get_resource: NovaCompute} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [NovaComputeDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_attr: [ComputeExtraConfigPre, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml index dfebcf82..5116cac7 100644 --- a/puppet/controller-config-pacemaker.yaml +++ b/puppet/controller-config-pacemaker.yaml @@ -29,7 +29,6 @@ resources: list_join: - '' - - get_file: manifests/overcloud_controller_pacemaker.pp - - get_file: manifests/ringbuilder.pp - {get_param: StepConfig} outputs: diff --git a/puppet/controller-config.yaml b/puppet/controller-config.yaml index 458aff32..cadba703 100644 --- a/puppet/controller-config.yaml +++ b/puppet/controller-config.yaml @@ -29,7 +29,6 @@ resources: list_join: - '' - - get_file: manifests/overcloud_controller.pp - - get_file: manifests/ringbuilder.pp - {get_param: StepConfig} outputs: diff --git a/puppet/controller-post.yaml b/puppet/controller-post.yaml index 36f9b4f8..4af6cb46 100644 --- a/puppet/controller-post.yaml +++ b/puppet/controller-post.yaml @@ -10,13 +10,12 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json - description: Value which changes if the node configuration may need to be re-applied - StepConfig: + RoleData: + type: json + default: {} + DeployIdentifier: type: string - description: Config manifests that will be used to step through the deployment. - default: '' + description: Value which changes if the node configuration may need to be re-applied resources: @@ -34,12 +33,12 @@ resources: properties: servers: {get_param: servers} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerPuppetConfig: type: OS::TripleO::ControllerConfig properties: - StepConfig: {get_param: StepConfig} + StepConfig: {get_param: [RoleData, step_config]} # Step through a series of Puppet runs using the same manifest. 
# NOTE: To enable stepping through the deployments via heat hooks, @@ -54,7 +53,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 1 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerServicesBaseDeployment_Step2: type: OS::Heat::StructuredDeployments @@ -65,7 +64,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 2 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerOvercloudServicesDeployment_Step3: type: OS::Heat::StructuredDeployments @@ -76,7 +75,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 3 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerOvercloudServicesDeployment_Step4: type: OS::Heat::StructuredDeployments @@ -87,7 +86,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 4 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerOvercloudServicesDeployment_Step5: type: OS::Heat::StructuredDeployments @@ -98,7 +97,7 @@ resources: config: {get_resource: ControllerPuppetConfig} input_values: step: 5 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} ControllerPostPuppet: type: OS::TripleO::Tasks::ControllerPostPuppet @@ -106,7 +105,7 @@ resources: properties: servers: {get_param: servers} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. diff --git a/puppet/controller.yaml b/puppet/controller.yaml index 70989ccf..679fd90b 100644 --- a/puppet/controller.yaml +++ b/puppet/controller.yaml @@ -8,47 +8,10 @@ parameters: description: The password for the keystone admin account, used for monitoring, querying neutron etc. type: string hidden: true - AodhApiVirtualIP: - type: string - default: '' AodhPassword: description: The password for the aodh services. type: string hidden: true - #TODO(composable Redis): Remove the Redis password param - #As is used by ceilometer - CeilometerApiVirtualIP: - type: string - default: '' - CeilometerBackend: - default: 'mongodb' - description: The ceilometer backend type. - type: string - CeilometerMeteringSecret: - description: Secret shared by the ceilometer services. - type: string - hidden: true - CeilometerPassword: - description: The password for the ceilometer service and db account. - type: string - hidden: true - CeilometerStoreEvents: - default: false - description: Whether to store events in ceilometer. - type: boolean - CeilometerMeterDispatcher: - default: 'database' - description: Dispatcher to process meter data - type: string - constraints: - - allowed_values: ['gnocchi', 'database'] - CinderApiVirtualIP: - type: string - default: '' - CeilometerWorkers: - default: 0 - description: Number of workers for Ceilometer service. 
- type: number controllerExtraConfig: default: {} description: | @@ -92,14 +55,6 @@ parameters: default: true description: Whether to deploy a LoadBalancer on the Controller type: boolean - EnableCephStorage: - default: false - description: Whether to deploy Ceph Storage (OSD) on the Controller - type: boolean - EnableSwiftStorage: - default: true - description: Whether to enable Swift Storage on the Controller - type: boolean ExtraConfig: default: {} description: | @@ -143,24 +98,6 @@ parameters: type: string constraints: - custom_constraint: nova.flavor - GnocchiBackend: - default: file - description: The short name of the Gnocchi backend to use. Should be one - of swift, rbd, or file - type: string - constraints: - - allowed_values: ['swift', 'file', 'rbd'] - GnocchiIndexerBackend: - default: 'mysql' - description: The short name of the Gnocchi indexer backend to use. - type: string - GnocchiApiVirtualIP: - type: string - default: '' - GnocchiPassword: - description: The password for the gnocchi service and db account. - type: string - hidden: true HAProxyStatsPassword: description: Password for HAProxy stats endpoint type: string @@ -176,10 +113,6 @@ parameters: description: Auth encryption key for heat-engine type: string hidden: true - HorizonAllowedHosts: - default: '*' - description: A list of IP/Hostname allowed to connect to horizon - type: comma_delimited_list HorizonSecret: description: Secret key for Django type: string @@ -219,9 +152,6 @@ parameters: default: false description: Whether IPtables rules should be purged before setting up the new ones. type: boolean - SaharaApiVirtualIP: - type: string - default: '' MysqlClusterUniquePart: description: A unique identifier of the MySQL cluster the controller is in. type: string @@ -259,18 +189,6 @@ parameters: default: nic1 description: What interface to bridge onto br-ex for network nodes. type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This number is related to the value of NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. - default: 1400 - type: number - NovaApiVirtualIP: - type: string - default: '' NovaEnableDBPurge: default: true description: | @@ -284,10 +202,6 @@ parameters: description: The password for the nova service and db account, used by nova-api. type: string hidden: true - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list PcsdPassword: type: string description: The password for the 'pcsd' user. @@ -298,9 +212,6 @@ parameters: Specifies the interface where the public-facing virtual ip will be assigned. This should be int_public when a VLAN is being used. 
type: string - PublicVirtualIP: - type: string - default: '' # Has to be here because of the ignored empty value bug RabbitCookie: type: string default: '' # Has to be here because of the ignored empty value bug @@ -334,65 +245,18 @@ parameters: type: string default: '' # Has to be here because of the ignored empty value bug description: An IP address which is wrapped in brackets in case of IPv6 - SnmpdReadonlyUserName: - default: ro_snmp_user - description: The user name for SNMPd with readonly rights running on all Overcloud nodes - type: string - SnmpdReadonlyUserPassword: - description: The user password for SNMPd with readonly rights running on all Overcloud nodes - type: string - hidden: true SwiftHashSuffix: description: A random string to be used as a salt when hashing to determine mappings in the ring. hidden: true type: string - SwiftMountCheck: - default: 'false' - description: Value of mount_check in Swift account/container/object -server.conf - type: boolean - SwiftMinPartHours: - type: number - default: 1 - description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance. - SwiftPartPower: - default: 10 - description: Partition Power to use when building Swift rings - type: number - SwiftRingBuild: - default: true - description: Whether to manage Swift rings or not - type: boolean - SwiftProxyVirtualIP: - type: string - default: '' - SwiftReplicas: - type: number - default: 3 - description: How many replicas to use in the swift rings. - TimeZone: - default: 'UTC' - description: The timezone to be set on controller nodes. - type: string UpgradeLevelNovaCompute: type: string description: Nova Compute upgrade level default: '' - VirtualIP: # DEPRECATED: use per service settings instead - type: string - default: '' # Has to be here because of the ignored empty value bug - HeatApiVirtualIP: - type: string - default: '' - HeatApiVirtualIPUri: - type: string - default: '' MysqlVirtualIP: type: string default: '' - NeutronApiVirtualIP: - type: string - default: '' EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -454,6 +318,13 @@ parameters: ServiceConfigSettings: type: json default: {} + ServiceNames: + type: comma_delimited_list + default: [] + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 parameter_groups: - label: deprecated @@ -464,7 +335,10 @@ parameter_groups: resources: Controller: - type: OS::Nova::Server + type: OS::TripleO::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} image_update_policy: {get_param: ImageUpdatePolicy} @@ -614,12 +488,10 @@ resources: server: {get_resource: Controller} input_values: bootstack_nodeid: {get_attr: [Controller, name]} - ceilometer_workers: {get_param: CeilometerWorkers} haproxy_log_address: {get_param: HAProxySyslogAddress} haproxy_stats_password: {get_param: HAProxyStatsPassword} haproxy_stats_user: {get_param: HAProxyStatsUser} heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} - horizon_allowed_hosts: {get_param: HorizonAllowedHosts} horizon_secret: {get_param: HorizonSecret} admin_password: {get_param: AdminPassword} debug: {get_param: Debug} @@ -635,8 +507,6 @@ resources: enable_fencing: {get_param: EnableFencing} enable_galera: {get_param: EnableGalera} enable_load_balancer: {get_param: EnableLoadBalancer} - enable_ceph_storage: {get_param: 
EnableCephStorage} - enable_swift_storage: {get_param: EnableSwiftStorage} manage_firewall: {get_param: ManageFirewall} purge_firewall_rules: {get_param: PurgeFirewallRules} mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize} @@ -650,24 +520,15 @@ resources: CLUSTER: {get_param: MysqlClusterUniquePart} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron_password: {get_param: NeutronPassword} - neutron_tenant_mtu: {get_param: NeutronTenantMtu} neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } neutron_auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] } - ceilometer_backend: {get_param: CeilometerBackend} - ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} - ceilometer_password: {get_param: CeilometerPassword} - ceilometer_store_events: {get_param: CeilometerStoreEvents} aodh_password: {get_param: AodhPassword} aodh_internal_url: { get_param: [ EndpointMap, AodhInternal, uri ] } aodh_public_url: { get_param: [ EndpointMap, AodhPublic, uri ] } aodh_admin_url: { get_param: [ EndpointMap, AodhAdmin, uri ] } - ceilometer_meter_dispatcher: {get_param: CeilometerMeterDispatcher} - gnocchi_password: {get_param: GnocchiPassword} - gnocchi_backend: {get_param: GnocchiBackend} - gnocchi_indexer_backend: {get_param: GnocchiIndexerBackend} ceilometer_coordination_url: list_join: - '' @@ -676,55 +537,23 @@ resources: - '@' - {get_param: RedisVirtualIPUri} - ':6379/' - ceilometer_dsn: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://ceilometer:' - - {get_param: CeilometerPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/ceilometer' - gnocchi_dsn: + aodh_dsn: list_join: - '' - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://gnocchi:' - - {get_param: GnocchiPassword} + - '://aodh:' + - {get_param: AodhPassword} - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - - '/gnocchi' + - '/aodh' gnocchi_internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]} gnocchi_public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] } gnocchi_admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] } - ceilometer_public_url: {get_param: [EndpointMap, CeilometerPublic, uri]} - ceilometer_internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]} - ceilometer_admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]} - snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} - snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} nova_enable_db_purge: {get_param: NovaEnableDBPurge} nova_ipv6: {get_param: NovaIPv6} corosync_ipv6: {get_param: CorosyncIPv6} memcached_ipv6: {get_param: MemcachedIPv6} nova_password: {get_param: NovaPassword} - nova_dsn: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://nova:' - - {get_param: NovaPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/nova' - nova_api_dsn: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://nova_api:' - - {get_param: NovaPassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/nova_api' upgrade_level_nova_compute: {get_param: UpgradeLevelNovaCompute} instance_name_template: {get_param: InstanceNameTemplate} nova_public_url: 
{get_param: [EndpointMap, NovaPublic, uri]} @@ -737,16 +566,9 @@ resources: rabbit_cookie: {get_param: RabbitCookie} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} control_virtual_interface: {get_param: ControlVirtualInterface} public_virtual_interface: {get_param: PublicVirtualInterface} swift_hash_suffix: {get_param: SwiftHashSuffix} - swift_part_power: {get_param: SwiftPartPower} - swift_ring_build: {get_param: SwiftRingBuild} - swift_replicas: {get_param: SwiftReplicas} - swift_min_part_hours: {get_param: SwiftMinPartHours} - swift_mount_check: {get_param: SwiftMountCheck} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} @@ -788,6 +610,7 @@ resources: ceph_cluster_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} ceph_public_network: {get_attr: [NetIpMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + ironic_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, IronicApiNetwork]}]} # Map heat metadata into hiera datafiles ControllerConfig: @@ -802,6 +625,7 @@ resources: - controller_extraconfig - extraconfig - service_configs + - service_names - controller - database - object @@ -820,11 +644,12 @@ resources: - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre - - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre - midonet_data #Optionally provided by AllNodesExtraConfig - - neutron_opencontrail_data # Optionally provided by ControllerExtraConfigPre merge_behavior: deeper datafiles: + service_names: + mapped_data: + service_names: {get_param: ServiceNames} service_configs: mapped_data: {get_param: ServiceConfigSettings} controller_extraconfig: @@ -868,11 +693,6 @@ resources: swift::proxy::proxy_local_net_ip: {get_input: swift_proxy_network} swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} swift::swift_hash_path_suffix: {get_input: swift_hash_suffix} - tripleo::ringbuilder::build_ring: { get_input: swift_ring_build } - tripleo::ringbuilder::part_power: {get_input: swift_part_power} - tripleo::ringbuilder::replicas: {get_input: swift_replicas} - tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours} - swift_mount_check: {get_input: swift_mount_check} # Cinder tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: {get_input: cinder_iscsi_network} @@ -909,8 +729,6 @@ resources: # MySQL admin_password: {get_input: admin_password} enable_galera: {get_input: enable_galera} - enable_ceph_storage: {get_input: enable_ceph_storage} - enable_swift_storage: {get_input: enable_swift_storage} mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size} mysql_max_connections: {get_input: mysql_max_connections} mysql::server::root_password: {get_input: mysql_root_password} @@ -921,7 +739,6 @@ resources: # Neutron neutron::bind_host: {get_input: neutron_api_network} - neutron::network_device_mtu: {get_input: neutron_tenant_mtu} 
neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} neutron::keystone::auth::public_url: {get_input: neutron_public_url } @@ -931,37 +748,12 @@ resources: neutron::keystone::auth::region: {get_input: keystone_region} # Ceilometer - ceilometer_backend: {get_input: ceilometer_backend} - ceilometer_mysql_conn_string: {get_input: ceilometer_dsn} - ceilometer::telemetry_secret: {get_input: ceilometer_metering_secret} - ceilometer::rabbit_userid: {get_input: rabbit_username} - ceilometer::rabbit_password: {get_input: rabbit_password} - ceilometer::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - ceilometer::rabbit_port: {get_input: rabbit_client_port} - ceilometer::debug: {get_input: debug} ceilometer::api::host: {get_input: ceilometer_api_network} - ceilometer::api::keystone_password: {get_input: ceilometer_password} - ceilometer::api::auth_uri: {get_input: keystone_auth_uri} - ceilometer::api::identity_uri: {get_input: keystone_identity_uri} - ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} - ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri} - ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} - ceilometer::agent::notification::store_events: {get_input: ceilometer_store_events} - ceilometer::db::mysql::password: {get_input: ceilometer_password} - ceilometer::collector::meter_dispatcher: {get_input: ceilometer_meter_dispatcher} - ceilometer::dispatcher::gnocchi::url: {get_input: gnocchi_internal_url } - ceilometer::dispatcher::gnocchi::filter_project: 'service' - ceilometer::dispatcher::gnocchi::archive_policy: 'low' - ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' - ceilometer::keystone::auth::public_url: {get_input: ceilometer_public_url } - ceilometer::keystone::auth::internal_url: {get_input: ceilometer_internal_url } - ceilometer::keystone::auth::admin_url: {get_input: ceilometer_admin_url } - ceilometer::keystone::auth::password: {get_input: ceilometer_password } - ceilometer::keystone::auth::region: {get_input: keystone_region} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} # Aodh + aodh_mysql_conn_string: {get_input: aodh_dsn} aodh::rabbit_userid: {get_input: rabbit_username} aodh::rabbit_password: {get_input: rabbit_password} aodh::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} @@ -974,6 +766,7 @@ resources: aodh::api::keystone_password: {get_input: aodh_password} aodh::api::keystone_auth_uri: {get_input: keystone_auth_uri} aodh::api::keystone_identity_uri: {get_input: keystone_identity_uri} + aodh::auth::auth_url: {get_input: keystone_auth_uri} aodh::auth::auth_password: {get_input: aodh_password} aodh::db::mysql::password: {get_input: aodh_password} # for a migration path from ceilometer-alarm to aodh, we use the same database & coordination @@ -985,24 +778,14 @@ resources: aodh::keystone::auth::region: {get_input: keystone_region} # Gnocchi - gnocchi_backend: {get_input: gnocchi_backend} - gnocchi_indexer_backend: {get_input: gnocchi_indexer_backend} - gnocchi_mysql_conn_string: {get_input: gnocchi_dsn} - gnocchi::debug: {get_input: debug} - gnocchi::wsgi::apache::ssl: false gnocchi::wsgi::apache::bind_host: {get_input: gnocchi_api_network} - gnocchi::api::service_name: 'httpd' gnocchi::api::host: {get_input: gnocchi_api_network} - gnocchi::api::keystone_password: {get_input: 
gnocchi_password} gnocchi::api::keystone_auth_uri: {get_input: keystone_auth_uri} gnocchi::api::keystone_identity_uri: {get_input: keystone_identity_uri} - gnocchi::db::mysql::password: {get_input: gnocchi_password} gnocchi::storage::swift::swift_authurl: {get_input: keystone_auth_uri} - gnocchi::storage::swift::swift_key: {get_input: gnocchi_password} gnocchi::keystone::auth::public_url: {get_input: gnocchi_public_url } gnocchi::keystone::auth::internal_url: {get_input: gnocchi_internal_url } gnocchi::keystone::auth::admin_url: {get_input: gnocchi_admin_url } - gnocchi::keystone::auth::password: {get_input: gnocchi_password } gnocchi::keystone::auth::region: {get_input: keystone_region} # Nova @@ -1013,9 +796,6 @@ resources: nova::api::api_bind_address: {get_input: nova_api_network} nova::api::metadata_listen: {get_input: nova_metadata_network} nova::api::admin_password: {get_input: nova_password} - nova::compute::network_device_mtu: {get_input: neutron_tenant_mtu} - nova::database_connection: {get_input: nova_dsn} - nova::api_database_connection: {get_input: nova_api_dsn} nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} nova::api::instance_name_template: {get_input: instance_name_template} @@ -1023,8 +803,6 @@ resources: nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_auth_url: {get_input: neutron_auth_url} nova::vncproxy::host: {get_input: nova_api_network} - nova::db::mysql::password: {get_input: nova_password} - nova::db::mysql_api::password: {get_input: nova_password} nova_enable_db_purge: {get_input: nova_enable_db_purge} nova::keystone::auth::public_url: {get_input: nova_public_url} nova::keystone::auth::internal_url: {get_input: nova_internal_url} @@ -1035,7 +813,6 @@ resources: # Horizon apache::mod::remoteip::proxy_ips: {get_input: horizon_subnet} apache::ip: {get_input: horizon_network} - horizon::allowed_hosts: {get_input: horizon_allowed_hosts} horizon::django_debug: {get_input: debug} horizon::secret_key: {get_input: horizon_secret} horizon::bind_address: {get_input: horizon_network} @@ -1053,8 +830,6 @@ resources: # Misc memcached_ipv6: {get_input: memcached_ipv6} memcached::listen_ip: {get_input: memcached_network} - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} control_virtual_interface: {get_input: control_virtual_interface} public_virtual_interface: {get_input: public_virtual_interface} tripleo::keepalived::control_virtual_interface: {get_input: control_virtual_interface} @@ -1193,16 +968,6 @@ outputs: template: "IP:11211" params: IP: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} - config_identifier: - description: identifier which changes if the controller configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [ControllerDeployment, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_attr: [NodeTLSData, deploy_stdout]} - - {get_attr: [ControllerExtraConfigPre, deploy_stdout]} - - {get_param: UpdateIdentifier} tls_key_modulus_md5: description: MD5 checksum of the TLS Key Modulus value: {get_attr: [NodeTLSData, key_modulus_md5]} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml index 3e455347..aa5c3c43 100644 --- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml +++ 
b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -109,11 +109,3 @@ resources: properties: config: {get_resource: NetworkMidoNetConfig} servers: {get_param: compute_servers} - -outputs: - config_identifier: - value: - list_join: - - ' ' - - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]} - - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]} diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index 71445800..e924fc87 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -343,11 +343,3 @@ resources: input_values: ucsm_config: {get_param: NetworkUCSMHostList} actions: ['CREATE'] # Only do this on CREATE - -outputs: - # The Deployment applying the hieradata outputs the derived config-id, which - # changes if the input_values change, so if the stdouts from - # NetworkCiscoDeployment change, we need to reapply puppet (which will - # happen if we return a different config_identifier) - config_identifier: - value: {get_attr: [NetworkCiscoDeployment, deploy_stdouts]} diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml deleted file mode 100644 index 7d4dc15b..00000000 --- a/puppet/extraconfig/ceph/ceph-external-config.yaml +++ /dev/null @@ -1,115 +0,0 @@ -heat_template_version: 2015-04-30 -description: 'Configure parameters for an external Ceph cluster via Puppet.' - -parameters: - ceph_storage_count: - default: 0 - type: number - description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation. - ceph_external_mon_ips: - default: '' - type: string - description: List of external Ceph Mon host IPs. - ceph_client_key: - default: '' - type: string - description: Ceph key used to create the 'openstack' user keyring. 
- ceph_fsid: - default: '' - type: string - # The following parameters are unused for external Ceph clusters and - # are here and exist for compatibility - ceph_admin_key: - default: '' - type: string - ceph_mon_key: - default: '' - type: string - ceph_mon_names: - type: comma_delimited_list - ceph_mon_ips: - type: comma_delimited_list - NovaRbdPoolName: - default: vms - type: string - CinderRbdPoolName: - default: volumes - type: string - GlanceRbdPoolName: - default: images - type: string - GnocchiRbdPoolName: - default: metrics - type: string - CephClientUserName: - default: openstack - type: string - CephIPv6: - default: False - type: boolean - -resources: - CephClusterConfigImpl: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - ceph_cluster: - mapped_data: - ceph_storage_count: {get_param: ceph_storage_count} - enable_external_ceph: true - ceph_ipv6: {get_param: CephIPv6} - ceph_mon_host: {get_param: ceph_external_mon_ips} - ceph_mon_host_v6: {get_param: ceph_external_mon_ips} - ceph::profile::params::fsid: {get_param: ceph_fsid} - ceph::profile::params::client_keys: - str_replace: - template: "{ - client.CLIENT_USER: { - secret: 'CLIENT_KEY', - mode: '0644', - cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' - } - }" - params: - CLIENT_USER: {get_param: CephClientUserName} - CLIENT_KEY: {get_param: ceph_client_key} - NOVA_POOL: {get_param: NovaRbdPoolName} - CINDER_POOL: {get_param: CinderRbdPoolName} - GLANCE_POOL: {get_param: GlanceRbdPoolName} - GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} - nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} - tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName} - glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} - gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName} - gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName} - nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} - glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} - nova::compute::rbd::rbd_keyring: - list_join: - - '.' - - - 'client' - - {get_param: CephClientUserName} - gnocchi::storage::ceph::ceph_keyring: - list_join: - - '.' - - - '/etc/ceph/ceph' - - 'client' - - {get_param: CephClientUserName} - - 'keyring' - tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName} - ceph_pools: - - {get_param: CinderRbdPoolName} - - {get_param: NovaRbdPoolName} - - {get_param: GlanceRbdPoolName} - - {get_param: GnocchiRbdPoolName} - -outputs: - config_id: - description: The ID of the CephClusterConfigImpl resource. 
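(Editorial aside, not part of the patch.) For orientation while reading the deletion above: ceph-external-config.yaml, like the other extraconfig templates removed by this patch, delivers Puppet hieradata to the node through an OS::Heat::StructuredConfig in the os-apply-config group, and that is exactly the pattern the composable-services work replaces. Reduced to a minimal sketch, with resource, datafile and key names made up for illustration:

    ExampleConfig:
      type: OS::Heat::StructuredConfig
      properties:
        group: os-apply-config
        config:
          hiera:
            datafiles:
              example_data:            # written out as a hiera datafile on the configured server
                mapped_data:
                  example::profile::setting: {get_param: ExampleSetting}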
- value: - {get_resource: CephClusterConfigImpl} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml deleted file mode 100644 index a4cfea07..00000000 --- a/puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml +++ /dev/null @@ -1,91 +0,0 @@ -heat_template_version: 2015-04-30 - -description: Configure hieradata for Nuage configuration on the Controller - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - - # Config specific parameters, to be provided via parameter_defaults - NeutronNuageOSControllerIp: - description: IP address of the OpenStack Controller - type: string - - NeutronNuageNetPartitionName: - description: Specifies the title that you will see on the VSD - type: string - default: 'default_name' - - NeutronNuageVSDIp: - description: IP address and port of the Virtual Services Directory - type: string - - NeutronNuageVSDUsername: - description: Username to be used to log into VSD - type: string - - NeutronNuageVSDPassword: - description: Password to be used to log into VSD - type: string - - NeutronNuageVSDOrganization: - description: Organization parameter required to log into VSD - type: string - default: 'organization' - - NeutronNuageBaseURIVersion: - description: URI version to be used based on the VSD release - type: string - default: 'default_uri_version' - - NeutronNuageCMSId: - description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD - type: string - - UseForwardedFor: - description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. - type: boolean - default: false - -resources: - NeutronNuageConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - neutron_nuage_data: - mapped_data: - neutron::plugins::nuage::nuage_oscontroller_ip: {get_input: NuageOSControllerIp} - neutron::plugins::nuage::nuage_net_partition_name: {get_input: NuageNetPartitionName} - neutron::plugins::nuage::nuage_vsd_ip: {get_input: NuageVSDIp} - neutron::plugins::nuage::nuage_vsd_username: {get_input: NuageVSDUsername} - neutron::plugins::nuage::nuage_vsd_password: {get_input: NuageVSDPassword} - neutron::plugins::nuage::nuage_vsd_organization: {get_input: NuageVSDOrganization} - neutron::plugins::nuage::nuage_base_uri_version: {get_input: NuageBaseURIVersion} - neutron::plugins::nuage::nuage_cms_id: {get_input: NuageCMSId} - nova::api::use_forwarded_for: {get_input: NovaUseForwardedFor} - - NeutronNuageDeployment: - type: OS::Heat::StructuredDeployment - properties: - name: NeutronNuageDeployment - config: {get_resource: NeutronNuageConfig} - server: {get_param: server} - input_values: - NuageOSControllerIp: {get_param: NeutronNuageOSControllerIp} - NuageNetPartitionName: {get_param: NeutronNuageNetPartitionName} - NuageVSDIp: {get_param: NeutronNuageVSDIp} - NuageVSDUsername: {get_param: NeutronNuageVSDUsername} - NuageVSDPassword: {get_param: NeutronNuageVSDPassword} - NuageVSDOrganization: {get_param: NeutronNuageVSDOrganization} - NuageBaseURIVersion: {get_param: NeutronNuageBaseURIVersion} - NuageCMSId: {get_param: NeutronNuageCMSId} - NovaUseForwardedFor: {get_param: UseForwardedFor} - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: {get_attr: [NeutronNuageDeployment, deploy_stdout]} diff --git 
a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml deleted file mode 100644 index 5c686fe7..00000000 --- a/puppet/extraconfig/pre_deploy/controller/neutron-opencontrail.yaml +++ /dev/null @@ -1,62 +0,0 @@ -heat_template_version: 2015-04-30 - -description: Controller hieradata for Neutron OpenContrail configuration - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - ContrailApiServerIp: - description: IP address of the OpenContrail API server - type: string - ContrailApiServerPort: - description: Port of the OpenContrail API - type: string - default: 8082 - ContrailMultiTenancy: - description: Whether to enable multi tenancy - type: boolean - default: false - ContrailExtensions: - description: List of OpenContrail extensions to be enabled - type: comma_delimited_list - default: '' - -resources: - ControllerContrailConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - neutron_opencontrail_data: - mapped_data: - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions - - neutron::plugins::opencontrail::api_server_ip: {get_input: contrail_api_server_ip} - neutron::plugins::opencontrail::api_server_port: {get_input: contrail_api_server_port} - neutron::plugins::opencontrail::multi_tenancy: {get_input: contrail_multi_tenancy} - neutron::plugins::opencontrail::contrail_extensions: {get_input: contrail_extensions} - neutron::plugins::opencontrail::keystone_auth_url: '"%{hiera(''keystone_auth_uri'')}"' - neutron::plugins::opencontrail::keystone_admin_user: admin - neutron::plugins::opencontrail::keystone_admin_tenant_name: admin - neutron::plugins::opencontrail::keystone_admin_password: '"%{hiera(''admin_password'')}"' - neutron::plugins::opencontrail::keystone_admin_token: '"%{hiera(''keystone::admin_token'')}"' - - ControllerContrailDeployment: - type: OS::Heat::StructuredDeployment - properties: - config: {get_resource: ControllerContrailConfig} - server: {get_param: server} - input_values: - contrail_api_server_ip: {get_param: ContrailApiServerIp} - contrail_api_server_port: {get_param: ContrailApiServerPort} - contrail_multi_tenancy: {get_param: ContrailMultiTenancy} - contrail_extensions: {get_param: ContrailExtensions} - - -outputs: - deploy_stdout: - description: Output of the extra hiera data deployment - value: {get_attr: [ControllerContrailDeployment, deploy_stdout]} diff --git a/puppet/hieradata/README.rst b/puppet/hieradata/README.rst new file mode 100644 index 00000000..64a60229 --- /dev/null +++ b/puppet/hieradata/README.rst @@ -0,0 +1 @@ +Do not add more hieradata in this directory, and use composable services. diff --git a/puppet/hieradata/RedHat.yaml b/puppet/hieradata/RedHat.yaml deleted file mode 100644 index 25902828..00000000 --- a/puppet/hieradata/RedHat.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# RedHat specific overrides go here -rabbitmq::package_provider: 'yum' - -# The Galera package should work in cluster and -# non-cluster modes based on the config file. -# We set the package name here explicitly so -# that it matches what we pre-install -# in tripleo-puppet-elements. 
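(Editorial aside, not part of the patch.) The one-line README added just above ("Do not add more hieradata in this directory, and use composable services.") states the policy behind most of the deletions in this patch: per-service defaults move out of static hieradata files and into composable service templates under puppet/services/, which publish their hieradata through a role_data output. Roughly, and with made-up service, key and class names, that shape looks like:

    outputs:
      role_data:
        description: Role data for the example service (illustrative only).
        value:
          config_settings:
            example::debug: {get_param: Debug}
            example::rabbit_userid: {get_param: RabbitUserName}
          step_config: |
            include ::tripleo::profile::base::example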
-mysql::server::package_name: 'mariadb-galera-server' diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml index b29b91cf..ccb41cc4 100644 --- a/puppet/hieradata/ceph.yaml +++ b/puppet/hieradata/ceph.yaml @@ -7,5 +7,3 @@ ceph::profile::params::manage_repo: false ceph::profile::params::authentication_type: cephx ceph_classes: [] - -ceph_osd_selinux_permissive: true diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 65cf9577..3bda874f 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -1,51 +1,3 @@ -# Common Hiera data gets applied to all nodes -ssh::server::storeconfigs_enabled: false - -# ceilometer settings used by compute and controller ceilo auth settings -ceilometer::agent::auth::auth_region: 'regionOne' -ceilometer::agent::auth::auth_tenant_name: 'service' - +# TODO(emilien) move it to composable aodh roles later aodh::auth::auth_region: 'regionOne' aodh::auth::auth_tenant_name: 'service' - -gnocchi::auth::auth_region: 'regionOne' -gnocchi::auth::auth_tenant_name: 'service' - -nova::api::admin_tenant_name: 'service' -nova::network::neutron::neutron_project_name: 'service' -nova::network::neutron::neutron_username: 'neutron' -nova::network::neutron::dhcp_domain: '' - -neutron::allow_overlapping_ips: true -neutron::server::project_name: 'service' - -kernel_modules: - nf_conntrack: {} - -sysctl_settings: - net.ipv4.tcp_keepalive_intvl: - value: 1 - net.ipv4.tcp_keepalive_probes: - value: 5 - net.ipv4.tcp_keepalive_time: - value: 5 - net.nf_conntrack_max: - value: 500000 - net.netfilter.nf_conntrack_max: - value: 500000 - # prevent neutron bridges from autoconfiguring ipv6 addresses - net.ipv6.conf.default.accept_ra: - value: 0 - net.ipv6.conf.default.autoconf: - value: 0 - net.core.netdev_max_backlog: - value: 10000 - -nova::rabbit_heartbeat_timeout_threshold: 60 -neutron::rabbit_heartbeat_timeout_threshold: 60 -cinder::rabbit_heartbeat_timeout_threshold: 60 -ceilometer::rabbit_heartbeat_timeout_threshold: 60 -heat::rabbit_heartbeat_timeout_threshold: 60 -keystone::rabbit_heartbeat_timeout_threshold: 60 - -nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index 2d928cbf..219f0d0a 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -1,26 +1,3 @@ # Hiera data here applies to all compute nodes -nova::host: "%{::fqdn}" -nova::notify_on_state_change: 'vm_and_task_state' -nova::notification_driver: messagingv2 -nova::compute::instance_usage_audit: true -nova::compute::instance_usage_audit_period: 'hour' - -nova::compute::libvirt::migration_support: true - -nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" - -nova::network::neutron::neutron_auth_type: 'v3password' - -# Changing the default from 512MB. The current templates can not deploy -# overclouds with swap. On an idle compute node, we see ~1024MB of RAM -# used. 2048 is suggested to account for other possible operations for -# example openvswitch. 
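(Editorial aside, not part of the patch.) The comment above belongs to the nova::compute::reserved_host_memory: 2048 default that follows it on the next line; the default itself is only moving, presumably re-added via the composable nova-compute service elsewhere in this patch, not disappearing. Operators who used to change it by editing this file can keep overriding the same hiera key through tripleo-heat-templates' ExtraConfig mechanism, for example (the 4096 value is purely illustrative, not a recommendation):

    parameter_defaults:
      ExtraConfig:
        nova::compute::reserved_host_memory: 4096   # example override value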
-nova::compute::reserved_host_memory: 2048 - -ceilometer::agent::auth::auth_tenant_name: 'service' -ceilometer::agent::auth::auth_endpoint_type: 'internalURL' - -neutron::host: "%{::fqdn}" - compute_classes: [] diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index e69656cf..71c53b47 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -1,299 +1,16 @@ # Hiera data here applies to all controller nodes -nova::api::enabled: true -nova::vncproxy::enabled: true - -# gnocchi -gnocchi::db::sync::extra_opts: '--skip-storage' -gnocchi::storage::swift::swift_user: 'service:gnocchi' -gnocchi::storage::swift::swift_auth_version: 2 -gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26' -gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3' -gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616' -gnocchi::statsd::flush_delay: 10 -gnocchi::statsd::archive_policy_name: 'low' - -# rabbitmq -rabbitmq::delete_guest_user: false -rabbitmq::wipe_db_on_cookie_change: true -rabbitmq::port: '5672' -rabbitmq::package_source: undef -rabbitmq::repos_ensure: false -rabbitmq_environment: - RABBITMQ_NODENAME: "rabbit@%{::hostname}" - RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' -rabbitmq_kernel_variables: - inet_dist_listen_min: '35672' - inet_dist_listen_max: '35672' -rabbitmq_config_variables: - tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]' - cluster_partition_handling: 'pause_minority' - loopback_users: '[]' - -mongodb::server::replset: tripleo -mongodb::server::journal: false - -redis::port: 6379 -redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}" -redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}" -redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' - -# keystone -keystone::roles::admin::email: 'root@localhost' - -# service tenant -glance::api::keystone_tenant: 'service' +# TODO(emilien) move it to composable aodh roles later aodh::api::keystone_tenant: 'service' -glance::registry::keystone_tenant: 'service' -neutron::server::auth_tenant: 'service' -neutron::agents::metadata::auth_tenant: 'service' -neutron::agents::l3::router_delete_namespaces: True -cinder::api::keystone_tenant: 'service' -swift::proxy::authtoken::admin_tenant_name: 'service' -ceilometer::api::keystone_tenant: 'service' -gnocchi::api::keystone_tenant: 'service' -heat::keystone_tenant: 'service' -sahara::admin_tenant_name: 'service' aodh::keystone::auth::tenant: 'service' -ceilometer::keystone::auth::tenant: 'service' -cinder::keystone::auth::tenant: 'service' -glance::keystone::auth::tenant: 'service' -gnocchi::keystone::auth::tenant: 'service' -heat::keystone::auth::tenant: 'service' -neutron::keystone::auth::tenant: 'service' -nova::keystone::auth::tenant: 'service' -sahara::keystone::auth::tenant: 'service' -swift::keystone::auth::tenant: 'service' - -# keystone -keystone::cron::token_flush::maxdelay: 3600 -keystone::roles::admin::service_tenant: 'service' -keystone::roles::admin::admin_tenant: 'admin' -keystone::cron::token_flush::destination: '/dev/null' -keystone::config::keystone_config: - DEFAULT/secure_proxy_ssl_header: - value: 'HTTP_X_FORWARDED_PROTO' - ec2/driver: - value: 'keystone.contrib.ec2.backends.sql.Ec2' 
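(Editorial aside, not part of the patch.) One thing that deliberately survives the purge in these hieradata files is the family of empty *_classes lists (compute_classes a few lines up, controller_classes a little further down, plus ceph_classes, object_classes and volume_classes in their own files). They are consumed by hiera_include() in the role manifests later in this patch and remain the hook for pulling arbitrary extra Puppet classes onto a role. An operator-side illustration, with a hypothetical class name:

    # e.g. supplied through an extra hiera datafile or an ExtraConfig-style parameter
    compute_classes:
      - '::mysite::compute_tuning'    # hypothetical class, applied via hiera_include('compute_classes')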
-keystone::service_name: 'httpd' -keystone::wsgi::apache::ssl: false - -#swift -swift::proxy::pipeline: - - 'catch_errors' - - 'healthcheck' - - 'cache' - - 'ratelimit' - - 'tempurl' - - 'formpost' - - 'authtoken' - - 'keystone' - - 'staticweb' - - 'proxy-logging' - - 'proxy-server' - -swift::proxy::account_autocreate: true -swift::keystone::auth::configure_s3_endpoint: false -swift::keystone::auth::operator_roles: - - admin - - swiftoperator - -# glance -glance::api::pipeline: 'keystone' -glance::api::show_image_direct_url: true -glance::registry::pipeline: 'keystone' -glance::backend::swift::swift_store_create_container_on_put: true -glance_file_pcmk_directory: '/var/lib/glance/images' - -# neutron -neutron::server::sync_db: true - -# nova -nova::notify_on_state_change: 'vm_and_task_state' -nova::api::default_floating_pool: 'public' -nova::api::sync_db_api: true -nova::api::enable_proxy_headers_parsing: true -nova::cron::archive_deleted_rows::hour: '*/12' -nova::cron::archive_deleted_rows::destination: '/dev/null' -nova::notification_driver: messaging -# ceilometer -ceilometer::agent::auth::auth_endpoint_type: 'internalURL' - -# cinder -cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler -cinder::cron::db_purge::destination: '/dev/null' -cinder::host: hostgroup - -# TODO(jaosorior): Move to cinder profile once cinder is moved as a composable -# service. -cinder::api::enable_proxy_headers_parsing: true - -# heat -heat::engine::configure_delegated_roles: false -heat::engine::trusts_delegated_roles: [] -heat::instance_user: '' -heat::cron::purge_deleted::age: 30 -heat::cron::purge_deleted::age_type: 'days' -heat::cron::purge_deleted::maxdelay: 3600 -heat::cron::purge_deleted::destination: '/dev/null' -heat::keystone::domain::domain_name: 'heat_stack' -heat::keystone::domain::domain_admin: 'heat_stack_domain_admin' -heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost' -heat::auth_plugin: 'password' - -# pacemaker -pacemaker::corosync::cluster_name: 'tripleo_cluster' -pacemaker::corosync::manage_fw: false -pacemaker::resource_defaults::defaults: - resource-stickiness: { value: INFINITY } -corosync_token_timeout: 10000 - -# horizon -horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache -horizon::django_session_engine: 'django.contrib.sessions.backends.cache' -horizon::vhost_extra_params: - add_listen: false - priority: 10 - access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' - -# mysql -mysql::server::manage_config_file: true - - -tripleo::haproxy::keystone_admin: true -tripleo::haproxy::keystone_public: true -tripleo::haproxy::neutron: true -tripleo::haproxy::cinder: true -tripleo::haproxy::glance_api: true -tripleo::haproxy::glance_registry: true -tripleo::haproxy::nova_osapi: true -tripleo::haproxy::nova_metadata: true -tripleo::haproxy::nova_novncproxy: true -tripleo::haproxy::mysql: true -tripleo::haproxy::redis: true -tripleo::haproxy::sahara: true -tripleo::haproxy::swift_proxy_server: true -tripleo::haproxy::ceilometer: true -tripleo::haproxy::aodh: true -tripleo::haproxy::gnocchi: true -tripleo::haproxy::heat_api: true -tripleo::haproxy::heat_cloudwatch: true -tripleo::haproxy::heat_cfn: true -tripleo::haproxy::horizon: true - -controller_classes: [] -# firewall +# TODO(emilien) move it to composable roles later +# Already WIP with https://review.openstack.org/330785 +# and https://review.openstack.org/338527 tripleo::firewall::firewall_rules: - '101 mongodb_config': 
- port: 27019 - '102 mongodb_sharding': - port: 27018 - '103 mongod': - port: 27017 - '104 mysql galera': - port: - - 873 - - 3306 - - 4444 - - 4567 - - 4568 - - 9200 - '105 ntp': - port: 123 - proto: udp - '106 vrrp': - proto: vrrp - '107 haproxy stats': - port: 1993 - '108 redis': - port: - - 6379 - - 26379 - '109 rabbitmq': - port: - - 5672 - - 35672 - '110 ceph': - port: - - 6789 - - '6800-6810' - '111 keystone': - port: - - 5000 - - 13000 - - 35357 - - 13357 - '112 glance': - port: - - 9292 - - 9191 - - 13292 - '113 nova': - port: - - 6080 - - 13080 - - 8773 - - 3773 - - 8774 - - 13774 - - 8775 - '114 neutron server': - port: - - 9696 - - 13696 - '115 neutron dhcp input': - proto: 'udp' - port: 67 - '116 neutron dhcp output': - proto: 'udp' - chain: 'OUTPUT' - port: 68 - '118 neutron vxlan networks': - proto: 'udp' - port: 4789 - '119 cinder': - port: - - 8776 - - 13776 - '120 iscsi initiator': - port: 3260 - '121 memcached': - port: 11211 - '122 swift proxy': - port: - - 8080 - - 13808 - '123 swift storage': - port: - - 873 - - 6000 - - 6001 - - 6002 - '124 ceilometer': - port: - - 8777 - - 13777 - '125 heat': - port: - - 8000 - - 13800 - - 8003 - - 13003 - - 8004 - - 13004 - '126 horizon': - port: - - 80 - - 443 - '127 snmp': - port: 161 - proto: 'udp' '128 aodh': - port: + dport: - 8042 - 13042 - '129 gnocchi-api': - port: - - 8041 - - 13041 + +controller_classes: [] diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml index 4eb199c8..d93817e7 100644 --- a/puppet/hieradata/database.yaml +++ b/puppet/hieradata/database.yaml @@ -1,77 +1,8 @@ -# Nova -nova::db::mysql::user: nova -nova::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -nova::db::mysql::dbname: nova -nova::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -nova::db::mysql_api::user: nova_api -nova::db::mysql_api::host: "%{hiera('mysql_virtual_ip')}" -nova::db::mysql_api::dbname: nova_api -nova::db::mysql_api::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Glance -glance::db::mysql::user: glance -glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -glance::db::mysql::dbname: glance -glance::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Keystone -keystone::db::mysql::user: keystone -keystone::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -keystone::db::mysql::dbname: keystone -keystone::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Neutron -neutron::db::mysql::user: neutron -neutron::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -neutron::db::mysql::dbname: ovs_neutron -neutron::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Cinder -cinder::db::mysql::user: cinder -cinder::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -cinder::db::mysql::dbname: cinder -cinder::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Heat -heat::db::mysql::user: heat -heat::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -heat::db::mysql::dbname: heat -heat::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Ceilometer -ceilometer::db::mysql::user: ceilometer -ceilometer::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -ceilometer::db::mysql::dbname: ceilometer -ceilometer::db::mysql::allowed_hosts: - - '%' - - "%{hiera('mysql_bind_host')}" - -# Gnocchi -gnocchi::db::mysql::user: gnocchi -gnocchi::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -gnocchi::db::mysql::dbname: gnocchi -gnocchi::db::mysql::allowed_hosts: - - '%' - - 
"%{hiera('mysql_bind_host')}" - -sahara::db::mysql::user: sahara -sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}" -sahara::db::mysql::dbname: sahara -sahara::db::mysql::allowed_hosts: +# Aodh +# TODO(emilien) move it to composable aodh roles later +aodh::db::mysql::user: aodh +aodh::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +aodh::db::mysql::dbname: aodh +aodh::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" diff --git a/puppet/hieradata/object.yaml b/puppet/hieradata/object.yaml index d4a0e81d..da526e39 100644 --- a/puppet/hieradata/object.yaml +++ b/puppet/hieradata/object.yaml @@ -1,21 +1,2 @@ # Hiera data for swift storage nodes -swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' -swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' - -swift::storage::all::object_pipeline: - - healthcheck - - recon - - object-server -swift::storage::all::container_pipeline: - - healthcheck - - container-server -swift::storage::all::account_pipeline: - - healthcheck - - account-server - -swift::proxy::keystone::operator_roles: - - admin - - swiftoperator - - ResellerAdmin - object_classes: [] diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml index 8640c0a7..dd0582fa 100644 --- a/puppet/hieradata/volume.yaml +++ b/puppet/hieradata/volume.yaml @@ -1,14 +1,3 @@ # Hiera data here applies to all volume storage nodes -# cinder -cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler - -cinder::config::cinder_config: - DEFAULT/nova_catalog_info: - value: 'compute:Compute Service:internalURL' - DEFAULT/swift_catalog_info: - value: 'object-store:swift:internalURL' - -cinder_user_enabled_backends: [] - -volume_classes: []
\ No newline at end of file +volume_classes: [] diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index 4add2f02..152694d9 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -16,46 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - - include ::timezone - - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } -} - -if hiera('step') >= 3 { - if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] - } - - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - include ::ceph::profile::osd - +if hiera('step') >= 4 { hiera_include('ceph_classes') - package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present} } + +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_ceph', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 30672f20..ab7f846f 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -16,199 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), { }) -create_resources(sysctl::value, hiera('sysctl_settings'), { }) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone - if hiera('step') >= 4 { - - file { ['/etc/libvirt/qemu/networks/autostart/default.xml', - '/etc/libvirt/qemu/networks/default.xml']: - ensure => absent, - before => Service['libvirt'], - } - # in case libvirt has been already running before the Puppet run, make - # sure the default network is destroyed - exec { 'libvirt-default-net-destroy': - command => '/usr/bin/virsh net-destroy default', - onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', - before => Service['libvirt'], - } - - # When utilising images for deployment, we need to reset the iSCSI initiator name to make it unique - exec { 'reset-iscsi-initiator-name': - command => '/bin/echo InitiatorName=$(/usr/sbin/iscsi-iname) > /etc/iscsi/initiatorname.iscsi', - onlyif => '/usr/bin/test ! 
-f /etc/iscsi/.initiator_reset', - }-> - - file { '/etc/iscsi/.initiator_reset': - ensure => present, - } - - $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) - $rbd_persistent_storage = hiera('rbd_persistent_storage', false) - if $rbd_ephemeral_storage or $rbd_persistent_storage { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - - $client_keys = hiera('ceph::profile::params::client_keys') - $client_user = join(['client.', hiera('tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name')]) - class { '::nova::compute::rbd': - libvirt_rbd_secret_key => $client_keys[$client_user]['secret'], - } - } - - if hiera('cinder_enable_nfs_backend', false) { - if str2bool($::selinux) { - selboolean { 'virt_use_nfs': - value => on, - persistent => true, - } -> Package['nfs-utils'] - } - - package { 'nfs-utils': } -> Service['nova-compute'] - } - - if str2bool(hiera('nova::use_ipv6', false)) { - $vncserver_listen = '::0' - } else { - $vncserver_listen = '0.0.0.0' - } - - if $rbd_ephemeral_storage { - class { '::nova::compute::libvirt': - libvirt_disk_cachemodes => ['network=writeback'], - libvirt_hw_disk_discard => 'unmap', - vncserver_listen => $vncserver_listen, - } - } else { - class { '::nova::compute::libvirt' : - vncserver_listen => $vncserver_listen, - } - } - - nova_config { - 'DEFAULT/my_ip': value => $ipaddress; - 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; - } - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - file { '/etc/libvirt/qemu.conf': - ensure => present, - content => hiera('midonet_libvirt_qemu_data') - } - } - include ::nova::network::neutron - include ::neutron - include ::neutron::config - - # If the value of core plugin is set to 'nuage', - # include nuage agent, - # If the value of core plugin is set to 'midonet', - # include midonet agent, - # else use the default value of 'ml2' - if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::nuage::vrs - include ::nova::compute::neutron - - class { '::nuage::metadataagent': - nova_os_tenant_name => hiera('nova::api::admin_tenant_name'), - nova_os_password => hiera('nova_password'), - nova_metadata_ip => hiera('nova_metadata_node_ips'), - nova_auth_ip => hiera('keystone_public_api_virtual_ip'), - } - } - elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - class { '::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - } - elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - - include ::contrail::vrouter - # NOTE: it's not possible to use this class without a functional - # contrail controller up and running - #class {'::contrail::vrouter::provision_vrouter': - # require => Class['contrail::vrouter'], - #} - } - elsif hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' { - # forward all ipv4 traffic - # this is required for the vms to pass through the gateways 
public interface - sysctl::value { 'net.ipv4.ip_forward': value => '1' } - - # ifc_ctl_pp needs to be invoked by root as part of the vif.py when a VM is powered on - file { '/etc/sudoers.d/ifc_ctl_sudoers': - ensure => file, - owner => root, - group => root, - mode => '0440', - content => "nova ALL=(root) NOPASSWD: /opt/pg/bin/ifc_ctl_pp *\n", - } - } - else { - - # NOTE: this code won't live in puppet-neutron until Neutron OVS agent - # can be gracefully restarted. See https://review.openstack.org/#/c/297211 - # In the meantime, it's safe to restart the agent on each change in neutron.conf, - # because Puppet changes are supposed to be done during bootstrap and upgrades. - # Some resource managed by Neutron_config (like messaging and logging options) require - # a restart of OVS agent. This code does it. - # In Newton, OVS agent will be able to be restarted gracefully so we'll drop the code - # from here and fix it in puppet-neutron. - Neutron_config<||> ~> Service['neutron-ovs-agent-service'] - - include ::neutron::plugins::ml2 - include ::neutron::agents::ml2::ovs - - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - class { '::neutron::agents::n1kv_vem': - n1kv_source => hiera('n1kv_vem_source', undef), - n1kv_version => hiera('n1kv_vem_version', undef), - } - } - - if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') { - include ::neutron::agents::bigswitch - } - } - - include ::ceilometer - include ::ceilometer::config - include ::ceilometer::agent::compute - include ::ceilometer::agent::auth - - $snmpd_user = hiera('snmpd_readonly_user_name') - snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), - } - class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], - } - hiera_include('compute_classes') - package_manifest{ '/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present } - } + +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_compute', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index 6084c954..3778271c 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -16,125 +16,10 @@ include ::tripleo::packages include ::tripleo::firewall -$enable_load_balancer = hiera('enable_load_balancer', true) - -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -} - if hiera('step') >= 2 { - - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } - - include ::timezone - - # MongoDB - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* port and - # without the brackets as 'members' argument for the 'mongodb_replset' - # resource. 
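(Editorial aside, not part of the patch.) The NOTE above, whose handling is also removed from the pacemaker manifest later in this patch, is easier to follow with a concrete value. Assuming a single hypothetical IPv6 mongo node, the deleted code derives:

    mongo_node_ips:                ['fd00:fd00:fd00:2000::10']              # hypothetical input from hiera
    mongo_node_ips_with_port:      ['[fd00:fd00:fd00:2000::10]:27017']      # bracketed; joined into the mongodb:// connection string
    mongo_node_ips_with_port_nobr: ['fd00:fd00:fd00:2000::10:27017']        # unbracketed, the form the comment says mongodb_replset's 'members' wants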
- if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } else { - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } - $mongo_node_string = join($mongo_node_ips_with_port, ',') - - $mongodb_replset = hiera('mongodb::server::replset') - $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - } - - if str2bool(hiera('enable_galera', true)) { - $mysql_config_file = '/etc/my.cnf.d/galera.cnf' - } else { - $mysql_config_file = '/etc/my.cnf.d/server.cnf' - } - # TODO Galara - # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we - # set bind-address to a hostname instead of an ip address; to move Mysql - # from internal_api on another network we'll have to customize both - # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap - class { '::mysql::server': - config_file => $mysql_config_file, - override_options => { - 'mysqld' => { - 'bind-address' => $::hostname, - 'max_connections' => hiera('mysql_max_connections'), - 'open_files_limit' => '-1', - }, - }, - remove_default_accounts => true, - } - # FIXME: this should only occur on the bootstrap host (ditto for db syncs) # Create all the database schemas - include ::nova::db::mysql - include ::nova::db::mysql_api - if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { - include ::gnocchi::db::mysql - } - if downcase(hiera('ceilometer_backend')) == 'mysql' { - include ::ceilometer::db::mysql - include ::aodh::db::mysql - } - - $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) - - if $enable_ceph { - $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_initial_members => $mon_initial_members, - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::mon - } - - if str2bool(hiera('enable_ceph_storage', false)) { - if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] - } - - include ::ceph::conf - include ::ceph::profile::osd - } - - if str2bool(hiera('enable_external_ceph', false)) { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - } + include ::aodh::db::mysql } #END STEP 2 @@ -151,140 +36,12 @@ if hiera('step') >= 4 { memcached_servers => $memcached_servers } include ::nova::config - include ::nova::network::neutron - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => hiera('controller_node_names') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('public_virtual_ip'), - keystone_ip => hiera('public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # TODO: find a way to get an empty list from hiera - # TODO: when doing the composable midonet plugin, don't forget to - # set service_plugins to an empty array in Hiera. 
- class {'::neutron': - service_plugins => [] - } - - } - - # If the value of core plugin is set to 'nuage' or'opencontrail' - # include nuage or opencontrail core plugins - # else use the default value of 'ml2' - if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::neutron::plugins::nuage - } elsif hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - include ::neutron::plugins::opencontrail - } else { - - # If the value of core plugin is set to 'midonet', - # skip all the ML2 configuration - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::password') - } - } - - Service['neutron-server'] -> Service['neutron-metadata'] - - } - - if $enable_ceph { - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : - pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), - pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), - size => hiera('ceph::profile::params::osd_pool_default_size'), - } - } - - # swift storage - if str2bool(hiera('enable_swift_storage', true)) { - class { '::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), - } - if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['openstack-swift'], - } - } - $swift_components = ['account', 'container', 'object'] - swift::storage::filter::recon { $swift_components : } - swift::storage::filter::healthcheck { $swift_components : } - } - - # Ceilometer - $ceilometer_backend = downcase(hiera('ceilometer_backend')) - case $ceilometer_backend { - /mysql/ : { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default : { - $ceilometer_database_connection = $ceilometer_mongodb_conn_string - } - } - include ::ceilometer - include ::ceilometer::config - include ::ceilometer::api - include ::ceilometer::agent::notification - include ::ceilometer::agent::central - include ::ceilometer::expirer - include ::ceilometer::collector - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - } - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Aodh class { '::aodh' : - database_connection => $ceilometer_database_connection, + database_connection => hiera('aodh_mysql_conn_string'), } include ::aodh::db::sync - # To manage the upgrade: - Exec['ceilometer-dbsync'] -> Exec['aodh-db-sync'] include ::aodh::auth include ::aodh::api include ::aodh::wsgi::apache @@ -293,68 +50,9 @@ if hiera('step') >= 4 { include ::aodh::listener include ::aodh::client - # Horizon - include ::apache::mod::remoteip - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - $_profile_support = 'cisco' - } else { - $_profile_support = 'None' - } - $neutron_options = {'profile_support' => $_profile_support } - - $memcached_ipv6 = hiera('memcached_ipv6', false) - if $memcached_ipv6 { - $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]') - } else { - $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1') - } - - 
class { '::horizon': - cache_server_ip => $horizon_memcached_servers, - neutron_options => $neutron_options, - } - - # Gnocchi - $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string') - class { '::gnocchi': - database_connection => $gnocchi_database_connection, - } - include ::gnocchi::api - include ::gnocchi::wsgi::apache - include ::gnocchi::client - include ::gnocchi::db::sync - include ::gnocchi::storage - include ::gnocchi::metricd - include ::gnocchi::statsd - $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift')) - case $gnocchi_backend { - 'swift': { include ::gnocchi::storage::swift } - 'file': { include ::gnocchi::storage::file } - 'rbd': { include ::gnocchi::storage::ceph } - default: { fail('Unrecognized gnocchi_backend parameter.') } - } - - $snmpd_user = hiera('snmpd_readonly_user_name') - snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), - } - class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], - } - hiera_include('controller_classes') } #END STEP 4 -if hiera('step') >= 5 { - $nova_enable_db_purge = hiera('nova_enable_db_purge', true) - - if $nova_enable_db_purge { - include ::nova::cron::archive_deleted_rows - } -} #END STEP 5 - $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')]) package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 7710217c..ca24c443 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -13,18 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-Pcmk_resource <| |> { - tries => 10, - try_sleep => 3, -} - # TODO(jistr): use pcs resource provider instead of just no-ops Service <| - tag == 'aodh-service' or - tag == 'ceilometer-service' or - tag == 'gnocchi-service' or - tag == 'neutron-service' or - tag == 'nova-service' + tag == 'aodh-service' |> { hasrestart => true, restart => '/bin/true', @@ -43,300 +34,15 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) { $sync_db = false } -$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5 -$enable_load_balancer = hiera('enable_load_balancer', true) - -# When to start and enable services which haven't been Pacemakerized -# FIXME: remove when we start all OpenStack services using Pacemaker -# (occurrences of this variable will be gradually replaced with false) -$non_pcmk_start = hiera('step') >= 5 - -if hiera('step') >= 1 { - - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - - include ::timezone - - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } - - $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) - $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false)) - if $corosync_ipv6 { - $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' } - } else { - $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) } - } - class { '::pacemaker': - hacluster_pwd => hiera('hacluster_pwd'), - } -> - class { '::pacemaker::corosync': - cluster_members => $pacemaker_cluster_members, - setup_cluster => $pacemaker_master, - cluster_setup_extras => $cluster_setup_extras, - } - class { '::pacemaker::stonith': - disable => !$enable_fencing, - } - if $enable_fencing { - include ::tripleo::fencing - - # enable stonith after all Pacemaker resources have been created - Pcmk_resource<||> -> Class['tripleo::fencing'] - Pcmk_constraint<||> -> Class['tripleo::fencing'] - Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing'] - # enable stonith after all fencing devices have been created - Class['tripleo::fencing'] -> Class['pacemaker::stonith'] - } - - # FIXME(gfidente): sets 200secs as default start timeout op - # param; until we can use pcmk global defaults we'll still - # need to add it to every resource which redefines op params - Pacemaker::Resource::Service { - op_params => 'start timeout=200s stop timeout=200s', - } - - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - include ::mongodb::params - } - - # Galera - if str2bool(hiera('enable_galera', true)) { - $mysql_config_file = '/etc/my.cnf.d/galera.cnf' - } else { - $mysql_config_file = '/etc/my.cnf.d/server.cnf' - } - $galera_nodes = downcase(hiera('galera_node_names', $::hostname)) - $galera_nodes_count = count(split($galera_nodes, ',')) - - # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we - # set bind-address to a hostname instead of an ip address; to move Mysql - # from internal_api on another network we'll have to customize both - # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap - $mysql_bind_host = hiera('mysql_bind_host') - $mysqld_options = { - 'mysqld' => { - 'skip-name-resolve' => '1', - 'binlog_format' => 'ROW', - 'default-storage-engine' => 'innodb', - 'innodb_autoinc_lock_mode' => '2', - 'innodb_locks_unsafe_for_binlog'=> '1', - 'query_cache_size' => '0', - 'query_cache_type' => '0', - 'bind-address' => 
$::hostname, - 'max_connections' => hiera('mysql_max_connections'), - 'open_files_limit' => '-1', - 'wsrep_on' => 'ON', - 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', - 'wsrep_cluster_name' => 'galera_cluster', - 'wsrep_cluster_address' => "gcomm://${galera_nodes}", - 'wsrep_slave_threads' => '1', - 'wsrep_certify_nonPK' => '1', - 'wsrep_max_ws_rows' => '131072', - 'wsrep_max_ws_size' => '1073741824', - 'wsrep_debug' => '0', - 'wsrep_convert_LOCK_to_trx' => '0', - 'wsrep_retry_autocommit' => '1', - 'wsrep_auto_increment_control' => '1', - 'wsrep_drupal_282555_workaround'=> '0', - 'wsrep_causal_reads' => '0', - 'wsrep_sst_method' => 'rsync', - 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;", - }, - } - - class { '::mysql::server': - create_root_user => false, - create_root_my_cnf => false, - config_file => $mysql_config_file, - override_options => $mysqld_options, - remove_default_accounts => $pacemaker_master, - service_manage => false, - service_enabled => false, - } - -} - if hiera('step') >= 2 { - - - # NOTE(gfidente): the following vars are needed on all nodes so they - # need to stay out of pacemaker_master conditional. - # The addresses mangling will hopefully go away when we'll be able to - # configure the connection string via hostnames, until then, we need to pass - # the list of IPv6 addresses *with* port and without the brackets as 'members' - # argument for the 'mongodb_replset' resource. - if str2bool(hiera('mongodb::server::ipv6', false)) { - $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[') - $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } else { - $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017') - $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017') - } - $mongodb_replset = hiera('mongodb::server::replset') - - if $pacemaker_master { - - include ::pacemaker::resource_defaults - - # Create an openstack-core dummy resource. 
See RHBZ 1290121 - pacemaker::resource::ocf { 'openstack-core': - ocf_agent_name => 'heartbeat:Dummy', - clone_params => true, - } - - pacemaker::resource::ocf { 'galera' : - ocf_agent_name => 'heartbeat:galera', - op_params => 'promote timeout=300s on-fail=block', - master_params => '', - meta_params => "master-max=${galera_nodes_count} ordered=true", - resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'", - require => Class['::mysql::server'], - before => Exec['galera-ready'], - } - } - $mysql_root_password = hiera('mysql::server::root_password') - $mysql_clustercheck_password = hiera('mysql_clustercheck_password') - # This step is to create a sysconfig clustercheck file with the root user and empty password - # on the first install only (because later on the clustercheck db user will be used) - # We are using exec and not file in order to not have duplicate definition errors in puppet - # when we later set the the file to contain the clustercheck data - exec { 'create-root-sysconfig-clustercheck': - command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck", - unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck', - } - - exec { 'galera-ready' : - command => '/usr/bin/clustercheck >/dev/null', - timeout => 30, - tries => 180, - try_sleep => 10, - environment => ['AVAILABLE_WHEN_READONLY=0'], - require => Exec['create-root-sysconfig-clustercheck'], - } - - xinetd::service { 'galera-monitor' : - port => '9200', - server => '/usr/bin/clustercheck', - per_source => 'UNLIMITED', - log_on_success => '', - log_on_failure => 'HOST', - flags => 'REUSE', - service_type => 'UNLISTED', - user => 'root', - group => 'root', - require => Exec['create-root-sysconfig-clustercheck'], - } - # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck - # to it in a later step. We do this only on one node as it will replicate on - # the other members. 
We also make sure that the permissions are the minimum necessary if $pacemaker_master { - mysql_user { 'clustercheck@localhost': - ensure => 'present', - password_hash => mysql_password($mysql_clustercheck_password), - require => Exec['galera-ready'], - } - mysql_grant { 'clustercheck@localhost/*.*': - ensure => 'present', - options => ['GRANT'], - privileges => ['PROCESS'], - table => '*.*', - user => 'clustercheck@localhost', - } - } - - # Create all the database schemas - if $sync_db { - class { '::nova::db::mysql': - require => Exec['galera-ready'], - } - class { '::nova::db::mysql_api': - require => Exec['galera-ready'], - } - - if downcase(hiera('ceilometer_backend')) == 'mysql' { - class { '::ceilometer::db::mysql': - require => Exec['galera-ready'], - } - } - - if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' { - class { '::gnocchi::db::mysql': + class { '::aodh::db::mysql': require => Exec['galera-ready'], - } - } - } - - # Ceph - $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false) - - if $enable_ceph { - $mon_initial_members = downcase(hiera('ceph_mon_initial_members')) - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_initial_members => $mon_initial_members, - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::mon - } - - if str2bool(hiera('enable_ceph_storage', false)) { - if str2bool(hiera('ceph_osd_selinux_permissive', true)) { - exec { 'set selinux to permissive on boot': - command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", - onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", - path => ['/usr/bin', '/usr/sbin'], - } - - exec { 'set selinux to permissive': - command => 'setenforce 0', - onlyif => "which setenforce && getenforce | grep -i 'enforcing'", - path => ['/usr/bin', '/usr/sbin'], - } -> Class['ceph::profile::osd'] } - - include ::ceph::conf - include ::ceph::profile::osd } - - if str2bool(hiera('enable_external_ceph', false)) { - if str2bool(hiera('ceph_ipv6', false)) { - $mon_host = hiera('ceph_mon_host_v6') - } else { - $mon_host = hiera('ceph_mon_host') - } - class { '::ceph::profile::params': - mon_host => $mon_host, - } - include ::ceph::conf - include ::ceph::profile::client - } - - } #END STEP 2 if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) { - # At this stage we are guaranteed that the clustercheck db user exists - # so we switch the resource agent to use it. 
- file { '/etc/sysconfig/clustercheck' : - ensure => file, - mode => '0600', - owner => 'root', - group => 'root', - content => "MYSQL_USERNAME=clustercheck\n -MYSQL_PASSWORD='${mysql_clustercheck_password}'\n -MYSQL_HOST=localhost\n", - } - $nova_ipv6 = hiera('nova::use_ipv6', false) if $nova_ipv6 { $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211') @@ -349,178 +55,10 @@ MYSQL_HOST=localhost\n", } include ::nova::config - include ::nova::network::neutron - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - - # TODO(devvesa) provide non-controller ips for these services - $zookeeper_node_ips = hiera('neutron_api_node_ips') - $cassandra_node_ips = hiera('neutron_api_node_ips') - - # Run zookeeper in the controller if configured - if hiera('enable_zookeeper_on_controller') { - class {'::tripleo::cluster::zookeeper': - zookeeper_server_ips => $zookeeper_node_ips, - # TODO: create a 'bind' hiera key for zookeeper - zookeeper_client_ip => hiera('neutron::bind_host'), - zookeeper_hostnames => split(hiera('controller_node_names'), ',') - } - } - - # Run cassandra in the controller if configured - if hiera('enable_cassandra_on_controller') { - class {'::tripleo::cluster::cassandra': - cassandra_servers => $cassandra_node_ips, - # TODO: create a 'bind' hiera key for cassandra - cassandra_ip => hiera('neutron::bind_host'), - } - } - - class {'::tripleo::network::midonet::agent': - zookeeper_servers => $zookeeper_node_ips, - cassandra_seeds => $cassandra_node_ips - } - - class {'::tripleo::network::midonet::api': - zookeeper_servers => $zookeeper_node_ips, - vip => hiera('public_virtual_ip'), - keystone_ip => hiera('public_virtual_ip'), - keystone_admin_token => hiera('keystone::admin_token'), - # TODO: create a 'bind' hiera key for api - bind_address => hiera('neutron::bind_host'), - admin_password => hiera('admin_password') - } - - # Configure Neutron - # TODO: when doing the composable midonet plugin, don't forget to - # set service_plugins to an empty array in Hiera. 
- class {'::neutron': - service_plugins => [] - } - - } - - if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' { - include ::neutron::plugins::nuage - } - if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' { - include ::neutron::plugins::opencontrail - } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - class {'::neutron::plugins::midonet': - midonet_api_ip => hiera('public_virtual_ip'), - keystone_tenant => hiera('neutron::server::auth_tenant'), - keystone_password => hiera('neutron::server::password') - } - } - - if $enable_ceph { - $ceph_pools = hiera('ceph_pools') - ceph::pool { $ceph_pools : - pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'), - pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'), - size => hiera('ceph::profile::params::osd_pool_default_size'), - } - } - - # swift storage - if str2bool(hiera('enable_swift_storage', true)) { - class {'::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), - } - class {'::swift::storage::account': - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - class {'::swift::storage::container': - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - class {'::swift::storage::object': - manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, - } - if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['openstack-swift'], - } - } - $swift_components = ['account', 'container', 'object'] - swift::storage::filter::recon { $swift_components : } - swift::storage::filter::healthcheck { $swift_components : } - } - - # Ceilometer - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') - } - default: { - $mongo_node_string = join($mongo_node_ips_with_port, ',') - $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - } - } - include ::ceilometer - include ::ceilometer::config - class { '::ceilometer::api' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::notification' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::agent::central' : - manage_service => false, - enabled => false, - } - class { '::ceilometer::collector' : - manage_service => false, - enabled => false, - } - include ::ceilometer::expirer - class { '::ceilometer::db' : - database_connection => $ceilometer_database_connection, - sync_db => $sync_db, - } - include ::ceilometer::agent::auth - include ::ceilometer::dispatcher::gnocchi - - Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } - - # httpd/apache and horizon - # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent - class { '::apache' : - service_enable => false, - # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? 
- } - include ::apache::mod::remoteip - include ::apache::mod::status - if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { - $_profile_support = 'cisco' - } else { - $_profile_support = 'None' - } - $neutron_options = {'profile_support' => $_profile_support } - - $memcached_ipv6 = hiera('memcached_ipv6', false) - if $memcached_ipv6 { - $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]') - } else { - $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1') - } - - class { '::horizon': - cache_server_ip => $horizon_memcached_servers, - neutron_options => $neutron_options, - } # Aodh class { '::aodh' : - database_connection => $ceilometer_database_connection, + database_connection => hiera('aodh_mysql_conn_string'), } include ::aodh::config include ::aodh::auth @@ -544,84 +82,11 @@ MYSQL_HOST=localhost\n", enabled => false, } - # Gnocchi - $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string') - include ::gnocchi::client - if $sync_db { - include ::gnocchi::db::sync - } - include ::gnocchi::storage - $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift')) - case $gnocchi_backend { - 'swift': { include ::gnocchi::storage::swift } - 'file': { include ::gnocchi::storage::file } - 'rbd': { include ::gnocchi::storage::ceph } - default: { fail('Unrecognized gnocchi_backend parameter.') } - } - class { '::gnocchi': - database_connection => $gnocchi_database_connection, - } - class { '::gnocchi::api' : - manage_service => false, - enabled => false, - service_name => 'httpd', - } - class { '::gnocchi::wsgi::apache' : - ssl => false, - } - class { '::gnocchi::metricd' : - manage_service => false, - enabled => false, - } - class { '::gnocchi::statsd' : - manage_service => false, - enabled => false, - } - - $snmpd_user = hiera('snmpd_readonly_user_name') - snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), - } - class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], - } - hiera_include('controller_classes') } #END STEP 4 if hiera('step') >= 5 { - # We now make sure that the root db password is set to a random one - # At first installation /root/.my.cnf will be empty and we connect without a root - # password. On second runs or updates /root/.my.cnf will already be populated - # with proper credentials. This step happens on every node because this sql - # statement does not automatically replicate across nodes. 
- exec { 'galera-set-root-password': - command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root", - } - file { '/root/.my.cnf' : - ensure => file, - mode => '0600', - owner => 'root', - group => 'root', - content => "[client] -user=root -password=\"${mysql_root_password}\" - -[mysql] -user=root -password=\"${mysql_root_password}\"", - require => Exec['galera-set-root-password'], - } - - $nova_enable_db_purge = hiera('nova_enable_db_purge', true) - - if $nova_enable_db_purge { - include ::nova::cron::archive_deleted_rows - } - if $pacemaker_master { pacemaker::constraint::base { 'openstack-core-then-httpd-constraint': @@ -633,177 +98,13 @@ password=\"${mysql_root_password}\"", require => [Pacemaker::Resource::Service[$::apache::params::service_name], Pacemaker::Resource::Ocf['openstack-core']], } - pacemaker::constraint::base { 'galera-then-openstack-core-constraint': - constraint_type => 'order', - first_resource => 'galera-master', - second_resource => 'openstack-core-clone', - first_action => 'promote', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['galera'], - Pacemaker::Resource::Ocf['openstack-core']], - } - - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - pacemaker::resource::service {'tomcat': - clone_params => 'interleave=true', - } - } - if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' { - #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat - pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], - } - pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::dhcp_agent_service}-clone", - second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint': - constraint_type => 'order', - first_resource => "${::neutron::params::metadata_agent_service}-clone", - second_resource => 'tomcat-clone', - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service], - Pacemaker::Resource::Service['tomcat']], - } - pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], - Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], - } - } - - # Nova - pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': - constraint_type => 'order', - first_resource => 
'openstack-core-clone', - second_resource => "${::nova::params::consoleauth_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::consoleauth_service_name}-clone", - second_resource => "${::nova::params::vncproxy_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], - } - pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation': - source => "${::nova::params::vncproxy_service_name}-clone", - target => "${::nova::params::consoleauth_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], - } - pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::vncproxy_service_name}-clone", - second_resource => "${::nova::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - Pacemaker::Resource::Service[$::nova::params::api_service_name]], - } - pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation': - source => "${::nova::params::api_service_name}-clone", - target => "${::nova::params::vncproxy_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - Pacemaker::Resource::Service[$::nova::params::api_service_name]], - } - pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::api_service_name}-clone", - second_resource => "${::nova::params::scheduler_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], - Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], - } - pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation': - source => "${::nova::params::scheduler_service_name}-clone", - target => "${::nova::params::api_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], - Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], - } - pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint': - constraint_type => 'order', - first_resource => "${::nova::params::scheduler_service_name}-clone", - second_resource => "${::nova::params::conductor_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], - Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], - } - pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation': - source => "${::nova::params::conductor_service_name}-clone", - target => "${::nova::params::scheduler_service_name}-clone", - score => 'INFINITY', - require => 
[Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], - Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], - } - # Ceilometer and Aodh - case downcase(hiera('ceilometer_backend')) { - /mysql/: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => Pacemaker::Resource::Ocf['openstack-core'], - } - } - default: { - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name: - clone_params => 'interleave=true', - require => [Pacemaker::Resource::Ocf['openstack-core'], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } - } - pacemaker::resource::service { $::ceilometer::params::collector_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::api_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name : - clone_params => 'interleave=true', - } # Fedora doesn't know `require-all` parameter for constraints yet if $::operatingsystem == 'Fedora' { - $redis_ceilometer_constraint_params = undef $redis_aodh_constraint_params = undef } else { - $redis_ceilometer_constraint_params = 'require-all=false' $redis_aodh_constraint_params = 'require-all=false' } - pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'redis-master', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'promote', - second_action => 'start', - constraint_params => $redis_ceilometer_constraint_params, - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]], - } pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint': constraint_type => 'order', first_resource => 'redis-master', @@ -814,49 +115,6 @@ password=\"${mysql_root_password}\"", require => [Pacemaker::Resource::Ocf['redis'], Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]], } - pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint': - constraint_type => 'order', - first_resource => 'openstack-core-clone', - second_resource => "${::ceilometer::params::agent_notification_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ocf['openstack-core']], - } - pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::agent_central_service_name}-clone", - second_resource => "${::ceilometer::params::collector_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } 
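The Ceilometer configuration dropped from the controller pacemaker manifest in this hunk reappears in the composable service templates added later in this patch (puppet/services/ceilometer-*.yaml), with the Puppet side delegated to puppet-tripleo profile classes via step_config. All of those templates follow the same role_data contract; as a minimal sketch of that shape, using a hypothetical "example" service (the service name, the example::some_setting key and the ::tripleo::profile::base::example class are placeholders, not part of the patch):

    heat_template_version: 2016-04-08

    parameters:
      EndpointMap:
        default: {}
        description: Mapping of service endpoint -> protocol.
        type: json

    outputs:
      role_data:
        description: Role data for the hypothetical example service.
        value:
          service_name: example
          config_settings:
            # hieradata consumed by the matching puppet-tripleo profile
            example::some_setting: true
          step_config: |
            include ::tripleo::profile::base::example

The config_settings map is merged into node hieradata, and step_config is the Puppet snippet applied at the appropriate deployment step.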
- pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint': - constraint_type => 'order', - first_resource => "${::ceilometer::params::collector_service_name}-clone", - second_resource => "${::ceilometer::params::api_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]], - } - pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation': - source => "${::ceilometer::params::api_service_name}-clone", - target => "${::ceilometer::params::collector_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name], - Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]], - } # Aodh pacemaker::resource::service { $::aodh::params::evaluator_service_name : clone_params => 'interleave=true', @@ -899,50 +157,6 @@ password=\"${mysql_root_password}\"", require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name], Pacemaker::Resource::Service[$::aodh::params::listener_service_name]], } - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint': - constraint_type => 'order', - first_resource => "${::mongodb::params::service_name}-clone", - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], - } - } - - # gnocchi - pacemaker::resource::service { $::gnocchi::params::metricd_service_name : - clone_params => 'interleave=true', - } - pacemaker::resource::service { $::gnocchi::params::statsd_service_name : - clone_params => 'interleave=true', - } - pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint': - constraint_type => 'order', - first_resource => "${::gnocchi::params::metricd_service_name}-clone", - second_resource => "${::gnocchi::params::statsd_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name], - Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], - } - pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation': - source => "${::gnocchi::params::statsd_service_name}-clone", - target => "${::gnocchi::params::metricd_service_name}-clone", - score => 'INFINITY', - require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name], - Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]], - } - - # Horizon and Keystone - pacemaker::resource::service { $::apache::params::service_name: - clone_params => 'interleave=true', - verify_on_create => true, - require => [File['/etc/keystone/ssl/certs/ca.pem'], - File['/etc/keystone/ssl/private/signing_key.pem'], - File['/etc/keystone/ssl/certs/signing_cert.pem']], - } #VSM if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') { diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 3585c993..1f04c581 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -16,46 +16,9 @@ include ::tripleo::packages include 
::tripleo::firewall -if hiera('step') >= 1 { - create_resources(kmod::load, hiera('kernel_modules'), {}) - create_resources(sysctl::value, hiera('sysctl_settings'), {}) - Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - - include ::timezone - - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } -} - if hiera('step') >= 4 { - class { '::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')), - } - if(!defined(File['/srv/node'])) { - file { '/srv/node': - ensure => directory, - owner => 'swift', - group => 'swift', - require => Package['openstack-swift'], - } - } - - $swift_components = ['account', 'container', 'object'] - swift::storage::filter::recon { $swift_components : } - swift::storage::filter::healthcheck { $swift_components : } - - $snmpd_user = hiera('snmpd_readonly_user_name') - snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), - } - class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], - } - hiera_include('object_classes') } -package_manifest{'/var/lib/tripleo/installed-packages/overcloud_object': ensure => present} +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_object', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index 134dc43b..7c7da586 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -16,46 +16,9 @@ include ::tripleo::packages include ::tripleo::firewall -create_resources(kmod::load, hiera('kernel_modules'), {}) -create_resources(sysctl::value, hiera('sysctl_settings'), {}) -Exec <| tag == 'kmod::load' |> -> Sysctl <| |> - -if count(hiera('ntp::servers')) > 0 { - include ::ntp -} - -include ::timezone - -include ::cinder -include ::cinder::config -include ::cinder::glance -include ::cinder::volume -include ::cinder::setup_test_volume - -$cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true) -if $cinder_enable_iscsi { - $cinder_iscsi_backend = 'tripleo_iscsi' - - cinder::backend::iscsi { $cinder_iscsi_backend : - iscsi_ip_address => hiera('cinder_iscsi_ip_address'), - iscsi_helper => hiera('cinder_iscsi_helper'), - } -} - -$cinder_enabled_backends = any2array($cinder_iscsi_backend) -class { '::cinder::backends' : - enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')), -} - -$snmpd_user = hiera('snmpd_readonly_user_name') -snmp::snmpv3_user { $snmpd_user: - authtype => 'MD5', - authpass => hiera('snmpd_readonly_user_password'), -} -class { '::snmp': - agentaddress => ['udp:161','udp6:[::1]:161'], - snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], +if hiera('step') >= 4 { + hiera_include('volume_classes') } -hiera_include('volume_classes') 
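With the service-specific configuration removed, the role manifests above reduce to step-gated hiera_include hooks plus a per-step package manifest, while the bulk of the configuration moves into the puppet/services/*.yaml templates added elsewhere in this patch. Those templates are consumed through Heat resource registry mappings; as an illustration only (the key and path below are assumed for the example, the actual mappings typically live in overcloud-resource-registry-puppet.yaml and are not part of this hunk):

    resource_registry:
      # illustrative mapping; real keys and paths are defined in the
      # resource registry file, not in this patch hunk
      OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml

Each mapped template then contributes its config_settings and step_config to whichever role includes that service.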
-package_manifest{'/var/lib/tripleo/installed-packages/overcloud_volume': ensure => present} +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_volume', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp deleted file mode 100644 index 2411ff84..00000000 --- a/puppet/manifests/ringbuilder.pp +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -define add_devices( - $swift_zones = '1' -){ - - # NOTE(dprince): Swift zones is not yet properly wired into the Heat - # templates. See: https://review.openstack.org/#/c/97758/3 - # For now our regex supports the r1z1-192.0.2.6:%PORT%/d1 syntax or the - # newer r1z%<controller or SwiftStorage><N>%-192.0.2.6:%PORT%/d1 syntax. - $server_num_or_device = regsubst($name,'^r1z%+[A-Za-z]*([0-9]+)%+-(.*)$','\1') - if (is_integer($server_num_or_device)) { - $server_num = $server_num_or_device - } else { - $server_num = '1' - } - # Function to place server in its zone. Zone is calculated by - # server number in heat template modulo the number of zones + 1. - $zone = (($server_num%$swift_zones) + 1) - - # add the rings - $base = regsubst($name,'^r1.*-(.*)$','\1') - $object = regsubst($base, '%PORT%', '6000') - ring_object_device { $object: - zone => '1', - weight => 100, - } - $container = regsubst($base, '%PORT%', '6001') - ring_container_device { $container: - zone => '1', - weight => 100, - } - $account = regsubst($base, '%PORT%', '6002') - ring_account_device { $account: - zone => '1', - weight => 100, - } -} - -class tripleo::ringbuilder ( - $swift_zones = '1', - $devices = '', - $build_ring = true, - $part_power, - $replicas, - $min_part_hours, -) { - - validate_bool($build_ring) - - if $build_ring { - - $device_array = strip(split(rstrip($devices), ',')) - - # create local rings - swift::ringbuilder::create{ ['object', 'account', 'container']: - part_power => $part_power, - replicas => min(count($device_array), $replicas), - min_part_hours => $min_part_hours, - } -> - - # add all other devices - add_devices {$device_array: - swift_zones => $swift_zones, - } -> - - # rebalance - swift::ringbuilder::rebalance{ ['object', 'account', 'container']: - seed => 999, - } - - Ring_object_device<| |> ~> Exec['rebalance_object'] - Ring_object_device<| |> ~> Exec['rebalance_account'] - Ring_object_device<| |> ~> Exec['rebalance_container'] - - } -} - -if hiera('step') >= 2 { - # pre-install swift here so we can build rings - include ::swift -} - -if hiera('step') >= 3 { - include ::tripleo::ringbuilder -} diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml new file mode 100644 index 00000000..80f656d0 --- /dev/null +++ b/puppet/services/ceilometer-agent-central.yaml @@ -0,0 +1,44 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Central Agent service configured with Puppet + 
+parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RedisPassword: + description: The password for the redis service account. + type: string + hidden: true + RedisVirtualIPUri: + type: string + default: '' + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Central Agent role. + value: + service_name: ceilometer_agent_central + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::central::coordination_url: + list_join: + - '' + - - 'redis://:' + - {get_param: RedisPassword} + - '@' + - {get_param: RedisVirtualIPUri} + - ':6379/' + step_config: | + include ::tripleo::profile::base::ceilometer::agent::central diff --git a/puppet/services/ceilometer-agent-compute.yaml b/puppet/services/ceilometer-agent-compute.yaml new file mode 100644 index 00000000..181c64d2 --- /dev/null +++ b/puppet/services/ceilometer-agent-compute.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Compute Agent service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Compute Agent role. + value: + service_name: ceilometer_agent_compute + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::agent::compute diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml new file mode 100644 index 00000000..58e28a3d --- /dev/null +++ b/puppet/services/ceilometer-agent-notification.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Notification Agent service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Notification Agent role. + value: + service_name: ceilometer_agent_notification + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::agent::notification diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml new file mode 100644 index 00000000..c5c143b0 --- /dev/null +++ b/puppet/services/ceilometer-api.yaml @@ -0,0 +1,35 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer API service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer API role. + value: + service_name: ceilometer_api + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - tripleo.ceilometer_api.firewall_rules: + '124 ceilometer': + dport: + - 8777 + - 13777 + - ceilometer::api::keystone_tenant: 'service' + step_config: | + include ::tripleo::profile::base::ceilometer::api diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml new file mode 100644 index 00000000..db5a82b1 --- /dev/null +++ b/puppet/services/ceilometer-base.yaml @@ -0,0 +1,111 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + CeilometerBackend: + default: 'mongodb' + description: The ceilometer backend type. + type: string + CeilometerMeteringSecret: + description: Secret shared by the ceilometer services. + type: string + hidden: true + CeilometerPassword: + description: The password for the ceilometer service account. + type: string + hidden: true + CeilometerMeterDispatcher: + default: 'gnocchi' + description: Dispatcher to process meter data + type: string + constraints: + - allowed_values: ['gnocchi', 'database'] + CeilometerWorkers: + default: 0 + description: Number of workers for Ceilometer service. + type: number + CeilometerStoreEvents: + default: false + description: Whether to store events in ceilometer. + type: boolean + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Role data for the Ceilometer role. 
+ value: + service_name: ceilometer_base + config_settings: + ceilometer::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - - '://ceilometer:' + - {get_param: CeilometerPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ceilometer' + ceilometer_backend: {get_param: CeilometerBackend} + ceilometer::metering_secret: {get_param: CeilometerMeteringSecret} + # we include db_sync class in puppet-tripleo + ceilometer::db::sync_db: false + ceilometer::api::keystone_password: {get_param: CeilometerPassword} + ceilometer::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + ceilometer::api::keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword} + ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } + ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents} + ceilometer::agent::auth::auth_region: 'regionOne' + ceilometer::agent::auth::auth_tenant_name: 'service' + ceilometer::agent::auth::auth_endpoint_type: 'internalURL' + ceilometer::db::mysql::password: {get_param: CeilometerPassword} + ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher} + ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]} + ceilometer::dispatcher::gnocchi::filter_project: 'service' + ceilometer::dispatcher::gnocchi::archive_policy: 'low' + ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml' + ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]} + ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]} + ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]} + ceilometer::keystone::auth::password: {get_param: CeilometerPassword} + ceilometer::keystone::auth::region: {get_param: KeystoneRegion} + ceilometer::keystone::auth::tenant: 'service' + ceilometer::rabbit_userid: {get_param: RabbitUserName} + ceilometer::rabbit_password: {get_param: RabbitPassword} + ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + ceilometer::rabbit_port: {get_param: RabbitClientPort} + ceilometer::db::mysql::user: ceilometer + ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + ceilometer::db::mysql::dbname: ceilometer + ceilometer::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + ceilometer::rabbit_heartbeat_timeout_threshold: 60 diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml new file mode 100644 index 00000000..7a7bc19d --- /dev/null +++ b/puppet/services/ceilometer-collector.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Collector service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Collector role. 
+ value: + service_name: ceilometer_collector + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::collector diff --git a/puppet/services/ceilometer-expirer.yaml b/puppet/services/ceilometer-expirer.yaml new file mode 100644 index 00000000..c960e6dc --- /dev/null +++ b/puppet/services/ceilometer-expirer.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Expirer service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + + +resources: + CeilometerServiceBase: + type: ./ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Expirer role. + value: + service_name: ceilometer_expirer + config_settings: + get_attr: [CeilometerServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceilometer::expirer diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml new file mode 100644 index 00000000..bec4340e --- /dev/null +++ b/puppet/services/ceph-base.yaml @@ -0,0 +1,92 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph base service. Shared by all Ceph services. + +parameters: + CephAdminKey: + description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key. + type: string + hidden: true + CephClientKey: + description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring. + type: string + hidden: true + CephClientUserName: + default: openstack + type: string + CephClusterFSID: + type: string + description: The Ceph cluster FSID. Must be a UUID. + CephIPv6: + default: False + type: boolean + CinderRbdPoolName: + default: volumes + type: string + CinderBackupRbdPoolName: + default: backups + type: string + GlanceRbdPoolName: + default: images + type: string + GnocchiRbdPoolName: + default: metrics + type: string + NovaRbdPoolName: + default: vms + type: string + # DEPRECATED options for compatibility with overcloud.yaml + # This should be removed and manipulation of the ControllerServices list + # used instead, but we need client support for that first + ControllerEnableCephStorage: + default: false + description: Whether to deploy Ceph Storage (OSD) on the Controller + type: boolean + +parameter_groups: +- label: deprecated + description: Do not use deprecated params, they will be removed. + parameters: + - ControllerEnableCephStorage + +outputs: + role_data: + description: Role data for the Ceph base service. 
+ value: + service_name: ceph_base + config_settings: + tripleo::profile::base::ceph::ceph_ipv6: {get_param: CephIPv6} + tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage} + ceph::profile::params::fsid: {get_param: CephClusterFSID} + ceph::profile::params::client_keys: + str_replace: + template: "{ + client.admin: { + secret: 'ADMIN_KEY', + mode: '0600', + cap_mon: 'allow *', + cap_osd: 'allow *', + cap_mds: 'allow *' + }, + client.bootstrap-osd: { + secret: 'ADMIN_KEY', + keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring', + cap_mon: 'allow profile bootstrap-osd' + }, + client.CLIENT_USER: { + secret: 'CLIENT_KEY', + mode: '0644', + cap_mon: 'allow r', + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' + } + }" + params: + CLIENT_USER: {get_param: CephClientUserName} + CLIENT_KEY: {get_param: CephClientKey} + ADMIN_KEY: {get_param: CephAdminKey} + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} diff --git a/puppet/services/ceph-client.yaml b/puppet/services/ceph-client.yaml new file mode 100644 index 00000000..33bbbe58 --- /dev/null +++ b/puppet/services/ceph-client.yaml @@ -0,0 +1,25 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph Client service. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CephBase: + type: ./ceph-base.yaml + +outputs: + role_data: + description: Role data for the Cinder OSD service. + value: + service_name: ceph_client + config_settings: + get_attr: [CephBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::ceph::client diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml new file mode 100644 index 00000000..f6fe26db --- /dev/null +++ b/puppet/services/ceph-external.yaml @@ -0,0 +1,64 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph External service. + +parameters: + CephClientKey: + description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring. + type: string + hidden: true + CephClientUserName: + default: openstack + type: string + CephClusterFSID: + type: string + description: The Ceph cluster FSID. Must be a UUID. + CephExternalMonHost: + default: '' + type: string + description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments. + CinderRbdPoolName: + default: volumes + type: string + CinderBackupRbdPoolName: + default: backups + type: string + GlanceRbdPoolName: + default: images + type: string + GnocchiRbdPoolName: + default: metrics + type: string + NovaRbdPoolName: + default: vms + type: string + +outputs: + role_data: + description: Role data for the Ceph External service. 
+ value: + service_name: ceph_external + config_settings: + tripleo::profile::base::ceph::ceph_mon_host: {get_param: CephExternalMonHost} + ceph::profile::params::fsid: {get_param: CephClusterFSID} + ceph::profile::params::client_keys: + str_replace: + template: "{ + client.CLIENT_USER: { + secret: 'CLIENT_KEY', + mode: '0644', + cap_mon: 'allow r', + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' + } + }" + params: + CLIENT_USER: {get_param: CephClientUserName} + CLIENT_KEY: {get_param: CephClientKey} + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} + step_config: | + include ::tripleo::profile::base::ceph::client diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml new file mode 100644 index 00000000..f48515e5 --- /dev/null +++ b/puppet/services/ceph-mon.yaml @@ -0,0 +1,60 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph Monitor service. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + CephIPv6: + default: False + type: boolean + CephMonKey: + description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key. + type: string + hidden: true + CinderRbdPoolName: + default: volumes + type: string + CinderBackupRbdPoolName: + default: backups + type: string + GlanceRbdPoolName: + default: images + type: string + GnocchiRbdPoolName: + default: metrics + type: string + NovaRbdPoolName: + default: vms + type: string + +resources: + CephBase: + type: ./ceph-base.yaml + +outputs: + role_data: + description: Role data for the Ceph Monitor service. + value: + service_name: ceph_mon + config_settings: + map_merge: + - get_attr: [CephBase, role_data, config_settings] + - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6} + ceph::profile::params::mon_key: {get_param: CephMonKey} + tripleo::profile::base::ceph::mon::ceph_pools: + - {get_param: CinderRbdPoolName} + - {get_param: CinderBackupRbdPoolName} + - {get_param: NovaRbdPoolName} + - {get_param: GlanceRbdPoolName} + - {get_param: GnocchiRbdPoolName} + tripleo.ceph_mon.firewall_rules: + '110 ceph_mon': + dport: + - 6789 + step_config: | + include ::tripleo::profile::base::ceph::mon diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml new file mode 100644 index 00000000..21cefb34 --- /dev/null +++ b/puppet/services/ceph-osd.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + Ceph OSD service. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CephBase: + type: ./ceph-base.yaml + +outputs: + role_data: + description: Role data for the Cinder OSD service. 
+ value: + service_name: ceph_osd + config_settings: + map_merge: + - get_attr: [CephBase, role_data, config_settings] + - tripleo.ceph_osd.firewall_rules: + '111 ceph_osd': + dport: + - '6800-7300' + step_config: | + include ::tripleo::profile::base::ceph::osd diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml index c53bef6f..5e58dee9 100644 --- a/puppet/services/cinder-api.yaml +++ b/puppet/services/cinder-api.yaml @@ -30,13 +30,26 @@ outputs: role_data: description: Role data for the Cinder API role. value: + service_name: cinder_api config_settings: map_merge: - get_attr: [CinderBase, role_data, config_settings] - cinder::api::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} cinder::api::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} cinder::api::keystone_password: {get_param: CinderPassword} + cinder::api::keystone_tenant: 'service' + cinder::api::enable_proxy_headers_parsing: true + cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL' + # TODO(emilien) move it to puppet-cinder + cinder::config: + DEFAULT/swift_catalog_info: + value: 'object-store:swift:internalURL' cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge} + tripleo.cinder_api.firewall_rules: + '119 cinder': + dport: + - 8776 + - 13776 step_config: | include ::tripleo::profile::base::cinder::api diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml index 85682448..b224cd65 100644 --- a/puppet/services/cinder-base.yaml +++ b/puppet/services/cinder-base.yaml @@ -40,6 +40,7 @@ outputs: role_data: description: Role data for the Cinder base service. value: + service_name: cinder_base config_settings: cinder::database_connection: list_join: @@ -56,3 +57,13 @@ outputs: cinder::rabbit_userid: {get_param: RabbitUserName} cinder::rabbit_password: {get_param: RabbitPassword} cinder::rabbit_port: {get_param: RabbitClientPort} + cinder::db::mysql::user: cinder + cinder::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + cinder::db::mysql::dbname: cinder + cinder::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + cinder::rabbit_heartbeat_timeout_threshold: 60 + cinder::keystone::auth::tenant: 'service' + cinder::host: hostgroup + cinder::cron::db_purge::destination: '/dev/null' diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml index 6bdf86bc..e03090a2 100644 --- a/puppet/services/cinder-scheduler.yaml +++ b/puppet/services/cinder-scheduler.yaml @@ -21,7 +21,10 @@ outputs: role_data: description: Role data for the Cinder Scheduler role. value: + service_name: cinder_scheduler config_settings: - get_attr: [CinderBase, role_data, config_settings] + map_merge: + - get_attr: [CinderBase, role_data, config_settings] + - cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler step_config: | include ::tripleo::profile::base::cinder::scheduler diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml index d28f40e6..9f49bc06 100644 --- a/puppet/services/cinder-volume.yaml +++ b/puppet/services/cinder-volume.yaml @@ -36,6 +36,12 @@ parameters: NFS servers used by Cinder NFS backend. Effective when CinderEnableNfsBackend is true. 
type: comma_delimited_list + CinderRbdPoolName: + default: volumes + type: string + CephClientUserName: + default: openstack + type: string EndpointMap: default: {} description: Mapping of service endpoint -> protocol. Typically set @@ -53,6 +59,7 @@ outputs: role_data: description: Role data for the Cinder Volume role. value: + service_name: cinder_volume config_settings: map_merge: - get_attr: [CinderBase, role_data, config_settings] @@ -67,5 +74,10 @@ outputs: SERVERS: {get_param: CinderNfsServers} tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper} + tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName} + tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName} + tripleo.cinder_volume.firewall_rules: + '120 iscsi initiator': + dport: 3260 step_config: | include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/database/mongodb-base.yaml b/puppet/services/database/mongodb-base.yaml index ecd1d319..b8761320 100644 --- a/puppet/services/database/mongodb-base.yaml +++ b/puppet/services/database/mongodb-base.yaml @@ -24,7 +24,9 @@ outputs: role_data: description: Role data for the MongoDB base service. value: + service_name: mongodb_base config_settings: mongodb::server::nojournal: {get_param: MongoDbNoJournal} + mongodb::server::journal: false mongodb::server::ipv6: {get_param: MongoDbIPv6} - mongodb::server::replset: {get_param: MongoDbReplset}
\ No newline at end of file + mongodb::server::replset: {get_param: MongoDbReplset} diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml index c0488700..6885cfd6 100644 --- a/puppet/services/database/mongodb.yaml +++ b/puppet/services/database/mongodb.yaml @@ -19,10 +19,18 @@ outputs: role_data: description: Service mongodb using composable services. value: + service_name: mongodb config_settings: map_merge: - get_attr: [MongoDbBase, role_data, config_settings] - tripleo::profile::base::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]} mongodb::server::service_manage: True + tripleo.mongodb.firewall_rules: + '101 mongodb_config': + dport: 27019 + '102 mongodb_sharding': + dport: 27018 + '103 mongod': + dport: 27017 step_config: | - include ::tripleo::profile::base::database::mongodb
\ No newline at end of file + include ::tripleo::profile::base::database::mongodb diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml new file mode 100644 index 00000000..6f8f91b5 --- /dev/null +++ b/puppet/services/database/mysql.yaml @@ -0,0 +1,37 @@ +heat_template_version: 2016-04-08 + +description: > + MySQL service deployment using puppet + +parameters: + #Parameters not used EndpointMap + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Service MySQL using composable services. + value: + service_name: mysql + config_settings: + # The Galera package should work in cluster and + # non-cluster modes based on the config file. + # We set the package name here explicitly so + # that it matches what we pre-install + # in tripleo-puppet-elements. + mysql::server::package_name: 'mariadb-galera-server' + mysql::server::manage_config_file: true + tripleo.mysql.firewall_rules: + '104 mysql galera': + dport: + - 873 + - 3306 + - 4444 + - 4567 + - 4568 + - 9200 + step_config: | + include ::tripleo::profile::base::database::mysql diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml index 77b3c9f0..fe8c0659 100644 --- a/puppet/services/database/redis-base.yaml +++ b/puppet/services/database/redis-base.yaml @@ -13,9 +13,13 @@ outputs: role_data: description: Role data for the redis role. value: + service_name: redis_base config_settings: - redis::requirepass: {get_param: RedisPassword} - redis::masterauth: {get_param: RedisPassword} - redis::sentinel_auth_pass: {get_param: RedisPassword} - tripleo::loadbalancer::redis_password: {get_param: RedisPassword} - + redis::requirepass: {get_param: RedisPassword} + redis::masterauth: {get_param: RedisPassword} + redis::sentinel_auth_pass: {get_param: RedisPassword} + redis::port: 6379 + redis::sentinel::master_name: '"%{hiera(\"bootstrap_nodeid\")}"' + redis::sentinel::redis_host: '"%{hiera(\"bootstrap_nodeid_ip\")}"' + redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh' + tripleo::loadbalancer::redis_password: {get_param: RedisPassword} diff --git a/puppet/services/database/redis.yaml b/puppet/services/database/redis.yaml index 2669592a..ef005f77 100644 --- a/puppet/services/database/redis.yaml +++ b/puppet/services/database/redis.yaml @@ -18,8 +18,14 @@ outputs: role_data: description: Role data for the redis role. value: + service_name: redis config_settings: map_merge: - get_attr: [RedisBase, role_data, config_settings] + - tripleo.redis.firewall_rules: + '108 redis': + dport: + - 6379 + - 26379 step_config: | include ::tripleo::profile::base::database::redis diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index 89e6ee0f..b0eeadeb 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -9,6 +9,9 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + CephClientUserName: + default: openstack + type: string Debug: default: '' description: Set to True to enable debugging on all services. @@ -36,6 +39,9 @@ parameters: default: 0 description: Number of workers for Glance service. 
type: number + GlanceRbdPoolName: + default: images + type: string RabbitPassword: description: The password for RabbitMQ type: string @@ -59,6 +65,7 @@ outputs: role_data: description: Role data for the Glance API role. value: + service_name: glance_api config_settings: glance::api::database_connection: list_join: @@ -85,6 +92,9 @@ outputs: glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] } glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_param: GlancePassword} + glance::backend::swift::swift_store_create_container_on_put: true + glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} + glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} glance_backend: {get_param: GlanceBackend} glance::db::mysql::password: {get_param: GlancePassword} glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName} @@ -95,5 +105,15 @@ outputs: glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]} glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]} glance::keystone::auth::password: {get_param: GlancePassword } + tripleo.glance_api.firewall_rules: + '112 glance_api': + dport: + - 9292 + - 13292 + glance::keystone::auth::tenant: 'service' + glance::api::keystone_tenant: 'service' + glance::api::pipeline: 'keystone' + glance::api::show_image_direct_url: true + step_config: | include ::tripleo::profile::base::glance::api diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml index 6f2f0372..5ad4bb9a 100644 --- a/puppet/services/glance-registry.yaml +++ b/puppet/services/glance-registry.yaml @@ -26,6 +26,7 @@ outputs: role_data: description: Role data for the Glance Registry role. value: + service_name: glance_registry config_settings: glance::registry::database_connection: list_join: @@ -37,9 +38,22 @@ outputs: - {get_param: [EndpointMap, MysqlInternal, host]} - '/glance' glance::registry::keystone_password: {get_param: GlancePassword} + glance::registry::keystone_tenant: 'service' + glance::registry::pipeline: 'keystone' glance::registry::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } glance::registry::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } glance::registry::debug: {get_param: Debug} glance::registry::workers: {get_param: GlanceWorkers} + glance::db::mysql::user: glance + glance::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + glance::db::mysql::dbname: glance + glance::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + + tripleo.glance_registry.firewall_rules: + '112 glance_registry': + dport: + - 9191 step_config: | include ::tripleo::profile::base::glance::registry diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml new file mode 100644 index 00000000..d97626a6 --- /dev/null +++ b/puppet/services/gnocchi-api.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2016-04-08 + +description: > + Gnocchi service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + GnocchiServiceBase: + type: ./gnocchi-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Gnocchi role. 
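The list_join blocks above are plain string concatenation; with the values in this change the Glance connection settings resolve to a standard SQLAlchemy URL, roughly as sketched below (protocol, host and password are placeholders filled in from EndpointMap and the generated GlancePassword, not literal values)::

    glance::api::database_connection: <MysqlInternal protocol>://glance:<GlancePassword>@<MysqlInternal host>/glance
    glance::registry::database_connection: <MysqlInternal protocol>://glance:<GlancePassword>@<MysqlInternal host>/glance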
+ value: + service_name: gnocchi_api + config_settings: + map_merge: + - get_attr: [GnocchiServiceBase, role_data, config_settings] + - tripleo.gnocchi_api.firewall_rules: + '129 gnocchi-api': + dport: + - 8041 + - 13041 + - gnocchi::api::keystone_tenant: 'service' + step_config: | + include ::tripleo::profile::base::gnocchi::api diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml new file mode 100644 index 00000000..5c1e015e --- /dev/null +++ b/puppet/services/gnocchi-base.yaml @@ -0,0 +1,94 @@ +heat_template_version: 2016-04-08 + +description: > + Gnocchi service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + GnocchiBackend: + default: file + description: The short name of the Gnocchi backend to use. Should be one + of swift, rbd, or file + type: string + constraints: + - allowed_values: ['swift', 'file', 'rbd'] + GnocchiIndexerBackend: + default: 'mysql' + description: The short name of the Gnocchi indexer backend to use. + type: string + GnocchiPassword: + description: The password for the gnocchi service and db account. + type: string + hidden: true + GnocchiRbdPoolName: + default: metrics + type: string + CephClientUserName: + default: openstack + type: string + +outputs: + aux_parameters: + description: Additional parameters referenced outside the base file + value: + gnocchi_indexer_backend: {get_param: GnocchiIndexerBackend} + role_data: + description: Shared role data for the Heat services. + value: + service_name: gnocchi_base + config_settings: + #Gnocchi engine + gnocchi::debug: {get_input: debug} + gnocchi::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://gnocchi:' + - {get_param: GnocchiPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/gnocchi' + gnocchi::keystone::auth::region: 'regionOne' + gnocchi::keystone::auth::tenant: 'service' + gnocchi::keystone::auth::password: {get_param: GnocchiPassword} + gnocchi::db::mysql::password: {get_param: GnocchiPassword} + gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types' + #Gnocchi API + tripleo::profile::base::gnocchi::api::gnocchi_backend: {get_param: GnocchiBackend} + gnocchi::api::manage_service: false + gnocchi::api::enabled: true + gnocchi::api::service_name: 'httpd' + gnocchi::api::keystone_tenant: 'service' + gnocchi::api::keystone_password: {get_param: GnocchiPassword} + gnocchi::wsgi::apache::ssl: false + gnocchi::storage::swift::swift_user: 'service:gnocchi' + gnocchi::storage::swift::swift_auth_version: 2 + gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword} + gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName} + gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName} + gnocchi::storage::ceph::ceph_keyring: + list_join: + - '.' 
+ - - '/etc/ceph/ceph' + - 'client' + - {get_param: CephClientUserName} + - 'keyring' + #Gnocchi statsd + gnocchi::statsd::manage_service: false + gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26' + gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3' + gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616' + gnocchi::statsd::flush_delay: 10 + gnocchi::statsd::archive_policy_name: 'low' + gnocchi::db::mysql::user: gnocchi + gnocchi::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + gnocchi::db::mysql::dbname: gnocchi + gnocchi::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + gnocchi::auth::auth_region: 'regionOne' + gnocchi::auth::auth_tenant_name: 'service' diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml new file mode 100644 index 00000000..8041c6f4 --- /dev/null +++ b/puppet/services/gnocchi-metricd.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + Gnocchi service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + GnocchiServiceBase: + type: ./gnocchi-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Gnocchi role. + value: + service_name: gnocchi_metricd + config_settings: + map_merge: + - get_attr: [GnocchiServiceBase, role_data, config_settings] + - gnocchi::metricd::manage_service: false + step_config: | + include ::tripleo::profile::base::gnocchi::metricd diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml new file mode 100644 index 00000000..9c8e6897 --- /dev/null +++ b/puppet/services/gnocchi-statsd.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + Gnocchi service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + GnocchiServiceBase: + type: ./gnocchi-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Gnocchi role. + value: + service_name: gnocchi_statsd + config_settings: + map_merge: + - get_attr: [GnocchiServiceBase, role_data, config_settings] + - gnocchi::statsd::manage_service: false + step_config: | + include ::tripleo::profile::base::gnocchi::statsd diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml index 844637bc..902a1c3f 100644 --- a/puppet/services/haproxy.yaml +++ b/puppet/services/haproxy.yaml @@ -14,5 +14,31 @@ outputs: role_data: description: Role data for the HAproxy role. 
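As a concrete example of the list_join just above, Gnocchi's ceph keyring path is assembled by joining the elements with '.', so with the default CephClientUserName of openstack it resolves to::

    gnocchi::storage::ceph::ceph_keyring: /etc/ceph/ceph.client.openstack.keyring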
value: + service_name: haproxy + config_settings: + tripleo.haproxy.firewall_rules: + '107 haproxy stats': + dport: 1993 + # TODO(emilien) make it composable to find which services are actually running + tripleo::haproxy::keystone_admin: true + tripleo::haproxy::keystone_public: true + tripleo::haproxy::neutron: true + tripleo::haproxy::cinder: true + tripleo::haproxy::glance_api: true + tripleo::haproxy::glance_registry: true + tripleo::haproxy::nova_osapi: true + tripleo::haproxy::nova_metadata: true + tripleo::haproxy::nova_novncproxy: true + tripleo::haproxy::mysql: true + tripleo::haproxy::redis: true + tripleo::haproxy::sahara: true + tripleo::haproxy::swift_proxy_server: true + tripleo::haproxy::ceilometer: true + tripleo::haproxy::aodh: true + tripleo::haproxy::gnocchi: true + tripleo::haproxy::heat_api: true + tripleo::haproxy::heat_cloudwatch: true + tripleo::haproxy::heat_cfn: true + tripleo::haproxy::horizon: true step_config: | include ::tripleo::profile::base::haproxy diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml index c1f26c15..f8832dd8 100644 --- a/puppet/services/heat-api-cfn.yaml +++ b/puppet/services/heat-api-cfn.yaml @@ -30,6 +30,7 @@ outputs: role_data: description: Role data for the Heat CloudFormation API role. value: + service_name: heat_api_cfn config_settings: map_merge: - get_attr: [HeatBase, role_data, config_settings] @@ -39,5 +40,10 @@ outputs: heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]} heat::keystone::auth_cfn::password: {get_param: HeatPassword} heat::keystone::auth::region: {get_param: KeystoneRegion} + tripleo.heat_api_cfn.firewall_rules: + '125 heat_cfn': + dport: + - 8000 + - 13800 step_config: | include ::tripleo::profile::base::heat::api_cfn diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml index 2c56951b..b4669ac3 100644 --- a/puppet/services/heat-api-cloudwatch.yaml +++ b/puppet/services/heat-api-cloudwatch.yaml @@ -22,9 +22,15 @@ outputs: role_data: description: Role data for the Heat Cloudwatch API role. value: + service_name: heat_api_cloudwatch config_settings: map_merge: - get_attr: [HeatBase, role_data, config_settings] - heat::api_cloudwatch::workers: {get_param: HeatWorkers} + tripleo.heat_api_cloudwatch.firewall_rules: + '125 heat_cloudwatch': + dport: + - 8003 + - 13003 step_config: | include ::tripleo::profile::base::heat::api_cloudwatch diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml index d3461e63..c0e7a690 100644 --- a/puppet/services/heat-api.yaml +++ b/puppet/services/heat-api.yaml @@ -30,6 +30,7 @@ outputs: role_data: description: Role data for the Heat API role. value: + service_name: heat_api config_settings: map_merge: - get_attr: [HeatBase, role_data, config_settings] @@ -39,5 +40,10 @@ outputs: heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]} heat::keystone::auth::password: {get_param: HeatPassword} heat::keystone::auth::region: {get_param: KeystoneRegion} + tripleo.heat_api.firewall_rules: + '125 heat_api': + dport: + - 8004 + - 13004 step_config: | include ::tripleo::profile::base::heat::api diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml index 8617df27..c40136f5 100644 --- a/puppet/services/heat-base.yaml +++ b/puppet/services/heat-base.yaml @@ -31,6 +31,7 @@ outputs: role_data: description: Shared role data for the Heat services. 
value: + service_name: heat_base config_settings: heat::rabbit_userid: {get_param: RabbitUserName} heat::rabbit_password: {get_param: RabbitPassword} @@ -38,3 +39,20 @@ outputs: heat::rabbit_port: {get_param: RabbitClientPort} heat::debug: {get_param: Debug} heat::enable_proxy_headers_parsing: true + # We need this because the default heat policy.json no longer works on TripleO + # https://git.openstack.org/cgit/openstack/heat/commit/?id=ac86702172ddf01f5bdc3f3cd99d2e32ad9b7024 + heat::policy::policies: + context_is_admin: + key: 'context_is_admin' + value: 'role:admin' + heat::rabbit_heartbeat_timeout_threshold: 60 + heat::keystone_tenant: 'service' + heat::keystone::auth::tenant: 'service' + heat::keystone::domain::domain_name: 'heat_stack' + heat::keystone::domain::domain_admin: 'heat_stack_domain_admin' + heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost' + heat::auth_plugin: 'password' + heat::cron::purge_deleted::age: 30 + heat::cron::purge_deleted::age_type: 'days' + heat::cron::purge_deleted::maxdelay: 3600 + heat::cron::purge_deleted::destination: '/dev/null' diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml index 4a5ec2c0..13555a62 100644 --- a/puppet/services/heat-engine.yaml +++ b/puppet/services/heat-engine.yaml @@ -35,10 +35,13 @@ outputs: role_data: description: Role data for the Heat Engine role. value: + service_name: heat_engine config_settings: map_merge: - get_attr: [HeatBase, role_data, config_settings] - heat::engine::num_engine_workers: {get_param: HeatWorkers} + heat::engine::configure_delegated_roles: false + heat::engine::trusts_delegated_roles: [] tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge} heat::database_connection: list_join: @@ -54,5 +57,11 @@ outputs: heat::keystone_password: {get_param: HeatPassword} heat::db::mysql::password: {get_param: HeatPassword} heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword} + heat::db::mysql::user: heat + heat::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + heat::db::mysql::dbname: heat + heat::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" step_config: | include ::tripleo::profile::base::heat::engine diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml new file mode 100644 index 00000000..64cf450a --- /dev/null +++ b/puppet/services/horizon.yaml @@ -0,0 +1,46 @@ +heat_template_version: 2016-04-08 + +description: > + Horizon service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + HorizonAllowedHosts: + default: '*' + description: A list of IP/Hostname allowed to connect to horizon + type: comma_delimited_list + NeutronMechanismDrivers: + default: 'openvswitch' + description: | + The mechanism drivers for the Neutron tenant network. + type: comma_delimited_list + +outputs: + role_data: + description: Role data for the Horizon role. 
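The heat::policy::policies hash above is the hiera form of a policy override; puppet-heat writes each key/value pair into Heat's policy file, so the net effect (assuming the default /etc/heat/policy.json path) is sketched below::

    heat::policy::policies:
      context_is_admin:
        key: 'context_is_admin'
        value: 'role:admin'
    # rendered by puppet-heat into /etc/heat/policy.json as roughly:
    #   "context_is_admin": "role:admin"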
+ value: + service_name: horizon + config_settings: + horizon::allowed_hosts: {get_param: HorizonAllowedHosts} + neutron::plugins::ml2::mechanism_drivers: + str_replace: + template: MECHANISMS + params: + MECHANISMS: {get_param: NeutronMechanismDrivers} + tripleo.horizon.firewall_rules: + '126 horizon': + dport: + - 80 + - 443 + horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache + horizon::django_session_engine: 'django.contrib.sessions.backends.cache' + horizon::vhost_extra_params: + add_listen: false + priority: 10 + access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' + step_config: | + include ::tripleo::profile::base::horizon diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml new file mode 100644 index 00000000..949cdf31 --- /dev/null +++ b/puppet/services/ironic-api.yaml @@ -0,0 +1,43 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ironic API configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + IronicPassword: + description: The password for the Ironic service and db account, used by the Ironic services + type: string + hidden: true + +resources: + IronicBase: + type: ./ironic-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ironic API role. + value: + service_name: ironic_api + config_settings: + map_merge: + - get_attr: [IronicBase, role_data, config_settings] + # NOTE(dtantsur): the my_ip parameter is heavily overloaded in + # ironic. It's used as a default value for e.g. TFTP server IP, + # glance and neutron endpoints, virtual console IP. We override + # the TFTP server IP in ironic-conductor.yaml as it should not be + # the VIP, but rather a real IP of the controller. + - ironic::my_ip: {get_param: [EndpointMap, MysqlInternal, host]} + ironic::api::admin_password: {get_param: IronicPassword} + ironic::keystone::auth::public_url: {get_param: [EndpointMap, IronicPublic, uri]} + ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri]} + ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri]} + ironic::keystone::auth::password: {get_param: IronicPassword } + step_config: | + include ::tripleo::profile::base::ironic::api diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml new file mode 100644 index 00000000..41d2234a --- /dev/null +++ b/puppet/services/ironic-base.yaml @@ -0,0 +1,70 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ironic services configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + Debug: + default: '' + description: Set to True to enable debugging on all services. 
+ type: string + IronicPassword: + description: The password for the Ironic service and db account, used by the Ironic services + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + +outputs: + role_data: + description: Role data for the Ironic role. + value: + service_name: ironic_base + config_settings: + ironic::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://ironic:' + - {get_param: IronicPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ironic' + ironic::admin_tenant_name: 'service' + ironic::debug: {get_param: Debug} + ironic::rabbit_userid: {get_param: RabbitUserName} + ironic::rabbit_password: {get_param: RabbitPassword} + ironic::rabbit_port: {get_param: RabbitClientPort} + ironic::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + ironic::db::mysql::password: {get_param: IronicPassword} + ironic::db::mysql::user: ironic + ironic::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + ironic::db::mysql::dbname: ironic + ironic::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + + ironic::keystone::auth::tenant: 'service' + step_config: | + include ::tripleo::profile::base::ironic diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml new file mode 100644 index 00000000..a3bce305 --- /dev/null +++ b/puppet/services/ironic-conductor.yaml @@ -0,0 +1,36 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ironic conductor configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + IronicEnabledDrivers: + default: ['pxe_ipmitool', 'agent_ipmitool'] + description: Enabled Ironic drivers + type: comma_delimited_list + +resources: + IronicBase: + type: ./ironic-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ironic conductor role. + value: + service_name: ironic_conductor + config_settings: + map_merge: + - get_attr: [IronicBase, role_data, config_settings] + - ironic::enabled_drivers: {get_param: IronicEnabledDrivers} + # Prevent tftp_server from defaulting to my_ip setting, which is + # controller VIP, not a real IP. + ironic::drivers::pxe::tftp_server: {get_input: ironic_api_network} + step_config: | + include ::tripleo::profile::base::ironic::conductor diff --git a/puppet/services/keepalived.yaml b/puppet/services/keepalived.yaml index 09ce26b5..95a401c6 100644 --- a/puppet/services/keepalived.yaml +++ b/puppet/services/keepalived.yaml @@ -14,5 +14,6 @@ outputs: role_data: description: Role data for the Keepalived role. 
value: + service_name: keepalived step_config: | include ::tripleo::profile::base::keepalived diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml new file mode 100644 index 00000000..50ebe925 --- /dev/null +++ b/puppet/services/kernel.yaml @@ -0,0 +1,40 @@ +heat_template_version: 2016-04-08 + +description: > + Load kernel modules with kmod and configure kernel options with sysctl. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Kernel modules + value: + service_name: kernel + config_settings: + kernel_modules: + nf_conntrack: {} + sysctl_settings: + net.ipv4.tcp_keepalive_intvl: + value: 1 + net.ipv4.tcp_keepalive_probes: + value: 5 + net.ipv4.tcp_keepalive_time: + value: 5 + net.nf_conntrack_max: + value: 500000 + net.netfilter.nf_conntrack_max: + value: 500000 + # prevent neutron bridges from autoconfiguring ipv6 addresses + net.ipv6.conf.default.accept_ra: + value: 0 + net.ipv6.conf.default.autoconf: + value: 0 + net.core.netdev_max_backlog: + value: 10000 + step_config: | + include ::tripleo::profile::base::kernel diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index 25d92d4a..48e74875 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -45,10 +45,6 @@ parameters: type: string default: 'regionOne' description: Keystone region for endpoint - KeystoneWorkers: - default: 0 - description: Number of workers for Keystone service. - type: number EndpointMap: default: {} description: Mapping of service endpoint -> protocol. Typically set @@ -88,11 +84,15 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number - + KeystoneWorkers: + type: string + description: Set the number of workers for keystone::wsgi::apache + default: '"%{::processorcount}"' outputs: role_data: description: Role data for the Keystone role. 
value: + service_name: keystone config_settings: keystone::database_connection: list_join: @@ -124,9 +124,36 @@ outputs: keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} keystone::endpoint::region: {get_param: KeystoneRegion} - keystone::admin_workers: {get_param: KeystoneWorkers} - keystone::public_workers: {get_param: KeystoneWorkers} keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge} keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]} + keystone::db::mysql::user: keystone + keystone::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + keystone::db::mysql::dbname: keystone + keystone::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + keystone::rabbit_heartbeat_timeout_threshold: 60 + keystone::cron::token_flush::maxdelay: 3600 + keystone::roles::admin::service_tenant: 'service' + keystone::roles::admin::admin_tenant: 'admin' + keystone::cron::token_flush::destination: '/dev/null' + keystone::config::keystone_config: + DEFAULT/secure_proxy_ssl_header: + value: 'HTTP_X_FORWARDED_PROTO' + ec2/driver: + value: 'keystone.contrib.ec2.backends.sql.Ec2' + keystone::service_name: 'httpd' + keystone::wsgi::apache::ssl: false + + keystone::wsgi::apache::workers: {get_param: KeystoneWorkers} + # override via extraconfig: + keystone::wsgi::apache::threads: 1 + tripleo.keystone.firewall_rules: + '111 keystone': + dport: + - 5000 + - 13000 + - 35357 + - 13357 step_config: | include ::tripleo::profile::base::keystone diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml index fcd0adca..ceb29b55 100644 --- a/puppet/services/memcached.yaml +++ b/puppet/services/memcached.yaml @@ -14,6 +14,10 @@ outputs: role_data: description: Role data for the Memcached role. value: + service_name: memcached config_settings: + tripleo.memcached.firewall_rules: + '121 memcached': + dport: 11211 step_config: | include ::tripleo::profile::base::memcached diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml index 8bd8d989..c1134824 100644 --- a/puppet/services/neutron-base.yaml +++ b/puppet/services/neutron-base.yaml @@ -47,6 +47,7 @@ outputs: role_data: description: Role data for the Neutron base service. value: + service_name: neutron_base config_settings: neutron::rabbit_password: {get_param: RabbitPassword} neutron::rabbit_user: {get_param: RabbitUserName} @@ -60,3 +61,7 @@ outputs: params: PLUGINS: {get_param: NeutronServicePlugins} neutron::debug: {get_param: Debug} + neutron::allow_overlapping_ips: true + neutron::rabbit_heartbeat_timeout_threshold: 60 + neutron::host: '"%{::fqdn}"' #NOTE: extra quoting is needed + neutron::keystone::auth::tenant: 'service' diff --git a/puppet/services/neutron-compute-plugin-midonet.yaml b/puppet/services/neutron-compute-plugin-midonet.yaml new file mode 100644 index 00000000..200aaa1b --- /dev/null +++ b/puppet/services/neutron-compute-plugin-midonet.yaml @@ -0,0 +1,20 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute Midonet plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
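A recurring idiom in the hunks above is shipping a Puppet/hiera interpolation token through Heat with an extra layer of quotes, e.g. neutron::host and the new KeystoneWorkers default of '"%{::processorcount}"'. As the NOTE in the neutron-base hunk says, the doubled quoting protects the %{...} token on its way into the node's hieradata so that hiera, rather than Heat or the hieradata generation, expands it on each host; a sketch with illustrative resolved values::

    # template values (note the doubled quoting around the hiera token):
    neutron::host: '"%{::fqdn}"'
    keystone::wsgi::apache::workers: '"%{::processorcount}"'
    # what a hiera lookup on an individual node ultimately yields (example values only):
    #   neutron::host => overcloud-controller-0.localdomain
    #   keystone::wsgi::apache::workers => 8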
+ type: json + +outputs: + role_data: + description: Role data for the Neutron Compute Plumgrid plugin + value: + service_name: neutron_compute_plugin_midonet + config_settings: + step_config: | + include ::tripleo::profile::base::neutron::agents::midonet diff --git a/puppet/services/neutron-compute-plugin-nuage.yaml b/puppet/services/neutron-compute-plugin-nuage.yaml new file mode 100644 index 00000000..44aac571 --- /dev/null +++ b/puppet/services/neutron-compute-plugin-nuage.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute Nuage plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NovaPassword: + description: The password for the nova service account, used by nova-api. + type: string + hidden: true + +outputs: + role_data: + description: Role data for the Neutron Compute Nuage plugin + value: + service_name: neutron_compute_plugin_nuage + config_settings: + tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service' + tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword} + tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]} + step_config: | + include ::tripleo::profile::base::neutron::agents::nuage diff --git a/puppet/services/neutron-compute-plugin-opencontrail.yaml b/puppet/services/neutron-compute-plugin-opencontrail.yaml new file mode 100644 index 00000000..fb6d640c --- /dev/null +++ b/puppet/services/neutron-compute-plugin-opencontrail.yaml @@ -0,0 +1,20 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute OpenContrail plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Neutron Compute OpenContrail plugin + value: + service_name: neutron_compute_plugin_opencontrail + config_settings: + step_config: | + include ::tripleo::profile::base::neutron::opencontrail::vrouter diff --git a/puppet/services/neutron-compute-plugin-plumgrid.yaml b/puppet/services/neutron-compute-plugin-plumgrid.yaml new file mode 100644 index 00000000..ad1e3465 --- /dev/null +++ b/puppet/services/neutron-compute-plugin-plumgrid.yaml @@ -0,0 +1,20 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Compute Plumgrid plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Neutron Compute Plumgrid plugin + value: + service_name: neutron_compute_plugin_plumgrid + config_settings: + step_config: | + include tripleo::profile::base::neutron::plumgrid diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml index 80ccf1c2..322e018d 100644 --- a/puppet/services/neutron-dhcp.yaml +++ b/puppet/services/neutron-dhcp.yaml @@ -13,22 +13,6 @@ parameters: default: 'False' description: If True, DHCP provide metadata route to VM. type: string - NeutronDnsmasqOptions: - default: 'dhcp-option-force=26,%MTU%' - description: > - Dnsmasq options for neutron-dhcp-agent. 
The default value here forces MTU - to be set to the value of NeutronTenantMtu, which should be set to account - for tunnel overhead. - type: string - NeutronTenantMtu: - description: > - The default MTU for tenant networks. For VXLAN/GRE tunneling, this should - be at least 50 bytes smaller than the MTU on the physical network. This - value will be used to set the MTU on the virtual Ethernet device. - This value will be used to construct the NeutronDnsmasqOptions, since that - will determine the MTU that is assigned to the VM host through DHCP. - default: "1400" - type: string resources: @@ -39,15 +23,18 @@ outputs: role_data: description: Role data for the Neutron DHCP agent service. value: + service_name: neutron_dhcp config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - - neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf - tripleo::profile::base::neutron::dhcp: - str_replace: - template: {get_param: NeutronDnsmasqOptions} - params: - '%MTU%': {get_param: NeutronTenantMtu} - neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} + - neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} + tripleo.neutron_dhcp.firewall_rules: + '115 neutron dhcp input': + proto: 'udp' + dport: 67 + '116 neutron dhcp output': + proto: 'udp' + chain: 'OUTPUT' + dport: 68 step_config: | include tripleo::profile::base::neutron::dhcp diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml index 20c82dc1..a7232a39 100644 --- a/puppet/services/neutron-l3.yaml +++ b/puppet/services/neutron-l3.yaml @@ -26,9 +26,11 @@ outputs: role_data: description: Role data for the Neutron L3 agent service. value: + service_name: neutron_l3 config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} + neutron::agents::l3::router_delete_namespaces: True step_config: | include tripleo::profile::base::neutron::l3 diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml index e221b3a1..73d8c3da 100644 --- a/puppet/services/neutron-metadata.yaml +++ b/puppet/services/neutron-metadata.yaml @@ -31,6 +31,7 @@ outputs: role_data: description: Role data for the Neutron Metadata agent service. value: + service_name: neutron_metadata config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] @@ -38,5 +39,6 @@ outputs: neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers} neutron::agents::metadata::auth_password: {get_param: NeutronPassword} neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + neutron::agents::metadata::auth_tenant: 'service' step_config: | include tripleo::profile::base::neutron::metadata diff --git a/puppet/services/neutron-midonet.yaml b/puppet/services/neutron-midonet.yaml new file mode 100644 index 00000000..ccc0b0dd --- /dev/null +++ b/puppet/services/neutron-midonet.yaml @@ -0,0 +1,49 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Midonet plugin and services + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronPassword: + description: The password for the neutron service and db account, used by neutron agents. 
+ type: string + hidden: true + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + type: string + hidden: true + AdminToken: + description: The keystone auth secret and db password. + type: string + hidden: true + EnableZookeeperOnController: + label: Enable Zookeeper On Controller + description: 'Whether enable Zookeeper cluster on Controller' + type: boolean + default: false + EnableCassandraOnController: + label: Enable Cassandra On Controller + description: 'Whether enable Cassandra cluster on Controller' + type: boolean + default: false + +outputs: + role_data: + description: Role data for the Neutron Midonet plugin and services + value: + service_name: neutron_midonet + config_settings: + tripleo::profile::base::neutron::midonet::admin_password: {get_param: AdminPassword} + tripleo::profile::base::neutron::midonet::keystone_admin_token: {get_param: AdminToken} + tripleo::profile::base::neutron::midonet::neutron_auth_password: {get_param: NeutronPassword} + tripleo::profile::base::neutron::midonet::zk_on_controller: {get_param: EnableZookeeperOnController} + tripleo::profile::base::neutron::midonet::neutron_auth_tenant: 'service' + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + neutron::service_plugins: [] + step_config: | + include tripleo::profile::base::neutron::plugins::midonet diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml index 0e1dbb29..d8679f2e 100644 --- a/puppet/services/neutron-ovs-agent.yaml +++ b/puppet/services/neutron-ovs-agent.yaml @@ -47,6 +47,7 @@ outputs: role_data: description: Role data for the Neutron OVS agent service. value: + service_name: neutron_ovs_agent config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] diff --git a/puppet/services/neutron-plugin-ml2.yaml b/puppet/services/neutron-plugin-ml2.yaml index 435a6de0..158122ed 100644 --- a/puppet/services/neutron-plugin-ml2.yaml +++ b/puppet/services/neutron-plugin-ml2.yaml @@ -61,6 +61,7 @@ outputs: role_data: description: Role data for the Neutron ML2 plugin. value: + service_name: neutron_plugin_ml2 config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] @@ -106,4 +107,4 @@ outputs: TYPES: {get_param: NeutronNetworkType} step_config: | - include ::tripleo::profile::base::neutron::ml2 + include ::tripleo::profile::base::neutron::plugins::ml2 diff --git a/puppet/services/neutron-plugin-nuage.yaml b/puppet/services/neutron-plugin-nuage.yaml new file mode 100644 index 00000000..5f228f96 --- /dev/null +++ b/puppet/services/neutron-plugin-nuage.yaml @@ -0,0 +1,76 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Nuage plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + # Config specific parameters, to be provided via parameter_defaults + NeutronNuageOSControllerIp: + description: IP address of the OpenStack Controller + type: string + + NeutronNuageNetPartitionName: + description: Specifies the title that you will see on the VSD + type: string + default: 'default_name' + + NeutronNuageVSDIp: + description: IP address and port of the Virtual Services Directory + type: string + + NeutronNuageVSDUsername: + description: Username to be used to log into VSD + type: string + + NeutronNuageVSDPassword: + description: Password to be used to log into VSD + type: string + + NeutronNuageVSDOrganization: + description: Organization parameter required to log into VSD + type: string + default: 'organization' + + NeutronNuageBaseURIVersion: + description: URI version to be used based on the VSD release + type: string + default: 'default_uri_version' + + NeutronNuageCMSId: + description: Cloud Management System ID (CMS ID) to distinguish between OS instances on the same VSD + type: string + + UseForwardedFor: + description: Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. + type: boolean + default: false + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron Nuage plugin + value: + service_name: neutron_plugin_nuage + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::plugins::nuage::nuage_oscontroller_ip: {get_param: NeutronNuageOSControllerIp} + neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName} + neutron::plugins::nuage::nuage_vsd_ip: {get_param: NeutronNuageVSDIp} + neutron::plugins::nuage::nuage_vsd_username: {get_param: NeutronNuageVSDUsername} + neutron::plugins::nuage::nuage_vsd_password: {get_param: NeutronNuageVSDPassword} + neutron::plugins::nuage::nuage_vsd_organization: {get_param: NeutronNuageVSDOrganization} + neutron::plugins::nuage::nuage_base_uri_version: {get_param: NeutronNuageBaseURIVersion} + neutron::plugins::nuage::nuage_cms_id: {get_param: NeutronNuageCMSId} + nova::api::use_forwarded_for: {get_param: UseForwardedFor} + step_config: | + include tripleo::profile::base::neutron::plugins::nuage diff --git a/puppet/services/neutron-plugin-opencontrail.yaml b/puppet/services/neutron-plugin-opencontrail.yaml new file mode 100644 index 00000000..ed6a2c04 --- /dev/null +++ b/puppet/services/neutron-plugin-opencontrail.yaml @@ -0,0 +1,61 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Opencontrail plugin + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + type: string + hidden: true + AdminToken: + description: The keystone auth secret and db password. 
+ type: string + hidden: true + ContrailApiServerIp: + description: IP address of the OpenContrail API server + type: string + ContrailApiServerPort: + description: Port of the OpenContrail API + type: string + default: 8082 + ContrailMultiTenancy: + description: Whether to enable multi tenancy + type: boolean + default: false + ContrailExtensions: + description: List of OpenContrail extensions to be enabled + type: comma_delimited_list + default: '' + +resources: + + NeutronBase: + type: ./neutron-base.yaml + +outputs: + role_data: + description: Role data for the Neutron Opencontrail plugin + value: + service_name: neutron_plugin_opencontrail + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions + + neutron::plugins::opencontrail::api_server_ip: {get_param: ContrailApiServerIp} + neutron::plugins::opencontrail::api_server_port: {get_param: ContrailApiServerPort} + neutron::plugins::opencontrail::multi_tenancy: {get_param: ContrailMultiTenancy} + neutron::plugins::opencontrail::contrail_extensions: {get_param: ContrailExtensions} + neutron::plugins::opencontrail::keystone_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri] } + neutron::plugins::opencontrail::keystone_admin_user: admin + neutron::plugins::opencontrail::keystone_admin_tenant_name: admin + neutron::plugins::opencontrail::keystone_admin_password: {get_param: AdminPassword} + neutron::plugins::opencontrail::keystone_admin_token: {get_param: AdminToken} + step_config: | + include tripleo::profile::base::neutron::plugins::opencontrail diff --git a/puppet/services/neutron-plugin-plumgrid.yaml b/puppet/services/neutron-plugin-plumgrid.yaml index 68766674..5488bed7 100644 --- a/puppet/services/neutron-plugin-plumgrid.yaml +++ b/puppet/services/neutron-plugin-plumgrid.yaml @@ -80,6 +80,7 @@ outputs: role_data: description: Role data for the Neutron Plumgrid plugin value: + service_name: neutron_plugin_plumgrid config_settings: neutron::plugins::plumgrid::connection: list_join: diff --git a/puppet/services/neutron-server.yaml b/puppet/services/neutron-server.yaml index b84958cd..1c7cef49 100644 --- a/puppet/services/neutron-server.yaml +++ b/puppet/services/neutron-server.yaml @@ -22,9 +22,9 @@ parameters: description: Allow automatic l3-agent failover type: string NeutronL3HA: - default: 'False' - description: Whether to enable l3-agent HA - type: string + default: false + description: Whether to enable HA for virtual routers + type: boolean NovaPassword: description: The password for the nova service and db account, used by nova-api. type: string @@ -39,10 +39,11 @@ outputs: role_data: description: Role data for the Neutron Server agent service. 
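NeutronL3HA above changes from the string 'False' to a real boolean defaulting to false, so the value reaches neutron::server::l3_ha without any string-to-boolean munging. A hypothetical environment file enabling router HA would now simply set::

    parameter_defaults:
      NeutronL3HA: true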
value: + service_name: neutron_server config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - neutron_dsn: &neutron_dsn + neutron::server::database_connection: list_join: - '' - - {get_param: [EndpointMap, MysqlInternal, protocol]} @@ -53,8 +54,8 @@ outputs: - '/ovs_neutron' neutron::server::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } neutron::server::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + neutron::server::auth_tenant: 'service' neutron::server::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } - neutron::server::database_connection: *neutron_dsn neutron::server::api_workers: {get_param: NeutronWorkers} neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron::server::l3_ha: {get_param: NeutronL3HA} @@ -65,6 +66,24 @@ outputs: neutron::server::notifications::tenant_name: 'service' neutron::server::notifications::project_name: 'service' neutron::server::notifications::password: {get_param: NovaPassword} + neutron::server::project_name: 'service' + neutron::server::sync_db: true neutron::db::mysql::password: {get_param: NeutronPassword} + neutron::db::mysql::user: neutron + neutron::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + neutron::db::mysql::dbname: ovs_neutron + neutron::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + tripleo.neutron_server.firewall_rules: + '114 neutron server': + dport: + - 9696 + - 13696 + '118 neutron vxlan networks': + proto: 'udp' + dport: 4789 + '106 vrrp': + proto: vrrp step_config: | include tripleo::profile::base::neutron::server diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml index f31df371..045a8614 100644 --- a/puppet/services/nova-api.yaml +++ b/puppet/services/nova-api.yaml @@ -17,15 +17,35 @@ parameters: resources: NovaBase: type: ./nova-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} outputs: role_data: description: Role data for the Nova API service. value: + service_name: nova_api config_settings: map_merge: - get_attr: [NovaBase, role_data, config_settings] - nova::api::osapi_compute_workers: {get_param: NovaWorkers} - - nova::api::metadata_workers: {get_param: NovaWorkers} + nova::api::metadata_workers: {get_param: NovaWorkers} + nova::cron::archive_deleted_rows::hour: '"*/12"' + nova::cron::archive_deleted_rows::destination: '"/dev/null"' + tripleo.nova_api.firewall_rules: + '113 nova_api': + dport: + - 6080 + - 13080 + - 8773 + - 3773 + - 8774 + - 13774 + - 8775 + nova::api::admin_tenant_name: 'service' + nova::api::enabled: true + nova::api::default_floating_pool: 'public' + nova::api::sync_db_api: true + nova::api::enable_proxy_headers_parsing: true step_config: | include tripleo::profile::base::nova::api diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml index 7de14f68..21dbacd2 100644 --- a/puppet/services/nova-base.yaml +++ b/puppet/services/nova-base.yaml @@ -4,6 +4,15 @@ description: > OpenStack Nova base service. Shared for all Nova services. parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NovaPassword: + description: The password for the nova service and db account, used by nova-api. 
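The neutron-server hunk above also drops the &neutron_dsn YAML anchor: the DSN used to be built once under neutron_dsn and reused through the *neutron_dsn alias, and is now assigned directly to neutron::server::database_connection. For readers unfamiliar with the notation, an anchor/alias pair is plain YAML value reuse, e.g. (example DSN only)::

    neutron_dsn: &neutron_dsn mysql://neutron:secret@db.example.com/ovs_neutron  # anchor defines the value once
    neutron::server::database_connection: *neutron_dsn                           # alias re-uses the same value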
+ type: string + hidden: true RabbitPassword: description: The password for RabbitMQ type: string @@ -29,11 +38,66 @@ parameters: outputs: role_data: - description: Role data for the Neutron base service. + description: Role data for the Nova base service. value: + service_name: nova_base config_settings: nova::rabbit_password: {get_param: RabbitPassword} nova::rabbit_user: {get_param: RabbitUserName} nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL} nova::rabbit_port: {get_param: RabbitClientPort} + nova::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova:' + - {get_param: NovaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/nova' + nova::api_database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova_api:' + - {get_param: NovaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/nova_api' + nova::db::mysql::password: {get_input: nova_password} + nova::db::mysql::user: nova + nova::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + nova::db::mysql::dbname: nova + nova::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + nova::db::mysql_api::password: {get_input: nova_password} + nova::db::mysql_api::user: nova_api + nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + nova::db::mysql_api::dbname: nova_api + nova::db::mysql_api::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" nova::debug: {get_param: Debug} + nova::network::neutron::neutron_project_name: 'service' + nova::network::neutron::neutron_username: 'neutron' + nova::network::neutron::dhcp_domain: '' + nova::rabbit_heartbeat_timeout_threshold: 60 + nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' + nova::host: '"%{::fqdn}"' # NOTE: extra quoting is needed. + nova::notify_on_state_change: 'vm_and_task_state' + nova::notification_driver: messagingv2 + nova::network::neutron::neutron_auth_type: 'v3password' + nova::keystone::auth::tenant: 'service' + nova::db::mysql::user: nova + nova::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + nova::db::mysql::dbname: nova + nova::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + nova::db::mysql_api::user: nova_api + nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + nova::db::mysql_api::dbname: nova_api + nova::db::mysql_api::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml index 0844aa85..bcc3a232 100644 --- a/puppet/services/nova-compute.yaml +++ b/puppet/services/nova-compute.yaml @@ -9,17 +9,49 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + NovaRbdPoolName: + default: vms + type: string + CephClientUserName: + default: openstack + type: string resources: NovaBase: type: ./nova-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} outputs: role_data: - description: Role data for the Nova Conductor service. + description: Role data for the Nova Compute service. 
value: + service_name: nova_compute config_settings: map_merge: - get_attr: [NovaBase, role_data, config_settings] + - nova::compute::libvirt::manage_libvirt_services: false + # we manage migration in nova common puppet profile + nova::compute::libvirt::migration_support: false + tripleo::profile::base::nova::manage_migration: true + tripleo::profile::base::nova::nova_compute_enabled: true + nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} + nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} + nova::compute::rbd::rbd_keyring: + list_join: + - '.' + - - 'client' + - {get_param: CephClientUserName} + nova::compute::rbd::libvirt_rbd_secret_uuid: '"%{hiera(\"ceph::profile::params::fsid\")}"' + nova::compute::instance_usage_audit: true + nova::compute::instance_usage_audit_period: 'hour' + # Changing the default from 512MB. The current templates can not deploy + # overclouds with swap. On an idle compute node, we see ~1024MB of RAM + # used. 2048 is suggested to account for other possible operations for + # example openvswitch. + nova::compute::reserved_host_memory: 2048 step_config: | - include tripleo::profile::base::nova::compute + # TODO(emilien): figure how to deal with libvirt profile. + # We'll probably threat it like we do with Neutron plugins. + # Until then, just include it in the default nova-compute role. + include tripleo::profile::base::nova::compute::libvirt diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml index 412dd275..5964f883 100644 --- a/puppet/services/nova-conductor.yaml +++ b/puppet/services/nova-conductor.yaml @@ -17,11 +17,14 @@ parameters: resources: NovaBase: type: ./nova-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} outputs: role_data: description: Role data for the Nova Conductor service. value: + service_name: nova_conductor config_settings: map_merge: - get_attr: [NovaBase, role_data, config_settings] diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml index 791c5449..5fbce1b1 100644 --- a/puppet/services/nova-consoleauth.yaml +++ b/puppet/services/nova-consoleauth.yaml @@ -13,11 +13,14 @@ parameters: resources: NovaBase: type: ./nova-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} outputs: role_data: description: Role data for the Nova Consoleauth service. value: + service_name: nova_consoleauth config_settings: get_attr: [NovaBase, role_data, config_settings] step_config: | diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml new file mode 100644 index 00000000..939b6a09 --- /dev/null +++ b/puppet/services/nova-libvirt.yaml @@ -0,0 +1,34 @@ +heat_template_version: 2016-04-08 + +description: > + Libvirt service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + NovaBase: + type: ./nova-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Libvirt service. 
+ value: + service_name: nova_libvirt + config_settings: + map_merge: + - get_attr: [NovaBase, role_data, config_settings] + # we include ::nova::compute::libvirt::services in nova/libvirt profile + - nova::compute::libvirt::manage_libvirt_services: false + # we manage migration in nova common puppet profile + nova::compute::libvirt::migration_support: false + tripleo::profile::base::nova::manage_migration: true + tripleo::profile::base::nova::libvirt_enabled: true + step_config: | + include tripleo::profile::base::nova::libvirt diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml index 65ed6643..bd6e5116 100644 --- a/puppet/services/nova-scheduler.yaml +++ b/puppet/services/nova-scheduler.yaml @@ -13,11 +13,14 @@ parameters: resources: NovaBase: type: ./nova-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} outputs: role_data: description: Role data for the Nova Scheduler service. value: + service_name: nova_scheduler config_settings: map_merge: - get_attr: [NovaBase, role_data, config_settings] diff --git a/puppet/services/nova-vncproxy.yaml b/puppet/services/nova-vncproxy.yaml index 93a25ab2..0b9cef38 100644 --- a/puppet/services/nova-vncproxy.yaml +++ b/puppet/services/nova-vncproxy.yaml @@ -13,12 +13,17 @@ parameters: resources: NovaBase: type: ./nova-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} outputs: role_data: description: Role data for the Nova Vncproxy service. value: + service_name: nova_vncproxy config_settings: - get_attr: [NovaBase, role_data, config_settings] + map_merge: + - get_attr: [NovaBase, role_data, config_settings] + - nova::vncproxy::enabled: true step_config: | include tripleo::profile::base::nova::vncproxy diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml new file mode 100644 index 00000000..b0ebb7d4 --- /dev/null +++ b/puppet/services/pacemaker.yaml @@ -0,0 +1,35 @@ +heat_template_version: 2016-04-08 + +description: > + Pacemaker service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Pacemaker role. + value: + service_name: pacemaker + config_settings: + pacemaker::corosync::cluster_name: 'tripleo_cluster' + pacemaker::corosync::manage_fw: false + pacemaker::resource_defaults::defaults: + resource-stickiness: { value: INFINITY } + corosync_token_timeout: 10000 + tripleo.pacemaker.firewall_rules: + '130 pacemaker tcp': + proto: 'tcp' + dport: + - 2224 + - 3121 + - 21064 + '131 pacemaker udp': + proto: 'udp' + dport: 5405 + step_config: | + include ::tripleo::profile::base::pacemaker diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml new file mode 100644 index 00000000..471b9f7a --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-agent-central.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Central Agent service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Central Agent pacemaker role. + value: + service_name: ceilometer_agent_central + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::central::manage_service: false + ceilometer::agent::central::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::agent::central diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml new file mode 100644 index 00000000..2530848f --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-agent-notification.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Notification Agent service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Notification Agent pacemaker role. + value: + service_name: ceilometer_agent_notification + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::notification::manage_service: false + ceilometer::agent::notification::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::agent::notification diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/pacemaker/ceilometer-api.yaml new file mode 100644 index 00000000..f1885372 --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-api.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer API service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer API pacemaker role. + value: + service_name: ceilometer_api + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::api::manage_service: false + ceilometer::api::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::api diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml new file mode 100644 index 00000000..8d31e1aa --- /dev/null +++ b/puppet/services/pacemaker/ceilometer-collector.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Ceilometer Collector service with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
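The pacemaker variants above all reduce to the same skeleton: merge in the base service's config_settings, turn off puppet's own service management so Pacemaker owns the resource, and point step_config at the pacemaker profile. A sketch of that skeleton, with placeholder names standing in for any of the services in this change::

    outputs:
      role_data:
        value:
          service_name: <service>
          config_settings:
            map_merge:
              - get_attr: [<Service>Base, role_data, config_settings]
              - <service>::manage_service: false
                <service>::enabled: false
          step_config: |
            include ::tripleo::profile::pacemaker::<service>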
+ type: json + +resources: + CeilometerServiceBase: + type: ../ceilometer-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Ceilometer Collector pacemaker role. + value: + service_name: ceilometer_collector + config_settings: + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::collector::manage_service: false + ceilometer::collector::enabled: false + step_config: | + include ::tripleo::profile::pacemaker::ceilometer::collector diff --git a/puppet/services/pacemaker/cinder-api.yaml b/puppet/services/pacemaker/cinder-api.yaml index 0f66cc06..706a189b 100644 --- a/puppet/services/pacemaker/cinder-api.yaml +++ b/puppet/services/pacemaker/cinder-api.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Cinder API role. value: + service_name: cinder_api config_settings: map_merge: - get_attr: [CinderApiBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/cinder-scheduler.yaml b/puppet/services/pacemaker/cinder-scheduler.yaml index d1472c00..350d7126 100644 --- a/puppet/services/pacemaker/cinder-scheduler.yaml +++ b/puppet/services/pacemaker/cinder-scheduler.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Cinder Scheduler role. value: + service_name: cinder_scheduler config_settings: map_merge: - get_attr: [CinderSchedulerBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/cinder-volume.yaml b/puppet/services/pacemaker/cinder-volume.yaml index ee4e6cea..7b6e9a77 100644 --- a/puppet/services/pacemaker/cinder-volume.yaml +++ b/puppet/services/pacemaker/cinder-volume.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Cinder Volume role. value: + service_name: cinder_volume config_settings: map_merge: - get_attr: [CinderVolumeBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/core.yaml b/puppet/services/pacemaker/core.yaml new file mode 100644 index 00000000..1c0c043c --- /dev/null +++ b/puppet/services/pacemaker/core.yaml @@ -0,0 +1,20 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Core (fake) service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the Core role. + value: + service_name: core + config_settings: {} + step_config: | + include ::tripleo::profile::pacemaker::core
\ No newline at end of file diff --git a/puppet/services/pacemaker/database/mongodb.yaml b/puppet/services/pacemaker/database/mongodb.yaml index b2e9e0bb..c09f93a9 100644 --- a/puppet/services/pacemaker/database/mongodb.yaml +++ b/puppet/services/pacemaker/database/mongodb.yaml @@ -19,6 +19,7 @@ outputs: role_data: description: Service mongodb using composable services. value: + service_name: mongodb config_settings: map_merge: - get_attr: [MongoDbBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml new file mode 100644 index 00000000..cc9dc946 --- /dev/null +++ b/puppet/services/pacemaker/database/mysql.yaml @@ -0,0 +1,21 @@ +heat_template_version: 2016-04-08 + +description: > + MySQL with Pacemaker service deployment using puppet + +parameters: + #Parameters not used EndpointMap + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Service MySQL with Pacemaker using composable services. + value: + service_name: mysql + config_settings: + step_config: | + include ::tripleo::profile::pacemaker::database::mysql diff --git a/puppet/services/pacemaker/database/redis.yaml b/puppet/services/pacemaker/database/redis.yaml index 0e46f8a3..3cd8d168 100644 --- a/puppet/services/pacemaker/database/redis.yaml +++ b/puppet/services/pacemaker/database/redis.yaml @@ -18,11 +18,11 @@ outputs: role_data: description: Role data for the Redis pacemaker role. value: + service_name: redis config_settings: map_merge: - get_attr: [RedisBase, role_data, config_settings] - - tripleo::profile::pacemaker::database::redis::redis_vip: {get_input: redis_vip} - redis::service_manage: false + - redis::service_manage: false redis::notify_service: false step_config: | include ::tripleo::profile::pacemaker::database::redis diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml index 5a581dca..0fc17b22 100644 --- a/puppet/services/pacemaker/glance-api.yaml +++ b/puppet/services/pacemaker/glance-api.yaml @@ -45,6 +45,7 @@ outputs: role_data: description: Role data for the Glance role. value: + service_name: glance_api config_settings: map_merge: - get_attr: [GlanceApiBase, role_data, config_settings] @@ -52,6 +53,7 @@ outputs: glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype} glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage} glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions} + glance_file_pcmk_directory: '/var/lib/glance/images' glance::api::manage_service: false glance::api::enabled: false step_config: | diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml index 8b88cb93..ae7aa307 100644 --- a/puppet/services/pacemaker/glance-registry.yaml +++ b/puppet/services/pacemaker/glance-registry.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Glance role. 
value: + service_name: glance_registry config_settings: map_merge: - get_attr: [GlanceRegistryBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/gnocchi-api.yaml b/puppet/services/pacemaker/gnocchi-api.yaml new file mode 100644 index 00000000..bcd73e61 --- /dev/null +++ b/puppet/services/pacemaker/gnocchi-api.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2016-04-08 + +description: > + Gnocchi service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + GnocchiServiceBase: + type: ../gnocchi-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Gnocchi role. + value: + service_name: gnocchi_api + config_settings: + map_merge: + - get_attr: [GnocchiServiceBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::gnocchi::api diff --git a/puppet/services/pacemaker/gnocchi-metricd.yaml b/puppet/services/pacemaker/gnocchi-metricd.yaml new file mode 100644 index 00000000..ec487e89 --- /dev/null +++ b/puppet/services/pacemaker/gnocchi-metricd.yaml @@ -0,0 +1,31 @@ +heat_template_version: 2016-04-08 + +description: > + Gnocchi service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + GnocchiServiceBase: + type: ../gnocchi-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Gnocchi role. + value: + service_name: gnocchi_metricd + config_settings: + map_merge: + - get_attr: [GnocchiServiceBase, role_data, config_settings] + - gnocchi::metricd::manage_service: false + tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]} + + step_config: | + include ::tripleo::profile::pacemaker::gnocchi::metricd diff --git a/puppet/services/pacemaker/gnocchi-statsd.yaml b/puppet/services/pacemaker/gnocchi-statsd.yaml new file mode 100644 index 00000000..d1106524 --- /dev/null +++ b/puppet/services/pacemaker/gnocchi-statsd.yaml @@ -0,0 +1,30 @@ +heat_template_version: 2016-04-08 + +description: > + Gnocchi service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + GnocchiServiceBase: + type: ../gnocchi-base.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Gnocchi role. + value: + service_name: gnocchi_statsd + config_settings: + map_merge: + - get_attr: [GnocchiServiceBase, role_data, config_settings] + - gnocchi::statsd::manage_service: false + tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]} + step_config: | + include ::tripleo::profile::pacemaker::gnocchi::statsd diff --git a/puppet/services/pacemaker/haproxy.yaml b/puppet/services/pacemaker/haproxy.yaml index c2ca2816..811a6697 100644 --- a/puppet/services/pacemaker/haproxy.yaml +++ b/puppet/services/pacemaker/haproxy.yaml @@ -20,6 +20,7 @@ outputs: role_data: description: Role data for the HAproxy with pacemaker role. 
value: + service_name: haproxy config_settings: map_merge: - get_attr: [LoadbalancerServiceBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml index 5833c42d..8f2c561a 100644 --- a/puppet/services/pacemaker/heat-api-cfn.yaml +++ b/puppet/services/pacemaker/heat-api-cfn.yaml @@ -20,12 +20,11 @@ outputs: role_data: description: Role data for the Heat CloudFormation API role. value: + service_name: heat_api_cfn config_settings: map_merge: - get_attr: [HeatApiCfnBase, role_data, config_settings] - heat::api_cfn::manage_service: false heat::api_cfn::enabled: false - step_config: - # No puppet manifests since heat-api-cfn is included in - # ::tripleo::profile::pacemaker::heat which is maintained alongside of - # pacemaker/heat-api.yaml. + step_config: | + include ::tripleo::profile::pacemaker::heat::api_cfn diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml index 8b67702c..a08e0262 100644 --- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml +++ b/puppet/services/pacemaker/heat-api-cloudwatch.yaml @@ -20,12 +20,11 @@ outputs: role_data: description: Role data for the Heat Cloudwatch API role. value: + service_name: heat_api_cloudwatch config_settings: map_merge: - get_attr: [HeatApiCloudwatchBase, role_data, config_settings] - heat::api_cloudwatch::manage_service: false heat::api_cloudwatch::enabled: false - step_config: - # No puppet manifests since heat-api-cloudwatch is included in - # ::tripleo::profile::pacemaker::heat which is maintained alongside of - # pacemaker/heat-api.yaml. + step_config: | + include ::tripleo::profile::pacemaker::heat::api_cloudwatch diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml index 6628e8dd..5cf25516 100644 --- a/puppet/services/pacemaker/heat-api.yaml +++ b/puppet/services/pacemaker/heat-api.yaml @@ -20,10 +20,11 @@ outputs: role_data: description: Role data for the Heat API role. value: + service_name: heat_api config_settings: map_merge: - get_attr: [HeatApiBase, role_data, config_settings] - heat::api::manage_service: false heat::api::enabled: false step_config: | - include ::tripleo::profile::pacemaker::heat + include ::tripleo::profile::pacemaker::heat::api diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml index e1195780..4d41cbe4 100644 --- a/puppet/services/pacemaker/heat-engine.yaml +++ b/puppet/services/pacemaker/heat-engine.yaml @@ -21,12 +21,11 @@ outputs: role_data: description: Role data for the Heat engine role. value: + service_name: heat_engine config_settings: map_merge: - get_attr: [HeatEngineBase, role_data, config_settings] - heat::engine::manage_service: false heat::engine::enabled: false - step_config: - # No puppet manifests since heat-engine is included in - # ::tripleo::profile::pacemaker::heat which is maintained alongside of - # pacemaker/heat-api.yaml. + step_config: | + include ::tripleo::profile::pacemaker::heat::engine diff --git a/puppet/services/pacemaker/horizon.yaml b/puppet/services/pacemaker/horizon.yaml new file mode 100644 index 00000000..f8b5ad0c --- /dev/null +++ b/puppet/services/pacemaker/horizon.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + Horizon service with Pacemaker configured with Puppet. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. 
Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + HorizonBase: + type: ../horizon.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Horizon role. + value: + service_name: horizon + config_settings: + get_attr: [HorizonBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::base::horizon + include ::tripleo::profile::pacemaker::apache diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml index 04e90368..f5f136d3 100644 --- a/puppet/services/pacemaker/keystone.yaml +++ b/puppet/services/pacemaker/keystone.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Keystone pacemaker role. value: + service_name: keystone config_settings: map_merge: - get_attr: [KeystoneServiceBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/pacemaker/memcached.yaml index 9a11855e..43cd528e 100644 --- a/puppet/services/pacemaker/memcached.yaml +++ b/puppet/services/pacemaker/memcached.yaml @@ -19,6 +19,7 @@ outputs: role_data: description: Role data for the Memcached pacemaker role. value: + service_name: memcached config_settings: map_merge: - get_attr: [MemcachedServiceBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml index 6f514379..3a5ada47 100644 --- a/puppet/services/pacemaker/neutron-dhcp.yaml +++ b/puppet/services/pacemaker/neutron-dhcp.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Neutron DHCP role. value: + service_name: neutron_dhcp config_settings: map_merge: - get_attr: [NeutronDhcpBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml index cb9c32d9..a4f2c0e1 100644 --- a/puppet/services/pacemaker/neutron-l3.yaml +++ b/puppet/services/pacemaker/neutron-l3.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Neutron L3 role. value: + service_name: neutron_l3 config_settings: map_merge: - get_attr: [NeutronL3Base, role_data, config_settings] diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml index 1c74b26f..9b322f22 100644 --- a/puppet/services/pacemaker/neutron-metadata.yaml +++ b/puppet/services/pacemaker/neutron-metadata.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Neutron Metadata role. value: + service_name: neutron_metadata config_settings: map_merge: - get_attr: [NeutronMetadataBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml new file mode 100644 index 00000000..e93ed661 --- /dev/null +++ b/puppet/services/pacemaker/neutron-midonet.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Midonet with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronMidonetBase: + type: ../neutron-midonet.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron Midonet plugin. 
+ value: + service_name: neutron_midonet + config_settings: + map_merge: + - get_attr: [NeutronMidonetBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::neutron::plugins::midonet diff --git a/puppet/services/pacemaker/neutron-ovs-agent.yaml b/puppet/services/pacemaker/neutron-ovs-agent.yaml index a17d7a61..b2260de9 100644 --- a/puppet/services/pacemaker/neutron-ovs-agent.yaml +++ b/puppet/services/pacemaker/neutron-ovs-agent.yaml @@ -19,6 +19,7 @@ outputs: role_data: description: Role data for the Neutron OVS agent service. value: + service_name: neutron_ovs_agent config_settings: get_attr: [NeutronOvsBase, role_data, config_settings] step_config: | diff --git a/puppet/services/pacemaker/neutron-plugin-ml2.yaml b/puppet/services/pacemaker/neutron-plugin-ml2.yaml index ac9d2402..dc10d093 100644 --- a/puppet/services/pacemaker/neutron-plugin-ml2.yaml +++ b/puppet/services/pacemaker/neutron-plugin-ml2.yaml @@ -19,10 +19,11 @@ outputs: role_data: description: Role data for the Neutron ML2 plugin. value: + service_name: neutron_plugin_ml2 config_settings: map_merge: - get_attr: [NeutronMl2Base, role_data, config_settings] - neutron::agents::ml2::ovs::enabled: false neutron::agents::ml2::ovs::manage_service: false step_config: | - include ::tripleo::profile::pacemaker::neutron::ml2 + include ::tripleo::profile::pacemaker::neutron::plugins::ml2 diff --git a/puppet/services/pacemaker/neutron-plugin-nuage.yaml b/puppet/services/pacemaker/neutron-plugin-nuage.yaml new file mode 100644 index 00000000..414c046c --- /dev/null +++ b/puppet/services/pacemaker/neutron-plugin-nuage.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron Nuage Plugin with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronPluginNuageBase: + type: ../neutron-plugin-nuage.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron Nuage plugin. + value: + service_name: neutron_plugin_nuage + config_settings: + map_merge: + - get_attr: [NeutronPluginNuageBase, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::neutron::plugins::nuage diff --git a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml new file mode 100644 index 00000000..1d385d83 --- /dev/null +++ b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Neutron OpenContrail Plugin with Pacemaker configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + NeutronPluginOpenContrail: + type: ../neutron-plugin-opencontrail.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron OpenContrail plugin.
+ value: + service_name: neutron_plugin_opencontrail + config_settings: + map_merge: + - get_attr: [NeutronPluginOpenContrail, role_data, config_settings] + step_config: | + include ::tripleo::profile::pacemaker::neutron::plugins::opencontrail diff --git a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml b/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml index c2e8eaac..69f5fd27 100644 --- a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml +++ b/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Neutron PLUMgrid plugin. value: + service_name: neutron_plugin_plumgrid config_settings: map_merge: - get_attr: [NeutronPluginPlumgridBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/neutron-server.yaml b/puppet/services/pacemaker/neutron-server.yaml index 60599e7e..b0f739dc 100644 --- a/puppet/services/pacemaker/neutron-server.yaml +++ b/puppet/services/pacemaker/neutron-server.yaml @@ -9,6 +9,10 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + NeutronL3HA: + default: true + description: Whether to enable HA for virtual routers + type: boolean resources: @@ -21,10 +25,12 @@ outputs: role_data: description: Role data for the Neutron Server. value: + service_name: neutron_server config_settings: map_merge: - get_attr: [NeutronServerBase, role_data, config_settings] - neutron::server::enabled: false neutron::server::manage_service: false + neutron::server::l3_ha: {get_param: NeutronL3HA} step_config: | include ::tripleo::profile::pacemaker::neutron::server diff --git a/puppet/services/pacemaker/nova-api.yaml b/puppet/services/pacemaker/nova-api.yaml index 1b5011b6..2d497ab5 100644 --- a/puppet/services/pacemaker/nova-api.yaml +++ b/puppet/services/pacemaker/nova-api.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Nova API role. value: + service_name: nova_api config_settings: map_merge: - get_attr: [NovaApiBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/nova-conductor.yaml b/puppet/services/pacemaker/nova-conductor.yaml index a484f0df..1f57cf08 100644 --- a/puppet/services/pacemaker/nova-conductor.yaml +++ b/puppet/services/pacemaker/nova-conductor.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Nova Conductor role. value: + service_name: nova_conductor config_settings: map_merge: - get_attr: [NovaConductorBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/nova-consoleauth.yaml b/puppet/services/pacemaker/nova-consoleauth.yaml index f9b6b058..bbab8bea 100644 --- a/puppet/services/pacemaker/nova-consoleauth.yaml +++ b/puppet/services/pacemaker/nova-consoleauth.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Nova Consoleauth role. value: + service_name: nova_consoleauth config_settings: map_merge: - get_attr: [NovaConsoleauthBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/nova-scheduler.yaml b/puppet/services/pacemaker/nova-scheduler.yaml index 0032cbe6..bf2c62bc 100644 --- a/puppet/services/pacemaker/nova-scheduler.yaml +++ b/puppet/services/pacemaker/nova-scheduler.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Nova Scheduler role. 
value: + service_name: nova_scheduler config_settings: map_merge: - get_attr: [NovaSchedulerBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/nova-vncproxy.yaml b/puppet/services/pacemaker/nova-vncproxy.yaml index 52395240..0353d924 100644 --- a/puppet/services/pacemaker/nova-vncproxy.yaml +++ b/puppet/services/pacemaker/nova-vncproxy.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Nova Vncproxy role. value: + service_name: nova_vncproxy config_settings: map_merge: - get_attr: [NovaVncproxyBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml index 20fb2e40..b01caeea 100644 --- a/puppet/services/pacemaker/rabbitmq.yaml +++ b/puppet/services/pacemaker/rabbitmq.yaml @@ -20,6 +20,7 @@ outputs: role_data: description: Role data for the RabbitMQ pacemaker role. value: + service_name: rabbitmq config_settings: map_merge: - get_attr: [RabbitMQServiceBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/sahara-api.yaml b/puppet/services/pacemaker/sahara-api.yaml index a5db77c4..ac05a01a 100644 --- a/puppet/services/pacemaker/sahara-api.yaml +++ b/puppet/services/pacemaker/sahara-api.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Sahara API role. value: + service_name: sahara_api config_settings: map_merge: - get_attr: [SaharaApiBase, role_data, config_settings] diff --git a/puppet/services/pacemaker/sahara-engine.yaml b/puppet/services/pacemaker/sahara-engine.yaml index 129f88bf..f6bd8f61 100644 --- a/puppet/services/pacemaker/sahara-engine.yaml +++ b/puppet/services/pacemaker/sahara-engine.yaml @@ -21,6 +21,7 @@ outputs: role_data: description: Role data for the Sahara Engine role. value: + service_name: sahara_engine config_settings: map_merge: - get_attr: [SaharaEngineBase, role_data, config_settings] diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml index 581b4ba4..4c02f359 100644 --- a/puppet/services/rabbitmq.yaml +++ b/puppet/services/rabbitmq.yaml @@ -18,7 +18,7 @@ parameters: type: string hidden: true RabbitFDLimit: - default: 16384 + default: 65536 description: Configures RabbitMQ FD limit type: string RabbitIPv6: @@ -30,10 +30,32 @@ outputs: role_data: description: Role data for the RabbitMQ role. 
value: + service_name: rabbitmq config_settings: rabbitmq::file_limit: {get_param: RabbitFDLimit} rabbitmq::default_user: {get_param: RabbitUserName} rabbitmq::default_pass: {get_param: RabbitPassword} rabbit_ipv6: {get_param: RabbitIPv6} + tripleo.rabbitmq.firewall_rules: + '109 rabbitmq': + dport: + - 4369 + - 5672 + - 35672 + rabbitmq::delete_guest_user: false + rabbitmq::wipe_db_on_cookie_change: true + rabbitmq::port: '5672' + rabbitmq::package_source: undef + rabbitmq::repos_ensure: false + rabbitmq_environment: + RABBITMQ_NODENAME: "rabbit@%{::hostname}" + RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' + rabbitmq_kernel_variables: + inet_dist_listen_min: '35672' + inet_dist_listen_max: '35672' + rabbitmq_config_variables: + tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]' + cluster_partition_handling: 'pause_minority' + loopback_users: '[]' step_config: | include ::tripleo::profile::base::rabbitmq diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml index 93bf7385..7ca9bbd5 100644 --- a/puppet/services/sahara-api.yaml +++ b/puppet/services/sahara-api.yaml @@ -34,6 +34,7 @@ outputs: role_data: description: Role data for the Sahara API role. value: + service_name: sahara_api config_settings: map_merge: - get_attr: [SaharaBase, role_data, config_settings] @@ -48,5 +49,10 @@ outputs: sahara::keystone::auth::admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]} sahara::keystone::auth::password: {get_param: SaharaPassword } sahara::keystone::auth::region: {get_param: KeystoneRegion} + tripleo.sahara_api.firewall_rules: + '132 sahara': + dport: + - 8386 + - 13386 step_config: | include ::tripleo::profile::base::sahara::api diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml index 275d7536..3e320128 100644 --- a/puppet/services/sahara-base.yaml +++ b/puppet/services/sahara-base.yaml @@ -31,6 +31,7 @@ outputs: role_data: description: Role data for the Sahara base service. value: + service_name: sahara_base config_settings: sahara::rabbit_password: {get_param: RabbitPassword} sahara::rabbit_user: {get_param: RabbitUserName} @@ -46,3 +47,5 @@ outputs: - spark - storm sahara::rpc_backend: rabbit + sahara::admin_tenant_name: 'service' + sahara::keystone::auth::tenant: 'service' diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml index 17ef49fa..074f83c7 100644 --- a/puppet/services/sahara-engine.yaml +++ b/puppet/services/sahara-engine.yaml @@ -23,19 +23,26 @@ outputs: role_data: description: Role data for the Sahara Engine role. 
value: + service_name: sahara_engine config_settings: map_merge: - get_attr: [SaharaBase, role_data, config_settings] - sahara_dsn: &sahara_dsn list_join: - '' - - - {get_param: [EndpointMap, MysqlVirtual, protocol]} + - - {get_param: [EndpointMap, MysqlInternal, protocol]} - '://sahara:' - {get_param: SaharaPassword} - '@' - - {get_param: [EndpointMap, MysqlVirtual, host]} + - {get_param: [EndpointMap, MysqlInternal, host]} - '/sahara' sahara::database_connection: *sahara_dsn sahara::db::mysql::password: {get_param: SaharaPassword} + sahara::db::mysql::user: sahara + sahara::db::mysql::host: {get_param: [EndpointMap, MysqlNoBracketsInternal, host]} + sahara::db::mysql::dbname: sahara + sahara::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" step_config: | include ::tripleo::profile::base::sahara::engine diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml index 7ed880fc..c9a952a5 100644 --- a/puppet/services/services.yaml +++ b/puppet/services/services.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2016-04-08 +heat_template_version: 2016-10-14 description: > Utility stack to convert an array of services into a set of combined @@ -27,9 +27,14 @@ resources: EndpointMap: {get_param: EndpointMap} outputs: - config_settings: - description: Configuration settings. - value: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}} - step_config: - description: Step configuration. - value: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]} + role_data: + description: Combined Role data for this set of services. + value: + service_names: + # Filter any null/None service_names which may be present due to mapping + # of services to OS::Heat::None + yaql: + expression: list($.data.s_names.where($ != null)) + data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}} + config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}} + step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]} diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml new file mode 100644 index 00000000..458f444b --- /dev/null +++ b/puppet/services/snmp.yaml @@ -0,0 +1,36 @@ +heat_template_version: 2016-04-08 + +description: > + SNMP client configured with Puppet, to facilitate Ceilometer Hardware + monitoring in the undercloud. This service is required to enable hardware + monitoring. + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + SnmpdReadonlyUserName: + default: ro_snmp_user + description: The user name for SNMPd with readonly rights running on all Overcloud nodes + type: string + SnmpdReadonlyUserPassword: + description: The user password for SNMPd with readonly rights running on all Overcloud nodes + type: string + hidden: true + +outputs: + role_data: + description: Role data for the SNMP services + value: + service_name: snmp + config_settings: + snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} + snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + tripleo.snmp.firewall_rules: + '127 snmp': + dport: 161 + proto: 'udp' + step_config: | + include ::tripleo::profile::base::snmp diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index a86aeaf5..99af7499 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -17,6 +17,10 @@ parameters: description: The password for the swift service account, used by the swift proxy services. type: string hidden: true + SwiftProxyNodeTimeout: + default: 60 + description: Timeout for requests going from swift-proxy to swift a/c/o services. + type: number SwiftWorkers: default: 0 description: Number of workers for Swift service. @@ -31,11 +35,14 @@ outputs: role_data: description: Role data for the Swift proxy service. value: + service_name: swift_proxy config_settings: # Swift swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} swift::proxy::authtoken::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} swift::proxy::authtoken::admin_password: {get_param: SwiftPassword} + swift::proxy::authtoken::admin_tenant_name: 'service' + swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout} swift::proxy::workers: {get_param: SwiftWorkers} swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]} swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]} @@ -45,5 +52,35 @@ outputs: swift::keystone::auth::admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]} swift::keystone::auth::password: {get_param: SwiftPassword} swift::keystone::auth::region: {get_param: KeystoneRegion} + tripleo.swift_proxy.firewall_rules: + '122 swift proxy': + dport: + - 8080 + - 13808 + swift::keystone::auth::tenant: 'service' + swift::keystone::auth::configure_s3_endpoint: false + swift::keystone::auth::operator_roles: + - admin + - swiftoperator + - ResellerAdmin + swift::proxy::keystone::operator_roles: + - admin + - swiftoperator + - ResellerAdmin + swift::proxy::pipeline: + - 'catch_errors' + - 'healthcheck' + - 'proxy-logging' + - 'cache' + - 'ratelimit' + - 'bulk' + - 'tempurl' + - 'formpost' + - 'authtoken' + - 'keystone' + - 'staticweb' + - 'proxy-logging' + - 'proxy-server' + swift::proxy::account_autocreate: true step_config: | include ::tripleo::profile::base::swift::proxy diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml new file mode 100644 index 00000000..0b17c73c --- /dev/null +++ b/puppet/services/swift-ringbuilder.yaml @@ -0,0 +1,40 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Swift Ringbuilder + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + SwiftMinPartHours: + type: number + default: 1 + description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance. + SwiftPartPower: + default: 10 + description: Partition Power to use when building Swift rings + type: number + SwiftRingBuild: + default: true + description: Whether to manage Swift rings or not + type: boolean + SwiftReplicas: + type: number + default: 3 + description: How many replicas to use in the swift rings. + +outputs: + role_data: + description: Role data for Swift Ringbuilder configuration. + value: + service_name: swift_ringbuilder + config_settings: + tripleo::profile::base::swift::ringbuilder::build_ring: {get_param: SwiftRingBuild} + tripleo::profile::base::swift::ringbuilder::replicas: {get_param: SwiftReplicas} + swift::ringbuilder::part_power: {get_param: SwiftPartPower} + swift::ringbuilder::min_part_hours: {get_param: SwiftMinPartHours} + step_config: | + include ::tripleo::profile::base::swift::ringbuilder diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml new file mode 100644 index 00000000..74679231 --- /dev/null +++ b/puppet/services/swift-storage.yaml @@ -0,0 +1,64 @@ +heat_template_version: 2016-04-08 + +description: > + OpenStack Swift Storage service configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + Debug: + default: '' + description: Set to True to enable debugging on all services. + type: string + SwiftMountCheck: + default: false + description: Value of mount_check in Swift account/container/object -server.conf + type: boolean + + # DEPRECATED options for compatibility with overcloud.yaml + # This should be removed and manipulation of the ControllerServices list + # used instead, but we need client support for that first + ControllerEnableSwiftStorage: + default: true + description: Whether to enable Swift Storage on the Controller + type: boolean + +parameter_groups: +- label: deprecated + description: Do not use deprecated params, they will be removed. + parameters: + - ControllerEnableSwiftStorage + +outputs: + role_data: + description: Role data for the Swift Proxy role. + value: + service_name: swift_storage + config_settings: + # Swift + swift::storage::all::mount_check: {get_param: SwiftMountCheck} + tripleo::profile::base::swift::storage::enable_swift_storage: {get_param: ControllerEnableSwiftStorage} + tripleo.swift_storage.firewall_rules: + '123 swift storage': + dport: + - 873 + - 6000 + - 6001 + - 6002 + swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' + swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' + swift::storage::all::object_pipeline: + - healthcheck + - recon + - object-server + swift::storage::all::container_pipeline: + - healthcheck + - container-server + swift::storage::all::account_pipeline: + - healthcheck + - account-server + step_config: | + include ::tripleo::profile::base::swift::storage diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml new file mode 100644 index 00000000..59d25dd2 --- /dev/null +++ b/puppet/services/time/ntp.yaml @@ -0,0 +1,32 @@ +heat_template_version: 2016-04-08 + +description: > + NTP service deployment using puppet, this YAML file + creates the interface between the HOT template + and the puppet manifest that actually installs + and configure NTP. 
+ +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NtpServer: + default: [] + description: NTP servers + type: comma_delimited_list + +outputs: + role_data: + description: Role ntp using composable services. + value: + service_name: ntp + config_settings: + ntp::ntpservers: {get_param: NtpServer} + tripleo.ntp.firewall_rules: + '105 ntp': + dport: 123 + proto: udp + step_config: | + include ::ntp diff --git a/puppet/services/time/timezone.yaml b/puppet/services/time/timezone.yaml new file mode 100644 index 00000000..805c9050 --- /dev/null +++ b/puppet/services/time/timezone.yaml @@ -0,0 +1,25 @@ +heat_template_version: 2016-04-08 + +description: > + Composable Timezone service + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + TimeZone: + default: 'UTC' + description: The timezone to be set on the overcloud. + type: string + +outputs: + role_data: + description: Timezone role using composable services. + value: + service_name: timezone + config_settings: + timezone::timezone: {get_param: TimeZone} + step_config: | + include ::timezone diff --git a/puppet/swift-devices-and-proxy-config.yaml b/puppet/swift-devices-and-proxy-config.yaml index 92ef5c1c..14df831f 100644 --- a/puppet/swift-devices-and-proxy-config.yaml +++ b/puppet/swift-devices-and-proxy-config.yaml @@ -20,7 +20,7 @@ resources: datafiles: swift_devices_and_proxy: mapped_data: - tripleo::ringbuilder::devices: + tripleo::profile::base::swift::ringbuilder::devices: list_join: - ", " - - list_join: diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml index 1aba2bb4..306a4d6e 100644 --- a/puppet/swift-storage-post.yaml +++ b/puppet/swift-storage-post.yaml @@ -8,13 +8,12 @@ parameters: type: boolean servers: type: json - NodeConfigIdentifiers: - type: json - description: Value which changes if the node configuration may need to be re-applied - StepConfig: + RoleData: + type: json + default: {} + DeployIdentifier: type: string - description: Config manifests that will be used to step through the deployment. 
- default: '' + description: Value which changes if the node configuration may need to be re-applied resources: @@ -27,7 +26,7 @@ resources: servers: {get_param: servers} config: {get_resource: StorageArtifactsConfig} input_values: - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} StoragePuppetConfig: type: OS::Heat::SoftwareConfig @@ -45,8 +44,7 @@ resources: list_join: - '' - - get_file: manifests/overcloud_object.pp - - get_file: manifests/ringbuilder.pp - - {get_param: StepConfig} + - {get_param: [RoleData, step_config]} StorageRingbuilderDeployment_Step2: type: OS::Heat::StructuredDeployments @@ -57,7 +55,7 @@ resources: config: {get_resource: StoragePuppetConfig} input_values: step: 2 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} StorageRingbuilderDeployment_Step3: type: OS::Heat::StructuredDeployments @@ -68,7 +66,7 @@ resources: config: {get_resource: StoragePuppetConfig} input_values: step: 3 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} StorageDeployment_Step4: type: OS::Heat::StructuredDeployments @@ -79,7 +77,7 @@ resources: config: {get_resource: StoragePuppetConfig} input_values: step: 4 - update_identifier: {get_param: NodeConfigIdentifiers} + update_identifier: {get_param: DeployIdentifier} # Note, this should come last, so use depends_on to ensure # this is created after any other resources. diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml index ed52f928..034592a7 100644 --- a/puppet/swift-storage.yaml +++ b/puppet/swift-storage.yaml @@ -18,26 +18,6 @@ parameters: default: default description: Name of an existing Nova key pair to enable SSH access to the instances type: string - MountCheck: - default: 'false' - description: Value of mount_check in Swift account/container/object -server.conf - type: boolean - MinPartHours: - type: number - default: 1 - description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance. - PartPower: - default: 10 - description: Partition Power to use when building Swift rings - type: number - RingBuild: - default: true - description: Whether to manage Swift rings or not - type: boolean - Replicas: - type: number - default: 3 - description: How many replicas to use in the swift rings. SnmpdReadonlyUserName: default: ro_snmp_user description: The user name for SNMPd with readonly rights running on all Overcloud nodes @@ -46,10 +26,6 @@ parameters: description: The user password for SNMPd with readonly rights running on all Overcloud nodes type: string hidden: true - NtpServer: - default: '' - description: Comma-separated list of ntp servers - type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -65,10 +41,6 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json - TimeZone: - default: 'UTC' - description: The timezone to be set on Ceph nodes. 
- type: string Hostname: type: string default: '' # Defaults to Heat created hostname @@ -124,11 +96,21 @@ parameters: ServiceConfigSettings: type: json default: {} + ServiceNames: + type: comma_delimited_list + default: [] + ConfigCommand: + type: string + description: Command which will be run whenever configuration data changes + default: os-refresh-config --timeout 14400 resources: SwiftStorage: type: OS::Nova::Server + metadata: + os-collect-config: + command: {get_param: ConfigCommand} properties: image: {get_param: Image} flavor: {get_param: Flavor} @@ -260,6 +242,7 @@ resources: - heat_config_%{::deploy_config_name} - object_extraconfig - extraconfig + - service_names - service_configs - object - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig @@ -269,6 +252,9 @@ resources: - network merge_behavior: deeper datafiles: + service_names: + mapped_data: + service_names: {get_param: ServiceNames} service_configs: mapped_data: {get_param: ServiceConfigSettings} common: @@ -286,14 +272,7 @@ resources: raw_data: {get_file: hieradata/object.yaml} mapped_data: # data supplied directly to this deployment configuration, etc swift::swift_hash_path_suffix: { get_input: swift_hash_suffix } - tripleo::ringbuilder::build_ring: { get_input: swift_ring_build } - tripleo::ringbuilder::part_power: { get_input: swift_part_power } - tripleo::ringbuilder::replicas: {get_input: swift_replicas } swift::storage::all::storage_local_net_ip: {get_input: swift_management_network} - swift_mount_check: {get_input: swift_mount_check } - tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours } - ntp::servers: {get_input: ntp_servers} - timezone::timezone: {get_input: timezone} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} tripleo::packages::enable_install: {get_input: enable_package_install} @@ -312,13 +291,6 @@ resources: snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} swift_hash_suffix: {get_param: HashSuffix} - swift_mount_check: {get_param: MountCheck} - swift_min_part_hours: {get_param: MinPartHours} - swift_ring_build: {get_param: RingBuild} - swift_part_power: {get_param: PartPower} - swift_replicas: { get_param: Replicas} - ntp_servers: {get_param: NtpServer} - timezone: {get_param: TimeZone} enable_package_install: {get_param: EnablePackageInstall} enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} @@ -351,6 +323,12 @@ resources: get_param: UpdateIdentifier outputs: + ip_address: + description: IP address of the server in the ctlplane network + value: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + hostname: + description: Hostname of the server + value: {get_attr: [SwiftStorage, name]} hosts_entry: value: str_replace: @@ -431,11 +409,3 @@ outputs: management_ip_address: description: IP address of the server in the management network value: {get_attr: [ManagementPort, ip_address]} - config_identifier: - description: identifier which changes if the node configuration may need re-applying - value: - list_join: - - ',' - - - {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]} - - {get_attr: [NodeTLSCAData, deploy_stdout]} - - {get_param: UpdateIdentifier} diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml index 92234b6c..51129053 100644 --- 
a/puppet/vip-config.yaml +++ b/puppet/vip-config.yaml @@ -37,6 +37,7 @@ resources: internal_api_virtual_ip: {get_input: internal_api_virtual_ip} storage_virtual_ip: {get_input: storage_virtual_ip} storage_mgmt_virtual_ip: {get_input: storage_mgmt_virtual_ip} + ironic_api_vip: {get_input: ironic_api_vip} # public_virtual_ip and controller_virtual_ip are needed in # both HAproxy & keepalived. tripleo::haproxy::public_virtual_ip: {get_input: public_virtual_ip} diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py index 2da873d0..c93c84bc 100755 --- a/tools/yaml-validate.py +++ b/tools/yaml-validate.py @@ -21,23 +21,50 @@ def exit_usage(): print('Usage %s <yaml file or directory>' % sys.argv[0]) sys.exit(1) + +def validate_service(filename, tpl): + if 'outputs' in tpl and 'role_data' in tpl['outputs']: + if 'value' not in tpl['outputs']['role_data']: + print('ERROR: invalid role_data for filename: %s' + % filename) + return 1 + role_data = tpl['outputs']['role_data']['value'] + if 'service_name' not in role_data: + print('ERROR: service_name is required in role_data for %s.' + % filename) + return 1 + # service_name must match the filename, but with an underscore + if (role_data['service_name'] != + os.path.basename(filename).split('.')[0].replace("-", "_")): + print('ERROR: service_name should match file name for service: %s.' + % filename) + return 1 + return 0 + + def validate(filename): print('Validating %s' % filename) + retval = 0 try: tpl = yaml.load(open(filename).read()) + + if (filename.startswith('./puppet/services/') and + filename != './puppet/services/services.yaml'): + retval = validate_service(filename, tpl) + except Exception: print(traceback.format_exc()) return 1 # yaml is OK, now walk the parameters and output a warning for unused ones for p in tpl.get('parameters', {}): str_p = '\'%s\'' % p - in_resources = str_p in str(tpl.get('resources', {})) - in_outputs = str_p in str(tpl.get('outputs', {})) + in_resources = str_p in str(tpl.get('resources', {})) + in_outputs = str_p in str(tpl.get('outputs', {})) if not in_resources and not in_outputs: print('Warning: parameter %s in template %s appears to be unused' % (p, filename)) - return 0 + return retval if len(sys.argv) < 2: exit_usage() |
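
The tools/yaml-validate.py change above couples every composable service template to a naming convention: the role_data output must carry a service_name equal to the template's basename with hyphens replaced by underscores, and puppet/services/services.yaml then aggregates those names, using yaql to drop the nulls left behind when a service is mapped to OS::Heat::None. A rough stand-alone sketch of both rules follows, assuming PyYAML is installed (it already is for yaml-validate.py); the helper names expected_service_name, check_service_name and filter_service_names, plus the sample data, are illustrative only and are not part of the patch::

    import os
    import yaml

    def expected_service_name(filename):
        # Same derivation as validate_service() in tools/yaml-validate.py:
        # puppet/services/nova-api.yaml -> nova_api
        return os.path.basename(filename).split('.')[0].replace('-', '_')

    def check_service_name(filename):
        # Hypothetical helper: load a service template and verify that
        # outputs.role_data.value.service_name matches the file name.
        tpl = yaml.safe_load(open(filename).read())
        role_data = tpl.get('outputs', {}).get('role_data', {}).get('value', {})
        return role_data.get('service_name') == expected_service_name(filename)

    def filter_service_names(s_names):
        # Python equivalent of the yaql expression in services.yaml:
        # list($.data.s_names.where($ != null))
        return [name for name in s_names if name is not None]

    if __name__ == '__main__':
        # The pacemaker variants keep the plain service_name, so both
        # puppet/services/nova-api.yaml and
        # puppet/services/pacemaker/nova-api.yaml declare service_name: nova_api.
        print(expected_service_name('puppet/services/pacemaker/nova-api.yaml'))
        # Services mapped to OS::Heat::None contribute null entries,
        # which the aggregation discards.
        print(filter_service_names(['nova_api', None, 'swift_proxy']))

Running the sketch prints nova_api and ['nova_api', 'swift_proxy'], which mirrors what the new validation check and the services.yaml service_names output would produce for those inputs.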