178 files changed, 6503 insertions, 3658 deletions
@@ -45,6 +45,8 @@ nosetests.xml
 *~
 *.swp
+*.bundle
+Gemfile.lock
 doc/_build
diff --git a/Gemfile b/Gemfile
new file mode 100644
index 00000000..302ef415
--- /dev/null
+++ b/Gemfile
@@ -0,0 +1,24 @@
+source 'https://rubygems.org'
+
+group :development, :test do
+  gem 'puppetlabs_spec_helper', :require => false
+
+  gem 'puppet-lint', '~> 1.1'
+  gem 'puppet-lint-absolute_classname-check'
+  gem 'puppet-lint-absolute_template_path'
+  gem 'puppet-lint-trailing_newline-check'
+
+  # Puppet 4.x related lint checks
+  gem 'puppet-lint-unquoted_string-check'
+  gem 'puppet-lint-leading_zero-check'
+  gem 'puppet-lint-variable_contains_upcase'
+  gem 'puppet-lint-numericvariable'
+end
+
+if puppetversion = ENV['PUPPET_GEM_VERSION']
+  gem 'puppet', puppetversion, :require => false
+else
+  gem 'puppet', :require => false
+end
+
+# vim:ft=ruby
diff --git a/Makefile b/Makefile
@@ -22,19 +22,19 @@ overcloud.yaml: deprecated/overcloud-source.yaml deprecated/block-storage.yaml d
 	python ./tripleo_heat_merge/merge.py --hot --scale NovaCompute=$${COMPUTESCALE:-'1'} --scale controller=$${CONTROLSCALE:-'1'} --scale SwiftStorage=$${SWIFTSTORAGESCALE:-'0'} --scale BlockStorage=$${BLOCKSTORAGESCALE:-'0'} --scale CephStorage=$${CEPHSTORAGESCALE:-'0'} deprecated/overcloud-source.yaml deprecated/block-storage.yaml deprecated/swift-source.yaml deprecated/swift-storage-source.yaml deprecated/ssl-source.yaml deprecated/swift-deploy.yaml deprecated/nova-compute-config.yaml ${CONTROLEXTRA} > $@.tmp
 	mv $@.tmp $@
-undercloud-vm.yaml: undercloud-source.yaml undercloud-vm-nova-config.yaml undercloud-vm-nova-deploy.yaml
+undercloud-vm.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-vm-nova-config.yaml deprecated/undercloud-vm-nova-deploy.yaml
 	python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
 	mv $@.tmp $@
-undercloud-bm.yaml: undercloud-source.yaml undercloud-bm-nova-config.yaml undercloud-bm-nova-deploy.yaml
+undercloud-bm.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-bm-nova-config.yaml deprecated/undercloud-bm-nova-deploy.yaml
 	python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
 	mv $@.tmp $@
-undercloud-vm-ironic.yaml: undercloud-source.yaml undercloud-vm-ironic-config.yaml undercloud-vm-ironic-deploy.yaml
+undercloud-vm-ironic.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-vm-ironic-config.yaml deprecated/undercloud-vm-ironic-deploy.yaml
 	python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
 	mv $@.tmp $@
-undercloud-vm-ironic-vlan.yaml: undercloud-source.yaml undercloud-vm-ironic-config.yaml undercloud-vm-ironic-deploy.yaml undercloud-vlan-port.yaml
+undercloud-vm-ironic-vlan.yaml: deprecated/undercloud-source.yaml deprecated/undercloud-vm-ironic-config.yaml deprecated/undercloud-vm-ironic-deploy.yaml deprecated/undercloud-vlan-port.yaml
 	python ./tripleo_heat_merge/merge.py --hot $^ > $@.tmp
 	mv $@.tmp $@
diff --git a/README.md b/README.md
deleted file mode 100644
index 97cc384e..00000000
--- a/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-templates
-=========
-
-Generic templates to describe multi-host infrastructure, consumable by OpenStack Heat, Crowbar, others.
-
-
-merge.py
-========
-
-The Makefile contains several targets for generated templates, see its contents for all of them. To run functional tests for merge.py, run 'make test'.
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..148a741f
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,55 @@
+======================
+tripleo-heat-templates
+======================
+
+Heat templates to deploy OpenStack using OpenStack.
+
+* Free software: Apache license
+* Documentation: http://docs.openstack.org/developer/tripleo-docs
+* Source: http://git.openstack.org/cgit/openstack/tripleo-heat-templates
+* Bugs: http://bugs.launchpad.net/tripleo
+
+Features
+--------
+
+The ability to deploy a multi-node, role-based OpenStack deployment using
+OpenStack Heat. Notable features include:
+
+  * Choice of deployment/configuration tooling: puppet, os-apply-config, and
+    (soon) docker
+
+  * Role-based deployment: roles for the controller, compute, ceph, swift,
+    and cinder storage
+
+  * Physical network configuration: support for isolated networks, bonding,
+    and standard ctlplane networking
+
+Directories
+-----------
+
+A description of the directory layout in TripleO Heat Templates.
+
+  * deprecated: contains templates that have been deprecated
+
+  * environments: contains heat environment files that can be used with -e
+    on the command line to enable features, etc.
+
+  * extraconfig: templates used to enable 'extra' functionality. Includes
+    functionality for distro specific registration and upgrades.
+
+  * firstboot: example first_boot scripts that can be used when initially
+    creating instances.
+
+  * network: heat templates to help create isolated networks and ports
+
+  * puppet: templates mostly driven by configuration with puppet. To use these
+    templates you can use the overcloud-resource-registry-puppet.yaml.
+
+  * os-apply-config: templates mostly driven by configuration w/
+    os-collect-config and bash based
+    elements (which use the Heat os-apply-config group).
+    These will soon be deprecated and are no longer part
+    of the upstream CI testing efforts.
+
+  * validation-scripts: validation scripts useful to all deployment
+    configurations
diff --git a/Rakefile b/Rakefile
new file mode 100644
index 00000000..bca6a6c2
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1,6 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+
+PuppetLint.configuration.fail_on_warnings = true
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.send('disable_autoloader_layout')
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
new file mode 100644
index 00000000..a7383375
--- /dev/null
+++ b/all-nodes-validation.yaml
@@ -0,0 +1,27 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Software Config to drive validations that occur on all nodes.
+  Note, you need the heat-config-script element built into your
+  images, due to the script group below.
+
+parameters:
+  PingTestIps:
+    default: ''
+    description: A string containing a space separated list of IP addresses used to ping test each available network interface.
+    type: string
+
+resources:
+  AllNodesValidationsImpl:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      inputs:
+        - name: ping_test_ips
+          default: {get_param: PingTestIps}
+      config: {get_file: ./validation-scripts/all-nodes.sh}
+
+outputs:
+  OS::stack_id:
+    description: The ID of the AllNodesValidationsImpl resource.
+ value: {get_resource: AllNodesValidationsImpl} diff --git a/base.yaml b/base.yaml deleted file mode 100644 index a1ba509f..00000000 --- a/base.yaml +++ /dev/null @@ -1,77 +0,0 @@ -heat_template_version: 2013-05-23 -description: 'Tie OpenStack components together' -parameters: - KeystoneAdminToken: - description: Admin Token needed for keystone - type: string - hidden: true - TemplateRoot: - description: URL Base where all of our templates are available - type: string -resources: - RabbitMQ: - type: AWS::CloudFormation::Stack - TemplateURL: - Fn::Join: - - {get_param: TemplateRoot} - - rabbitmq.yaml - parameters: - InstanceType: m1.small - KeyName: default - RabbitMQImage: image-rabbitmq - MySQL: - type: AWS::CloudFormation::Stack - TemplateURL: - Fn::Join: - - {get_param: TemplateRoot} - - mysql.yaml - parameters: - InstanceType: m1.small - KeyName: default - MySQLImage: image-mysql - Keystone: - type: AWS::CloudFormation::Stack - TemplateURL: - Fn::Join: - - {get_param: TemplateRoot} - - keystone.yaml - parameters: - AdminToken: {get_param: KeystoneAdminToken} - KeyName: default - KeystoneDSN: - Fn::Join: - - 'mysql://keystone:' - - {get_attr: [ MySQL , KeystonePassword ]} - - '@' - - {get_attr: [ MySQL , MySQLHost ]} - - '/keystone' - Glance: - type: AWS::CloudFormation::Stack - TemplateURL: - Fn::Join: - - {get_param: TemplateRoot} - - glance.yaml - parameters: - KeyName: default - HeatDSN: - Fn::Join: - - 'mysql://glance:' - - {get_attr: [ MySQL, GlancePassword ] } - - '@' - - {get_attr: [ MySQL, MySQLHost ]} - - '/glance' - Heat: - type: AWS::CloudFormation::Stack - TemplateURL: - Fn::Join: - - {get_param: TemplateRoot} - - heat.yaml - parameters: - KeyName: default - HeatDSN: - Fn::Join: - - 'mysql://heat:' - - {get_attr: [ MySQL, HeatPassword ] } - - '@' - - {get_attr: [ MySQL, MySQLHost ]} - - '/heat' diff --git a/debian-mirror.yaml b/debian-mirror.yaml deleted file mode 100644 index ddfff6a6..00000000 --- a/debian-mirror.yaml +++ /dev/null @@ -1,31 +0,0 @@ -description: 'Debian-mirror: A Debian or Ubuntu mirror in the cloud' -parameters: - DebianMirrorArchitectures: - default: [{"arch": "amd64"}] - description: The architectures to be mirrored - type: JSON - DebianMirrorComponents: - default: ["main", "restricted", "universe", "multiverse"] - description: The components to be mirrored - type: JSON - DebianMirrorMirror: - default: http://archive.ubuntu.com/ubuntu - description: The mirror that is to be used as the source - type: string - DebianMirrorSuites: - default: ["saucy", "saucy-updates", "saucy-security"] - description: The suites to be mirrored - type: JSON -resources: - debianMirrorConfig: - type: AWS::AutoScaling::LaunchConfiguration - metadata: - debian-mirror: - mirror: - get_param: DebianMirrorMirror - suites: - get_param: DebianMirrorSuites - architectures: - get_param: DebianMirrorArchitectures - components: - get_param: DebianMirrorComponents diff --git a/deprecated/nova-compute-instance.yaml b/deprecated/nova-compute-instance.yaml index e68c61f9..811c0fc3 100644 --- a/deprecated/nova-compute-instance.yaml +++ b/deprecated/nova-compute-instance.yaml @@ -100,7 +100,7 @@ parameters: NeutronHost: type: string NeutronNetworkType: - default: 'gre' + default: 'vxlan' description: The tenant network type for Neutron, either gre or vxlan. type: string NeutronNetworkVLANRanges: @@ -114,7 +114,7 @@ parameters: description: A port to add to the NeutronPhysicalBridge. 
     type: string
   NeutronTunnelTypes:
-    default: 'gre'
+    default: 'vxlan'
     description: |
       The tunnel types for the Neutron tenant network. To specify multiple
       values, use a comma separated string, like so: 'gre,vxlan'
diff --git a/deprecated/overcloud-source.yaml b/deprecated/overcloud-source.yaml
index d355c4df..0729b338 100644
--- a/deprecated/overcloud-source.yaml
+++ b/deprecated/overcloud-source.yaml
@@ -221,7 +221,7 @@ parameters:
     If set, flat networks to configure in neutron plugins. Defaults to
     'datacentre' to permit external network creation.
   NeutronNetworkType:
-    default: 'gre'
+    default: 'vxlan'
     description: The tenant network type for Neutron, either gre or vxlan.
     type: string
   NeutronNetworkVLANRanges:
@@ -279,7 +279,7 @@ parameters:
     description: Shared secret to prevent spoofing
     type: string
   NeutronTunnelTypes:
-    default: 'gre'
+    default: 'vxlan'
     description: |
       The tunnel types for the Neutron tenant network. To specify multiple
       values, use a comma separated string, like so: 'gre,vxlan'
diff --git a/undercloud-bm-nova-config.yaml b/deprecated/undercloud-bm-nova-config.yaml
index 306dc0a1..306dc0a1 100644
--- a/undercloud-bm-nova-config.yaml
+++ b/deprecated/undercloud-bm-nova-config.yaml
diff --git a/undercloud-bm-nova-deploy.yaml b/deprecated/undercloud-bm-nova-deploy.yaml
index dca68329..dca68329 100644
--- a/undercloud-bm-nova-deploy.yaml
+++ b/deprecated/undercloud-bm-nova-deploy.yaml
diff --git a/undercloud-source.yaml b/deprecated/undercloud-source.yaml
index 6fe2066d..317896d9 100644
--- a/undercloud-source.yaml
+++ b/deprecated/undercloud-source.yaml
@@ -1,4 +1,4 @@
-description: All-in-one baremetal OpenStack and all dependencies.
+description: Deprecated. Use instack-undercloud instead. All-in-one baremetal OpenStack and all dependencies.
 heat_template_version: 2013-05-23
 parameters:
   AdminPassword:
diff --git a/undercloud-vlan-port.yaml b/deprecated/undercloud-vlan-port.yaml
index 7e39f5fc..7e39f5fc 100644
--- a/undercloud-vlan-port.yaml
+++ b/deprecated/undercloud-vlan-port.yaml
diff --git a/undercloud-vm-ironic-config.yaml b/deprecated/undercloud-vm-ironic-config.yaml
index cc0dafb6..cc0dafb6 100644
--- a/undercloud-vm-ironic-config.yaml
+++ b/deprecated/undercloud-vm-ironic-config.yaml
diff --git a/undercloud-vm-ironic-deploy.yaml b/deprecated/undercloud-vm-ironic-deploy.yaml
index 5d23495c..5d23495c 100644
--- a/undercloud-vm-ironic-deploy.yaml
+++ b/deprecated/undercloud-vm-ironic-deploy.yaml
diff --git a/undercloud-vm-nova-config.yaml b/deprecated/undercloud-vm-nova-config.yaml
index 1fb8abb3..1fb8abb3 100644
--- a/undercloud-vm-nova-config.yaml
+++ b/deprecated/undercloud-vm-nova-config.yaml
diff --git a/undercloud-vm-nova-deploy.yaml b/deprecated/undercloud-vm-nova-deploy.yaml
index da15b46d..da15b46d 100644
--- a/undercloud-vm-nova-deploy.yaml
+++ b/deprecated/undercloud-vm-nova-deploy.yaml
diff --git a/docker/README-containers.md b/docker/README-containers.md
new file mode 100644
index 00000000..17990b54
--- /dev/null
+++ b/docker/README-containers.md
@@ -0,0 +1,50 @@
+# Using Docker Containers With TripleO
+
+## Configuring TripleO to use a container-based compute node
+
+Steps include:
+- Adding a base OS image to glance
+- Deploying an overcloud configured to use the docker compute heat templates
+
+## Getting the base OS image working
+
+Download the fedora atomic image into glance:
+
+```
+wget https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Atomic-22-20150521.x86_64.qcow2
+glance image-create --name atomic-image --file Fedora-Cloud-Atomic-22-20150521.x86_64.qcow2 --disk-format qcow2 --container-format bare
+```
+
+## Configuring TripleO
+
+You can use the tripleo.sh script up until the point of running the Overcloud.
+https://github.com/openstack/tripleo-common/blob/master/scripts/tripleo.sh
+
+Create the Overcloud:
+```
+$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker-rdo.yaml --libvirt-type=qemu
+```
+
+Source the overcloudrc and then you can use the overcloud.
+
+## Debugging
+
+You can ssh into the controller/compute nodes by using the heat key, e.g.:
+```
+nova list
+ssh heat-admin@<compute_node_ip>
+```
+
+You can check to see what docker containers are running:
+```
+sudo docker ps -a
+```
+
+To enter a container that doesn't seem to be working right:
+```
+sudo docker exec -ti <container name> /bin/bash
+```
+
+Then you can check logs etc.
+
+You can also just do a 'docker logs' on a given container.
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
new file mode 100644
index 00000000..0d049ebc
--- /dev/null
+++ b/docker/compute-post.yaml
@@ -0,0 +1,228 @@
+heat_template_version: 2015-04-30
+
+description: >
+  OpenStack compute node post deployment for Docker.
+
+parameters:
+  servers:
+    type: json
+  NodeConfigIdentifiers:
+    type: json
+    description: Value which changes if the node configuration may need to be re-applied
+  DockerComputeImage:
+    type: string
+  DockerComputeDataImage:
+    type: string
+  DockerLibvirtImage:
+    type: string
+  DockerNeutronAgentImage:
+    type: string
+  DockerOpenvswitchImage:
+    type: string
+  DockerOvsVswitchdImage:
+    type: string
+  DockerOpenvswitchDBImage:
+    type: string
+
+resources:
+
+  ComputePuppetConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: puppet
+      options:
+        enable_hiera: True
+        enable_facter: False
+        tags: package,file,concat,file_line,nova_config,neutron_config,neutron_agent_ovs,neutron_plugin_ml2
+      inputs:
+        - name: tripleo::packages::enable_install
+          type: Boolean
+          default: True
+      outputs:
+        - name: result
+      config:
+        get_file: ../puppet/manifests/overcloud_compute.pp
+
+  ComputePuppetDeployment:
+    type: OS::Heat::SoftwareDeployments
+    properties:
+      servers: {get_param: servers}
+      config: {get_resource: ComputePuppetConfig}
+      input_values:
+        update_identifier: {get_param: NodeConfigIdentifiers}
+        tripleo::packages::enable_install: True
+
+  CopyEtcConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      outputs:
+        - name: result
+      config: {get_file: ./copy-etc.sh}
+
+  CopyEtcDeployment:
+    type: OS::Heat::SoftwareDeployments
+    depends_on: ComputePuppetDeployment
+    properties:
+      config: {get_resource: CopyEtcConfig}
+      servers: {get_param: servers}
+
+  NovaComputeContainersDeploymentOVS:
+    type: OS::Heat::StructuredDeployments
+    properties:
+      config: {get_resource: NovaComputeContainersConfigOVS}
+      servers: {get_param: servers}
+
+  NovaComputeContainersConfigOVS:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: docker-compose
+      config:
+        ovsvswitchd:
+          image: {get_param: DockerOvsVswitchdImage}
+          container_name: ovs-vswitchd
+          net: host
+          privileged: true
+          restart: always
+          volumes:
+            - /run:/run
+            - /lib/modules:/lib/modules:ro
+          environment:
+            - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+
openvswitchdb: + image: {get_param: DockerOpenvswitchDBImage} + container_name: ovs-db-server + net: host + restart: always + volumes: + - /run:/run + environment: + - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS + + NovaComputeContainersDeploymentNetconfig: + type: OS::Heat::SoftwareDeployments + depends_on: NovaComputeContainersDeploymentOVS + properties: + config: {get_resource: NovaComputeContainersConfigNetconfig} + servers: {get_param: servers} + + # We run os-net-config here because we depend on the ovs containers to be up + # and running before we configure the network. This allows explicit timing + # of the network configuration. + NovaComputeContainersConfigNetconfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + outputs: + - name: result + config: | + #!/bin/bash + /usr/local/bin/run-os-net-config + + LibvirtContainersDeployment: + type: OS::Heat::StructuredDeployments + depends_on: [CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig] + properties: + config: {get_resource: LibvirtContainersConfig} + servers: {get_param: servers} + + LibvirtContainersConfig: + type: OS::Heat::StructuredConfig + properties: + group: docker-compose + config: + computedata: + image: {get_param: DockerComputeDataImage} + container_name: computedata + + libvirt: + image: {get_param: DockerLibvirtImage} + container_name: libvirt + net: host + pid: host + privileged: true + restart: always + volumes: + - /run:/run + - /lib/modules:/lib/modules:ro + - /var/lib/etc-data/libvirt/libvirtd.conf:/opt/kolla/libvirtd/libvirtd.conf + - /var/lib/nova/instances:/var/lib/nova/instances + environment: + - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS + volumes_from: + - computedata + + NovaComputeContainersDeployment: + type: OS::Heat::StructuredDeployments + depends_on: [CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig, LibvirtContainersDeployment] + properties: + config: {get_resource: NovaComputeContainersConfig} + servers: {get_param: servers} + + NovaComputeContainersConfig: + type: OS::Heat::StructuredConfig + properties: + group: docker-compose + config: + openvswitch: + image: {get_param: DockerOpenvswitchImage} + container_name: openvswitch + net: host + privileged: true + restart: always + volumes: + - /run:/run + - /lib/modules:/lib/modules:ro + - /var/lib/etc-data/neutron/neutron.conf:/etc/kolla/neutron-openvswitch-agent/:ro + - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/etc/kolla/neutron-openvswitch-agent/:ro + environment: + - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS + volumes_from: + - computedata + # FIXME: Kolla now uses a JSON model to run custom commands. We rebuilt a custom container to read in KOLLA_COMMAND_ARGS + + # FIXME: Here we're subjugating kolla's start scripts because we want our custom run command + neutronagent: + image: {get_param: DockerOpenvswitchImage} + container_name: neutronagent + net: host + pid: host + privileged: true + restart: always + volumes: + - /run:/run + - /lib/modules:/lib/modules:ro + - /var/lib/etc-data/neutron/neutron.conf:/etc/neutron/neutron.conf:ro + - /var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini:ro + environment: + - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS + # FIXME: Kolla now uses a JSON model to run custom commands. 
We rebuilt a custom container to read in KOLLA_COMMAND_ARGS + - KOLLA_COMMAND_ARGS=--config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini + volumes_from: + - computedata + + novacompute: + image: {get_param: DockerComputeImage} + container_name: novacompute + net: host + privileged: true + restart: always + volumes: + - /run:/run + - /sys/fs/cgroup:/sys/fs/cgroup + - /lib/modules:/lib/modules:ro + - /var/lib/etc-data/:/etc/:ro + - /var/lib/nova/instances:/var/lib/nova/instances + volumes_from: + - computedata + # FIXME: this skips the kolla start.sh script and just starts Nova + # Ideally we'd have an environment that switched the kolla container + # to be externally configured. + command: /usr/bin/nova-compute + + ExtraConfig: + depends_on: NovaComputeContainersDeployment + type: OS::TripleO::NodeExtraConfigPost + properties: + servers: {get_param: servers} diff --git a/docker/copy-etc.sh b/docker/copy-etc.sh new file mode 100644 index 00000000..1a6cd520 --- /dev/null +++ b/docker/copy-etc.sh @@ -0,0 +1,3 @@ +#!/bin/bash +echo "Copying agent container /etc to /var/lib/etc-data" +cp -a /etc/* /var/lib/etc-data/ diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/install_docker_agents.yaml new file mode 100644 index 00000000..8adc8939 --- /dev/null +++ b/docker/firstboot/install_docker_agents.yaml @@ -0,0 +1,28 @@ +heat_template_version: 2014-10-16 + +parameters: + DockerAgentImage: + type: string + default: dprince/heat-docker-agents-centos + +resources: + + userdata: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: install_docker_agents} + + install_docker_agents: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + str_replace: + params: + $agent_image: {get_param: DockerAgentImage} + template: {get_file: ./start_docker_agents.sh} + +outputs: + OS::stack_id: + value: {get_resource: userdata} diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh new file mode 100644 index 00000000..88759a5d --- /dev/null +++ b/docker/firstboot/start_docker_agents.sh @@ -0,0 +1,75 @@ +#!/bin/bash +set -eux + +# firstboot isn't split out by role yet so we handle it this way +if ! hostname | grep compute &>/dev/null; then + echo "Exiting. This script is only for the compute role." 
+ exit 0 +fi + +mkdir -p /var/lib/etc-data/ #FIXME: this should be a docker data container + +# heat-docker-agents service +cat <<EOF > /etc/systemd/system/heat-docker-agents.service + +[Unit] +Description=Heat Docker Agent Container +After=docker.service +Requires=docker.service + +[Service] +User=root +Restart=on-failure +ExecStartPre=-/usr/bin/docker kill heat-agents +ExecStartPre=-/usr/bin/docker rm heat-agents +ExecStartPre=/usr/bin/docker pull $agent_image +ExecStart=/usr/bin/docker run --name heat-agents --privileged --net=host -v /var/lib/etc-data:/var/lib/etc-data -v /run:/run -v /etc:/host/etc -v /usr/bin/atomic:/usr/bin/atomic -v /var/lib/dhclient:/var/lib/dhclient -v /var/lib/cloud:/var/lib/cloud -v /var/lib/heat-cfntools:/var/lib/heat-cfntools --entrypoint=/usr/bin/os-collect-config $agent_image +ExecStop=/usr/bin/docker stop heat-agents + +[Install] +WantedBy=multi-user.target + +EOF + +# update docker for local insecure registry(optional) +# Note: This is different for different docker versions +# For older docker versions < 1.4.x use commented line +#echo "OPTIONS='--insecure-registry $docker_registry'" >> /etc/sysconfig/docker +#echo "ADD_REGISTRY='--registry-mirror $docker_registry'" >> /etc/sysconfig/docker + +# Local docker registry 1.8 +#/bin/sed -i s/ADD_REGISTRY/#ADD_REGISTRY/ /etc/sysconfig/docker + +/sbin/setenforce 0 +/sbin/modprobe ebtables + +# Create /var/lib/etc-data for now. FIXME: This should go into a data container. +#mkdir -p /var/lib/etc-data + +echo nameserver 8.8.8.8 > /etc/resolv.conf + +# We need hostname -f to return in a centos container for the puppet hook +HOSTNAME=$(hostname) +echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts + +# Another hack.. we need a different docker version +# (should obviously be dropped once the atomic image contains docker 1.8.2) +/usr/bin/systemctl stop docker.service +/bin/curl -o /tmp/docker https://get.docker.com/builds/Linux/x86_64/docker-1.8.2 +/bin/mount -o remount,rw /usr +/bin/rm /bin/docker +/bin/cp /tmp/docker /bin/docker +/bin/chmod 755 /bin/docker + +# enable and start docker +/usr/bin/systemctl enable docker.service +/usr/bin/systemctl restart --no-block docker.service + +# enable and start heat-docker-agents +chmod 0640 /etc/systemd/system/heat-docker-agents.service +/usr/bin/systemctl enable heat-docker-agents.service +/usr/bin/systemctl start --no-block heat-docker-agents.service + +# Disable NetworkManager and let the ifup/down scripts work properly. +/usr/bin/systemctl disable NetworkManager +/usr/bin/systemctl stop NetworkManager diff --git a/environments/cinder-netapp-config.yaml b/environments/cinder-netapp-config.yaml index 4dd9eed7..0437cc67 100644 --- a/environments/cinder-netapp-config.yaml +++ b/environments/cinder-netapp-config.yaml @@ -1,30 +1,29 @@ # A Heat environment file which can be used to enable a -# a Cinder NetApp backend. 
+# a Cinder NetApp backend, configured via puppet resource_registry: - OS::TripleO::Controller::CinderBackend: extraconfig/controller/cinder-netapp.yaml + OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml parameter_defaults: - CinderBackendConfig: - CinderEnableNetappBackend: false - CinderNetappBackendName: 'tripleo_netapp' - CinderNetappLogin: '' - CinderNetappPassword: '' - CinderNetappServerHostname: '' - CinderNetappServerPort: '80' - CinderNetappSizeMultiplier: '1.2' - CinderNetappStorageFamily: 'ontap_cluster' - CinderNetappStorageProtocol: 'nfs' - CinderNetappTransportType: 'http' - CinderNetappVfiler: '' - CinderNetappVolumeList: '' - CinderNetappVserver: '' - CinderNetappPartnerBackendName: '' - CinderNetappNfsShares: '' - CinderNetappNfsSharesConfig: '/etc/cinder/shares.conf' - CinderNetappNfsMountOptions: '' - CinderNetappCopyOffloadToolPath: '' - CinderNetappControllerIps: '' - CinderNetappSaPassword: '' - CinderNetappStoragePools: '' - CinderNetappEseriesHostType: 'linux_dm_mp' - CinderNetappWebservicePath: '/devmgr/v2' + CinderEnableNetappBackend: true + CinderNetappBackendName: 'tripleo_netapp' + CinderNetappLogin: '' + CinderNetappPassword: '' + CinderNetappServerHostname: '' + CinderNetappServerPort: '80' + CinderNetappSizeMultiplier: '1.2' + CinderNetappStorageFamily: 'ontap_cluster' + CinderNetappStorageProtocol: 'nfs' + CinderNetappTransportType: 'http' + CinderNetappVfiler: '' + CinderNetappVolumeList: '' + CinderNetappVserver: '' + CinderNetappPartnerBackendName: '' + CinderNetappNfsShares: '' + CinderNetappNfsSharesConfig: '/etc/cinder/shares.conf' + CinderNetappNfsMountOptions: '' + CinderNetappCopyOffloadToolPath: '' + CinderNetappControllerIps: '' + CinderNetappSaPassword: '' + CinderNetappStoragePools: '' + CinderNetappEseriesHostType: 'linux_dm_mp' + CinderNetappWebservicePath: '/devmgr/v2' diff --git a/environments/config-debug.yaml b/environments/config-debug.yaml new file mode 100644 index 00000000..b176c255 --- /dev/null +++ b/environments/config-debug.yaml @@ -0,0 +1,5 @@ +# A Heat environment file which can be used to enable config +# management (e.g. Puppet) debugging. + +parameter_defaults: + ConfigDebug: true diff --git a/environments/docker-rdo.yaml b/environments/docker-rdo.yaml new file mode 100644 index 00000000..8a6e1018 --- /dev/null +++ b/environments/docker-rdo.yaml @@ -0,0 +1,17 @@ +resource_registry: + # Docker container with heat agents for containerized compute node. 
+ OS::TripleO::ComputePostDeployment: ../docker/compute-post.yaml + OS::TripleO::NodeUserData: ../docker/firstboot/install_docker_agents.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../net-config-bridge.yaml + +parameters: + NovaImage: atomic-image + +parameter_defaults: + DockerComputeImage: rthallisey/centos-binary-nova-compute:liberty + DockerComputeDataImage: kollaglue/centos-rdo-nova-compute-data:liberty2 + DockerLibvirtImage: kollaglue/centos-rdo-nova-libvirt:liberty2 + DockerNeutronAgentImage: kollaglue/centos-rdo-neutron-agents:liberty2 + DockerOpenvswitchImage: rthallisey/centos-rdo-neutron-openvswitch-agent:latest + DockerOvsVswitchdImage: kollaglue/centos-rdo-ovs-vswitchd:liberty2 + DockerOpenvswitchDBImage: kollaglue/centos-rdo-ovs-db-server:liberty2 diff --git a/environments/mongodb-nojournal.yaml b/environments/mongodb-nojournal.yaml new file mode 100644 index 00000000..1e13e452 --- /dev/null +++ b/environments/mongodb-nojournal.yaml @@ -0,0 +1,5 @@ +# A Heat environment file which can be used to disable journal in MongoDb. +# Since, when journaling is enabled, MongoDb will create big journal file +# it can take time. In a CI environment for example journaling is not necessary. +parameters: + MongoDbNoJournal: true diff --git a/environments/net-bond-with-vlans.yaml b/environments/net-bond-with-vlans.yaml index 73f71324..9600fc7e 100644 --- a/environments/net-bond-with-vlans.yaml +++ b/environments/net-bond-with-vlans.yaml @@ -12,7 +12,11 @@ resource_registry: OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml -parameters: +# We use parameter_defaults instead of parameters here because Tuskar munges +# the names of top level and role level parameters with the role name and a +# version. Using parameter_defaults makes it such that if the parameter name is +# not defined in the template, we don't get an error. +parameter_defaults: # This sets 'external_network_bridge' in l3_agent.ini to an empty string # so that external networks act like provider bridge networks (they # will plug into br-int instead of br-ex) diff --git a/environments/net-multiple-nics.yaml b/environments/net-multiple-nics.yaml new file mode 100644 index 00000000..5ee516fb --- /dev/null +++ b/environments/net-multiple-nics.yaml @@ -0,0 +1,13 @@ +# This template configures each role to use a separate NIC for +# each isolated network. +# This template assumes use of network-isolation.yaml. +# +# FIXME: if/when we add functionality to heatclient to include heat +# environment files we should think about using it here to automatically +# include network-isolation.yaml. 
+resource_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: ../network/config/multiple-nics/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/multiple-nics/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/multiple-nics/controller.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/multiple-nics/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/multiple-nics/ceph-storage.yaml diff --git a/environments/net-single-nic-with-vlans.yaml b/environments/net-single-nic-with-vlans.yaml index 8561acd3..bdfeadd3 100644 --- a/environments/net-single-nic-with-vlans.yaml +++ b/environments/net-single-nic-with-vlans.yaml @@ -12,7 +12,11 @@ resource_registry: OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml -parameters: +# We use parameter_defaults instead of parameters here because Tuskar munges +# the names of top level and role level parameters with the role name and a +# version. Using parameter_defaults makes it such that if the parameter name is +# not defined in the template, we don't get an error. +parameter_defaults: # This sets 'external_network_bridge' in l3_agent.ini to an empty string # so that external networks act like provider bridge networks (they # will plug into br-int instead of br-ex) diff --git a/environments/neutron-ml2-bigswitch.yaml b/environments/neutron-ml2-bigswitch.yaml new file mode 100644 index 00000000..69c91326 --- /dev/null +++ b/environments/neutron-ml2-bigswitch.yaml @@ -0,0 +1,17 @@ +# A Heat environment file which can be used to enable Big Switch +# extensions, configured via puppet +resource_registry: + OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml + +parameter_defaults: + # Required to fill in: + NeutronBigswitchRestproxyServers: + NeutronBigswitchRestproxyServerAuth: + + # Optional: + # NeutronBigswitchRestproxyAutoSyncOnFailure: + # NeutronBigswitchRestproxyConsistencyInterval: + # NeutronBigswitchRestproxyNeutronId: + # NeutronBigswitchRestproxyServerSsl: + # NeutronBigswitchRestproxySslCertDirectory: + diff --git a/environments/neutron-ml2-cisco-n1kv.yaml b/environments/neutron-ml2-cisco-n1kv.yaml new file mode 100644 index 00000000..651e9564 --- /dev/null +++ b/environments/neutron-ml2-cisco-n1kv.yaml @@ -0,0 +1,11 @@ +# A Heat environment file which can be used to enable a +# a Cisco N1KV backend, configured via puppet +resource_registry: + OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml + OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml + +parameter_defaults: + N1000vVSMIP: '192.0.2.50' + N1000vMgmtGatewayIP: '192.0.2.1' + N1000vVSMDomainID: '100' + N1000vVSMHostMgmtIntf: 'br-ex' diff --git a/environments/neutron-ml2-cisco-nexus-ucsm.yaml b/environments/neutron-ml2-cisco-nexus-ucsm.yaml new file mode 100644 index 00000000..5a1a32a3 --- /dev/null +++ b/environments/neutron-ml2-cisco-nexus-ucsm.yaml @@ -0,0 +1,25 @@ +# A Heat environment file which can be used to enable a +# a Cisco Neutron plugin. 
+resource_registry: + OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml + +parameter_defaults: + NetworkUCSMIp: '127.0.0.1' + NetworkUCSMUsername: 'admin' + NetworkUCSMPassword: 'password' + NetworkUCSMHostList: '12:34:56:78:9a:bc:profile1, 12:34:56:78:9a:de:profile2' + NetworkUCSMSupportedPciDevs: '' + NetworkNexusConfig: {} + NetworkNexusManagedPhysicalNetwork: '' + NetworkNexusVlanNamePrefix: 'q-' + NetworkNexusSviRoundRobin: 'false' + NetworkNexusProviderVlanNamePrefix: 'p-' + NetworkNexusPersistentSwitchConfig: 'false' + NetworkNexusSwitchHeartbeatTime: 0 + NetworkNexusSwitchReplayCount: 3 + NetworkNexusProviderVlanAutoCreate: 'true' + NetworkNexusProviderVlanAutoTrunk: 'true' + NetworkNexusVxlanGlobalConfig: 'true' + NetworkNexusHostKeyChecks: 'false' + NetworkNexusVxlanVniRanges: '0:0' + NetworkNexusVxlanMcastRanges: '0.0.0.0:0.0.0.0' diff --git a/environments/overcloud-steps.yaml b/environments/overcloud-steps.yaml index 99f73688..f61cbb6f 100644 --- a/environments/overcloud-steps.yaml +++ b/environments/overcloud-steps.yaml @@ -1,7 +1,7 @@ # Specifies hooks/breakpoints where overcloud deployment should stop # Allows operator validation between steps, and/or more granular control. # Note: the wildcards relate to naming convention for some resource suffixes, -# e.g see puppet/*-post-puppet.yaml, enabling this will mean we wait for +# e.g see puppet/*-post.yaml, enabling this will mean we wait for # a user signal on every *Deployment_StepN resource defined in those files. resource_registry: resources: diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml new file mode 100644 index 00000000..7f5b5080 --- /dev/null +++ b/environments/puppet-ceph-external.yaml @@ -0,0 +1,25 @@ +# A Heat environment file which can be used to enable the +# use of an externally managed Ceph cluster. +resource_registry: + OS::TripleO::CephClusterConfig::SoftwareConfig: ../puppet/extraconfig/ceph/ceph-external-config.yaml + +parameter_defaults: + # NOTE: These example parameters are required when using Ceph External + #CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' + #CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' + #CephExternalMonHost: '172.16.1.7, 172.16.1.8' + + # the following parameters enable Ceph backends for Cinder, Glance, and Nova + NovaEnableRbdBackend: true + CinderEnableRbdBackend: true + GlanceBackend: rbd + # If the Ceph pools which host VMs, Volumes and Images do not match these + # names OR the client keyring to use is not named 'openstack', edit the + # following as needed. + NovaRbdPoolName: vms + CinderRbdPoolName: volumes + GlanceRbdPoolName: images + CephClientUserName: openstack + + # finally we disable the Cinder LVM backend + CinderEnableIscsiBackend: false diff --git a/environments/storage-environment.yaml b/environments/storage-environment.yaml new file mode 100644 index 00000000..5ccfa58e --- /dev/null +++ b/environments/storage-environment.yaml @@ -0,0 +1,57 @@ +## A Heat environment file which can be used to set up storage +## backends. Defaults to Ceph used as a backend for Cinder, Glance and +## Nova ephemeral storage. +parameter_defaults: + + #### BACKEND SELECTION #### + + ## Whether to enable iscsi backend for Cinder. + CinderEnableIscsiBackend: false + ## Whether to enable rbd (Ceph) backend for Cinder. + CinderEnableRbdBackend: true + ## Whether to enable NFS backend for Cinder. 
+ # CinderEnableNfsBackend: false + ## Whether to enable rbd (Ceph) backend for Nova ephemeral storage. + NovaEnableRbdBackend: true + ## Glance backend can be either 'rbd' (Ceph), 'swift' or 'file'. + GlanceBackend: rbd + + + #### CINDER NFS SETTINGS #### + + ## NFS mount options + # CinderNfsMountOptions: '' + ## NFS mount point, e.g. '192.168.122.1:/export/cinder' + # CinderNfsServers: '' + + + #### GLANCE FILE BACKEND PACEMAKER SETTINGS (used for mounting NFS) #### + + ## Whether to make Glance 'file' backend a mount managed by Pacemaker + # GlanceFilePcmkManage: false + ## File system type of the mount + # GlanceFilePcmkFstype: nfs + ## Pacemaker mount point, e.g. '192.168.122.1:/export/glance' for NFS + # GlanceFilePcmkDevice: '' + ## Options for the mount managed by Pacemaker + # GlanceFilePcmkOptions: '' + + + #### CEPH SETTINGS #### + + ## Whether to deploy Ceph OSDs on the controller nodes. By default + ## OSDs are deployed on dedicated ceph-storage nodes only. + # ControllerEnableCephStorage: false + + ## When deploying Ceph through the oscplugin CLI, the following + ## parameters are set automatically by the CLI. When deploying via + ## heat stack-create, they need to be provided manually. + + ## Number of Ceph storage nodes to deploy + # CephStorageCount: 0 + ## Ceph FSID, e.g. '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' + # CephClusterFSID: '' + ## Ceph monitor key, e.g. 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ==' + # CephMonKey: '' + ## Ceph admin key, e.g. 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' + # CephAdminKey: '' diff --git a/examples/launchconfig1.yaml b/examples/launchconfig1.yaml deleted file mode 100644 index 70ea2463..00000000 --- a/examples/launchconfig1.yaml +++ /dev/null @@ -1,24 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - A: - Type: String - Default: test1 - B: - Type: String - Default: test2 - resource1Image: - Type: String - Default: resource1 -Resources: - notcomputeConfigBase: - Type: AWS::AutoScaling::LaunchConfiguration - Metadata: - OpenStack::Role: notcomputeConfig - a: {Ref: A} - b: {Ref: B} - resource1: - Type: OS::Nova::Server - Properties: - flavor: test_flavor - image: {Ref: resource1Image} - key_name: test_key diff --git a/examples/launchconfig1_hot.yaml b/examples/launchconfig1_hot.yaml deleted file mode 100644 index 4c86e76d..00000000 --- a/examples/launchconfig1_hot.yaml +++ /dev/null @@ -1,24 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - A: - type: string - default: test1 - B: - type: string - default: test2 - resource1Image: - type: string - default: resource1 -resources: - notcomputeConfigBase: - type: AWS::AutoScaling::LaunchConfiguration - metadata: - OpenStack::Role: notcomputeConfig - a: {get_param: A} - b: {get_param: B} - resource1: - type: OS::Nova::Server - properties: - flavor: test_flavor - image: {get_param: resource1Image} - key_name: test_key diff --git a/examples/launchconfig2.yaml b/examples/launchconfig2.yaml deleted file mode 100644 index 3ced0cc4..00000000 --- a/examples/launchconfig2.yaml +++ /dev/null @@ -1,20 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - C: - Type: String - Default: test3 - resource2Image: - Type: String - Default: resource2 -Resources: - notcomputeConfigMixin: - Type: AWS::AutoScaling::LaunchConfiguration - Metadata: - OpenStack::Role: notcomputeConfig - c: {Ref: C} - resource2: - Type: OS::Nova::Server - Properties: - flavor: test_flavor - image: {Ref: resource2Image} - key_name: test_key diff --git a/examples/launchconfig2_hot.yaml 
b/examples/launchconfig2_hot.yaml deleted file mode 100644 index 1586a4d0..00000000 --- a/examples/launchconfig2_hot.yaml +++ /dev/null @@ -1,20 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - C: - type: string - default: test3 - resource2Image: - type: string - default: resource2 -resources: - notcomputeConfigMixin: - type: AWS::AutoScaling::LaunchConfiguration - metadata: - OpenStack::Role: notcomputeConfig - c: {get_param: C} - resource2: - type: OS::Nova::Server - properties: - flavor: test_flavor - image: {get_param: resource2Image} - key_name: test_key diff --git a/examples/launchconfig_result.yaml b/examples/launchconfig_result.yaml deleted file mode 100644 index 76c12b84..00000000 --- a/examples/launchconfig_result.yaml +++ /dev/null @@ -1,43 +0,0 @@ -Description: examples/launchconfig1.yaml,examples/launchconfig2.yaml -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - A: - Default: test1 - Type: String - B: - Default: test2 - Type: String - C: - Default: test3 - Type: String - resource1Image: - Default: resource1 - Type: String - resource2Image: - Default: resource2 - Type: String -Resources: - notcomputeConfig: - Metadata: - OpenStack::Role: notcomputeConfig - a: - Ref: A - b: - Ref: B - c: - Ref: C - Type: AWS::AutoScaling::LaunchConfiguration - resource1: - Properties: - flavor: test_flavor - image: - Ref: resource1Image - key_name: test_key - Type: OS::Nova::Server - resource2: - Properties: - flavor: test_flavor - image: - Ref: resource2Image - key_name: test_key - Type: OS::Nova::Server diff --git a/examples/launchconfig_result_hot.yaml b/examples/launchconfig_result_hot.yaml deleted file mode 100644 index 1375bae9..00000000 --- a/examples/launchconfig_result_hot.yaml +++ /dev/null @@ -1,43 +0,0 @@ -description: examples/launchconfig1_hot.yaml,examples/launchconfig2_hot.yaml -heat_template_version: '2014-10-16' -parameters: - A: - default: test1 - type: string - B: - default: test2 - type: string - C: - default: test3 - type: string - resource1Image: - default: resource1 - type: string - resource2Image: - default: resource2 - type: string -resources: - notcomputeConfig: - metadata: - OpenStack::Role: notcomputeConfig - a: - get_param: A - b: - get_param: B - c: - get_param: C - type: AWS::AutoScaling::LaunchConfiguration - resource1: - properties: - flavor: test_flavor - image: - get_param: resource1Image - key_name: test_key - type: OS::Nova::Server - resource2: - properties: - flavor: test_flavor - image: - get_param: resource2Image - key_name: test_key - type: OS::Nova::Server diff --git a/examples/lib.yaml b/examples/lib.yaml deleted file mode 100644 index d42e95f8..00000000 --- a/examples/lib.yaml +++ /dev/null @@ -1,13 +0,0 @@ -Parameters: - ImportantValue: - Default: a_default - Type: String - BImage: - Type: String -Resources: - GenericB: - Type: OS::Nova::Server - Properties: - image: {Ref: BImage} - Metadata: - my_meta: {Ref: ImportantValue} diff --git a/examples/lib_hot.yaml b/examples/lib_hot.yaml deleted file mode 100644 index b5af05e4..00000000 --- a/examples/lib_hot.yaml +++ /dev/null @@ -1,13 +0,0 @@ -parameters: - ImportantValue: - default: a_default - type: string - BImage: - type: string -resources: - GenericB: - type: OS::Nova::Server - properties: - image: {get_param: BImage} - metadata: - my_meta: {get_param: ImportantValue} diff --git a/examples/scale1.yaml b/examples/scale1.yaml deleted file mode 100644 index 6acb6049..00000000 --- a/examples/scale1.yaml +++ /dev/null @@ -1,32 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Resources: 
- ComputeUser: - Type: AWS::IAM::User - Properties: - Policies: [ { Ref: ComputeAccessPolicy } ] - GlobalAccessPolicy: - Type: OS::Heat::AccessPolicy - NovaCompute0Key: - Type: FileInclude - Path: examples/scale2.yaml - SubKey: Resources.NovaCompute0Key - NovaCompute0CompletionCondition: - Type: FileInclude - Path: examples/scale2.yaml - SubKey: Resources.NovaCompute0CompletionCondition - NovaCompute0CompletionHandle: - Type: FileInclude - Path: examples/scale2.yaml - SubKey: Resources.NovaCompute0CompletionHandle - NovaCompute0Config: - Type: FileInclude - Path: examples/scale2.yaml - SubKey: Resources.NovaCompute0Config - Parameters: - ComputeImage: "123" - RabbitUserName: "guest" - RabbitPassword: "guest" - NovaCompute0: - Type: FileInclude - Path: examples/scale2.yaml - SubKey: Resources.NovaCompute0 diff --git a/examples/scale1_hot.yaml b/examples/scale1_hot.yaml deleted file mode 100644 index 6e46a32d..00000000 --- a/examples/scale1_hot.yaml +++ /dev/null @@ -1,32 +0,0 @@ -heat_template_version: 2014-10-16 -resources: - ComputeUser: - type: AWS::IAM::User - properties: - Policies: [ { get_param: ComputeAccessPolicy } ] - GlobalAccessPolicy: - type: OS::Heat::AccessPolicy - NovaCompute0Key: - type: FileInclude - Path: examples/scale2_hot.yaml - SubKey: resources.NovaCompute0Key - NovaCompute0CompletionCondition: - type: FileInclude - Path: examples/scale2_hot.yaml - SubKey: resources.NovaCompute0CompletionCondition - NovaCompute0CompletionHandle: - type: FileInclude - Path: examples/scale2_hot.yaml - SubKey: resources.NovaCompute0CompletionHandle - NovaCompute0Config: - type: FileInclude - Path: examples/scale2_hot.yaml - SubKey: resources.NovaCompute0Config - parameters: - ComputeImage: "123" - RabbitUserName: "guest" - RabbitPassword: "guest" - NovaCompute0: - type: FileInclude - Path: examples/scale2_hot.yaml - SubKey: resources.NovaCompute0 diff --git a/examples/scale2.yaml b/examples/scale2.yaml deleted file mode 100644 index 8b3e4f8f..00000000 --- a/examples/scale2.yaml +++ /dev/null @@ -1,69 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - ComputeImage: - Type: String - RabbitUserName: - Type: String - RabbitPassword: - Type: String - NoEcho: true -Resources: - ComputeAccessPolicy: - Type: OS::Heat::AccessPolicy - Properties: - AllowedResources: [ NovaCompute0 ] - NovaCompute0Key: - Type: AWS::IAM::AccessKey - Properties: - UserName: - Ref: ComputeUser - NovaCompute0CompletionCondition: - Type: AWS::CloudFormation::WaitCondition - DependsOn: notcompute - Properties: - Handle: {Ref: NovaCompute0CompletionHandle} - Count: '1' - Timeout: '1800' - NovaCompute0CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0: - Type: OS::Nova::Server - Properties: - image: - Ref: ComputeImage - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - secret_access_key: - Fn::GetAtt: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {Ref: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - NovaCompute0Config: - Type: AWS::AutoScaling::LaunchConfiguration - Metadata: - completion-handle: - Ref: NovaCompute0CompletionHandle - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - secret_access_key: - Fn::GetAtt: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {Ref: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - neutron: - ovs: - local_ip: - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute0 - - networks - rabbit: - username: {Ref: RabbitUserName} - password: {Ref: 
RabbitPassword} - diff --git a/examples/scale2_hot.yaml b/examples/scale2_hot.yaml deleted file mode 100644 index eb507616..00000000 --- a/examples/scale2_hot.yaml +++ /dev/null @@ -1,62 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - ComputeImage: - type: string - RabbitUserName: - type: string - RabbitPassword: - type: string - hidden: true -resources: - ComputeAccessPolicy: - type: OS::Heat::AccessPolicy - properties: - AllowedResources: [ NovaCompute0 ] - NovaCompute0Key: - type: AWS::IAM::AccessKey - properties: - UserName: - get_param: ComputeUser - NovaCompute0CompletionCondition: - type: AWS::CloudFormation::WaitCondition - depends_on: notcompute - properties: - Handle: {get_resource: NovaCompute0CompletionHandle} - Count: '1' - Timeout: '1800' - NovaCompute0CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0: - type: OS::Nova::Server - properties: - image: - get_param: ComputeImage - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - secret_access_key: - get_attr: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {get_param: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - NovaCompute0Config: - type: AWS::AutoScaling::LaunchConfiguration - metadata: - completion-handle: - get_resource: NovaCompute0CompletionHandle - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - secret_access_key: - get_attr: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {get_param: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - neutron: - ovs: - local_ip: {get_attr: [NovaCompute0, networks, ctlplane, 0]} - rabbit: - username: {get_param: RabbitUserName} - password: {get_param: RabbitPassword} - diff --git a/examples/scale_map.yaml b/examples/scale_map.yaml deleted file mode 100644 index 08bcbf7c..00000000 --- a/examples/scale_map.yaml +++ /dev/null @@ -1,56 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Resources: - ComputeUser: - Type: AWS::IAM::User - Properties: - Policies: [ { Ref: ComputeAccessPolicy } ] - GlobalAccessPolicy: - Type: OS::Heat::AccessPolicy - NovaCompute0Key: - Type: FileInclude - Path: examples/scale_map2.yaml - SubKey: Resources.NovaCompute0Key - NovaCompute0CompletionCondition: - Type: FileInclude - Path: examples/scale_map2.yaml - SubKey: Resources.NovaCompute0CompletionCondition - NovaCompute0CompletionHandle: - Type: FileInclude - Path: examples/scale_map2.yaml - SubKey: Resources.NovaCompute0CompletionHandle - NovaCompute0Config: - Type: FileInclude - Path: examples/scale_map2.yaml - SubKey: Resources.NovaCompute0Config - Parameters: - AllHosts: - Fn::Join: - - "\n" - - Merge::Map: - NovaCompute0: - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute0 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - Fn::Join: - - '.' 
- - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - 'local' - NovaCompute0: - Type: FileInclude - Path: examples/scale_map2.yaml - SubKey: Resources.NovaCompute0 diff --git a/examples/scale_map2.yaml b/examples/scale_map2.yaml deleted file mode 100644 index 7e5c839c..00000000 --- a/examples/scale_map2.yaml +++ /dev/null @@ -1,54 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - AllHosts: - Type: String - ComputeImage: - Type: String -Resources: - ComputeAccessPolicy: - Type: OS::Heat::AccessPolicy - Properties: - AllowedResources: [ NovaCompute0 ] - NovaCompute0Key: - Type: AWS::IAM::AccessKey - Properties: - UserName: - Ref: ComputeUser - NovaCompute0CompletionCondition: - Type: AWS::CloudFormation::WaitCondition - DependsOn: notcompute - Properties: - Handle: {Ref: NovaCompute0CompletionHandle} - Count: '1' - Timeout: '1800' - NovaCompute0CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0: - Type: OS::Nova::Server - Properties: - image: - Ref: ComputeImage - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - secret_access_key: - Fn::GetAtt: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {Ref: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - NovaCompute0Config: - Type: AWS::AutoScaling::LaunchConfiguration - Metadata: - completion-handle: - Ref: NovaCompute0CompletionHandle - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - secret_access_key: - Fn::GetAtt: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {Ref: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - hosts: - Ref: AllHosts diff --git a/examples/scale_map2_hot.yaml b/examples/scale_map2_hot.yaml deleted file mode 100644 index 1d7dc5fc..00000000 --- a/examples/scale_map2_hot.yaml +++ /dev/null @@ -1,54 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - AllHosts: - type: string - ComputeImage: - type: string -resources: - ComputeAccessPolicy: - type: OS::Heat::AccessPolicy - properties: - AllowedResources: [ NovaCompute0 ] - NovaCompute0Key: - type: AWS::IAM::AccessKey - properties: - UserName: - get_param: ComputeUser - NovaCompute0CompletionCondition: - type: AWS::CloudFormation::WaitCondition - depends_on: notcompute - properties: - Handle: {get_resource: NovaCompute0CompletionHandle} - Count: '1' - Timeout: '1800' - NovaCompute0CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0: - type: OS::Nova::Server - properties: - image: - get_param: ComputeImage - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - secret_access_key: - get_attr: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {get_param: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - NovaCompute0Config: - type: AWS::AutoScaling::LaunchConfiguration - metadata: - completion-handle: - get_resource: NovaCompute0CompletionHandle - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - secret_access_key: - get_attr: [ NovaCompute0Key, SecretAccessKey ] - stack_name: {get_param: 'AWS::StackName'} - path: NovaCompute0Config.Metadata - hosts: - get_param: AllHosts diff --git a/examples/scale_map_hot.yaml b/examples/scale_map_hot.yaml deleted file mode 100644 index 4a6d6843..00000000 --- a/examples/scale_map_hot.yaml +++ /dev/null @@ -1,42 +0,0 @@ -heat_template_version: 2014-10-16 -resources: - ComputeUser: - type: AWS::IAM::User - properties: - Policies: [ { get_param: ComputeAccessPolicy } ] - GlobalAccessPolicy: - type: 
OS::Heat::AccessPolicy - NovaCompute0Key: - type: FileInclude - Path: examples/scale_map2_hot.yaml - SubKey: resources.NovaCompute0Key - NovaCompute0CompletionCondition: - type: FileInclude - Path: examples/scale_map2_hot.yaml - SubKey: resources.NovaCompute0CompletionCondition - NovaCompute0CompletionHandle: - type: FileInclude - Path: examples/scale_map2_hot.yaml - SubKey: resources.NovaCompute0CompletionHandle - NovaCompute0Config: - type: FileInclude - Path: examples/scale_map2_hot.yaml - SubKey: resources.NovaCompute0Config - parameters: - AllHosts: - list_join: - - "\n" - - Merge::Map: - NovaCompute0: - list_join: - - ' ' - - - {get_attr: [NovaCompute0, networks, ctlplane, 0]} - - {get_attr: [NovaCompute0, show, name]} - - list_join: - - '.' - - - {get_attr: [NovaCompute0, show, name]} - - 'local' - NovaCompute0: - type: FileInclude - Path: examples/scale_map2_hot.yaml - SubKey: resources.NovaCompute0 diff --git a/examples/scale_map_result.yaml b/examples/scale_map_result.yaml deleted file mode 100644 index c4617835..00000000 --- a/examples/scale_map_result.yaml +++ /dev/null @@ -1,367 +0,0 @@ -Description: examples/scale_map.yaml -HeatTemplateFormatVersion: '2012-12-12' -Resources: - ComputeUser: - Properties: - Policies: - - Ref: ComputeAccessPolicy - Type: AWS::IAM::User - GlobalAccessPolicy: - Type: OS::Heat::AccessPolicy - NovaCompute0: - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute0Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Properties: - image: - Ref: ComputeImage - Type: OS::Nova::Server - NovaCompute0CompletionCondition: - DependsOn: notcompute - Properties: - Count: '1' - Handle: - Ref: NovaCompute0CompletionHandle - Timeout: '1800' - Type: AWS::CloudFormation::WaitCondition - NovaCompute0CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0Config: - Metadata: - completion-handle: - Ref: NovaCompute0CompletionHandle - hosts: - Fn::Join: - - ' - - ' - - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute0 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - Fn::Join: - - . - - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - local - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute1 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute1 - - show - - Fn::Join: - - . - - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute1 - - show - - local - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute2 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute2 - - show - - Fn::Join: - - . 
- - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute2 - - show - - local - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute0Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Type: AWS::AutoScaling::LaunchConfiguration - NovaCompute0Key: - Properties: - UserName: - Ref: ComputeUser - Type: AWS::IAM::AccessKey - NovaCompute1: - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute1Key - path: NovaCompute1Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute1Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Properties: - image: - Ref: ComputeImage - Type: OS::Nova::Server - NovaCompute1CompletionCondition: - DependsOn: notcompute - Properties: - Count: '1' - Handle: - Ref: NovaCompute1CompletionHandle - Timeout: '1800' - Type: AWS::CloudFormation::WaitCondition - NovaCompute1CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute1Config: - Metadata: - completion-handle: - Ref: NovaCompute1CompletionHandle - hosts: - Fn::Join: - - ' - - ' - - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute0 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - Fn::Join: - - . - - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - local - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute1 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute1 - - show - - Fn::Join: - - . - - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute1 - - show - - local - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute2 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute2 - - show - - Fn::Join: - - . - - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute2 - - show - - local - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute1Key - path: NovaCompute1Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute1Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Type: AWS::AutoScaling::LaunchConfiguration - NovaCompute1Key: - Properties: - UserName: - Ref: ComputeUser - Type: AWS::IAM::AccessKey - NovaCompute2: - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute2Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Properties: - image: - Ref: ComputeImage - Type: OS::Nova::Server - NovaCompute2CompletionCondition: - DependsOn: notcompute - Properties: - Count: '1' - Handle: - Ref: NovaCompute2CompletionHandle - Timeout: '1800' - Type: AWS::CloudFormation::WaitCondition - NovaCompute2CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute2Config: - Metadata: - completion-handle: - Ref: NovaCompute2CompletionHandle - hosts: - Fn::Join: - - ' - - ' - - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute0 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - Fn::Join: - - . 
- - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute0 - - show - - local - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute1 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute1 - - show - - Fn::Join: - - . - - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute1 - - show - - local - - Fn::Join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute2 - - networks - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute2 - - show - - Fn::Join: - - . - - - Fn::Select: - - name - - Fn::GetAtt: - - NovaCompute2 - - show - - local - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute2Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Type: AWS::AutoScaling::LaunchConfiguration - NovaCompute2Key: - Properties: - UserName: - Ref: ComputeUser - Type: AWS::IAM::AccessKey diff --git a/examples/scale_map_result_hot.yaml b/examples/scale_map_result_hot.yaml deleted file mode 100644 index 4e657238..00000000 --- a/examples/scale_map_result_hot.yaml +++ /dev/null @@ -1,331 +0,0 @@ -description: examples/scale_map_hot.yaml -heat_template_version: '2014-10-16' -resources: - ComputeUser: - properties: - Policies: - - get_param: ComputeAccessPolicy - type: AWS::IAM::User - GlobalAccessPolicy: - type: OS::Heat::AccessPolicy - NovaCompute0: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - get_attr: - - NovaCompute0Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute0CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute0CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute0CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0Config: - metadata: - completion-handle: - get_resource: NovaCompute0CompletionHandle - hosts: - list_join: - - ' - - ' - - - list_join: - - ' ' - - - get_attr: - - NovaCompute0 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute0 - - show - - name - - list_join: - - . - - - get_attr: - - NovaCompute0 - - show - - name - - local - - list_join: - - ' ' - - - get_attr: - - NovaCompute1 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute1 - - show - - name - - list_join: - - . - - - get_attr: - - NovaCompute1 - - show - - name - - local - - list_join: - - ' ' - - - get_attr: - - NovaCompute2 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute2 - - show - - name - - list_join: - - . 
- - - get_attr: - - NovaCompute2 - - show - - name - - local - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - get_attr: - - NovaCompute0Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute0Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey - NovaCompute1: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute1Key - path: NovaCompute1Config.Metadata - secret_access_key: - get_attr: - - NovaCompute1Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute1CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute1CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute1CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute1Config: - metadata: - completion-handle: - get_resource: NovaCompute1CompletionHandle - hosts: - list_join: - - ' - - ' - - - list_join: - - ' ' - - - get_attr: - - NovaCompute0 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute0 - - show - - name - - list_join: - - . - - - get_attr: - - NovaCompute0 - - show - - name - - local - - list_join: - - ' ' - - - get_attr: - - NovaCompute1 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute1 - - show - - name - - list_join: - - . - - - get_attr: - - NovaCompute1 - - show - - name - - local - - list_join: - - ' ' - - - get_attr: - - NovaCompute2 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute2 - - show - - name - - list_join: - - . - - - get_attr: - - NovaCompute2 - - show - - name - - local - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute1Key - path: NovaCompute1Config.Metadata - secret_access_key: - get_attr: - - NovaCompute1Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute1Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey - NovaCompute2: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - get_attr: - - NovaCompute2Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute2CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute2CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute2CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute2Config: - metadata: - completion-handle: - get_resource: NovaCompute2CompletionHandle - hosts: - list_join: - - ' - - ' - - - list_join: - - ' ' - - - get_attr: - - NovaCompute0 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute0 - - show - - name - - list_join: - - . - - - get_attr: - - NovaCompute0 - - show - - name - - local - - list_join: - - ' ' - - - get_attr: - - NovaCompute1 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute1 - - show - - name - - list_join: - - . 
- - - get_attr: - - NovaCompute1 - - show - - name - - local - - list_join: - - ' ' - - - get_attr: - - NovaCompute2 - - networks - - ctlplane - - 0 - - get_attr: - - NovaCompute2 - - show - - name - - list_join: - - . - - - get_attr: - - NovaCompute2 - - show - - name - - local - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - get_attr: - - NovaCompute2Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute2Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey diff --git a/examples/scale_map_result_hot_blacklist.yaml b/examples/scale_map_result_hot_blacklist.yaml deleted file mode 100644 index decb0d5e..00000000 --- a/examples/scale_map_result_hot_blacklist.yaml +++ /dev/null @@ -1,367 +0,0 @@ -description: examples/scale_map_hot.yaml -heat_template_version: '2013-05-23' -resources: - ComputeUser: - properties: - Policies: - - get_param: ComputeAccessPolicy - type: AWS::IAM::User - GlobalAccessPolicy: - type: OS::Heat::AccessPolicy - NovaCompute0: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - get_attr: - - NovaCompute0Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute0CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute0CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute0CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0Config: - metadata: - completion-handle: - get_resource: NovaCompute0CompletionHandle - hosts: - list_join: - - ' - - ' - - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute0 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute0 - - show - - list_join: - - . - - - Fn::Select: - - name - - get_attr: - - NovaCompute0 - - show - - local - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute3 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute3 - - show - - list_join: - - . - - - Fn::Select: - - name - - get_attr: - - NovaCompute3 - - show - - local - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute4 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute4 - - show - - list_join: - - . 
- - - Fn::Select: - - name - - get_attr: - - NovaCompute4 - - show - - local - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - get_attr: - - NovaCompute0Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute0Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey - NovaCompute3: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute3Key - path: NovaCompute3Config.Metadata - secret_access_key: - get_attr: - - NovaCompute3Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute3CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute3CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute3CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute3Config: - metadata: - completion-handle: - get_resource: NovaCompute3CompletionHandle - hosts: - list_join: - - ' - - ' - - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute0 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute0 - - show - - list_join: - - . - - - Fn::Select: - - name - - get_attr: - - NovaCompute0 - - show - - local - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute3 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute3 - - show - - list_join: - - . - - - Fn::Select: - - name - - get_attr: - - NovaCompute3 - - show - - local - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute4 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute4 - - show - - list_join: - - . - - - Fn::Select: - - name - - get_attr: - - NovaCompute4 - - show - - local - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute3Key - path: NovaCompute3Config.Metadata - secret_access_key: - get_attr: - - NovaCompute3Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute3Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey - NovaCompute4: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute4Key - path: NovaCompute4Config.Metadata - secret_access_key: - get_attr: - - NovaCompute4Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute4CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute4CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute4CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute4Config: - metadata: - completion-handle: - get_resource: NovaCompute4CompletionHandle - hosts: - list_join: - - ' - - ' - - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute0 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute0 - - show - - list_join: - - . 
- - - Fn::Select: - - name - - get_attr: - - NovaCompute0 - - show - - local - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute3 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute3 - - show - - list_join: - - . - - - Fn::Select: - - name - - get_attr: - - NovaCompute3 - - show - - local - - list_join: - - ' ' - - - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - get_attr: - - NovaCompute4 - - networks - - Fn::Select: - - name - - get_attr: - - NovaCompute4 - - show - - list_join: - - . - - - Fn::Select: - - name - - get_attr: - - NovaCompute4 - - show - - local - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute4Key - path: NovaCompute4Config.Metadata - secret_access_key: - get_attr: - - NovaCompute4Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute4Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey diff --git a/examples/scale_result.yaml b/examples/scale_result.yaml deleted file mode 100644 index 5b28684b..00000000 --- a/examples/scale_result.yaml +++ /dev/null @@ -1,193 +0,0 @@ -Description: examples/scale1.yaml -HeatTemplateFormatVersion: '2012-12-12' -Resources: - ComputeUser: - Properties: - Policies: - - Ref: ComputeAccessPolicy - Type: AWS::IAM::User - GlobalAccessPolicy: - Type: OS::Heat::AccessPolicy - NovaCompute0: - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute0Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Properties: - image: - Ref: ComputeImage - Type: OS::Nova::Server - NovaCompute0CompletionCondition: - DependsOn: notcompute - Properties: - Count: '1' - Handle: - Ref: NovaCompute0CompletionHandle - Timeout: '1800' - Type: AWS::CloudFormation::WaitCondition - NovaCompute0CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0Config: - Metadata: - completion-handle: - Ref: NovaCompute0CompletionHandle - neutron: - ovs: - local_ip: - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute0 - - networks - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute0Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - rabbit: - password: guest - username: guest - Type: AWS::AutoScaling::LaunchConfiguration - NovaCompute0Key: - Properties: - UserName: - Ref: ComputeUser - Type: AWS::IAM::AccessKey - NovaCompute1: - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute1Key - path: NovaCompute1Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute1Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Properties: - image: - Ref: ComputeImage - Type: OS::Nova::Server - NovaCompute1CompletionCondition: - DependsOn: notcompute - Properties: - Count: '1' - Handle: - Ref: NovaCompute1CompletionHandle - Timeout: '1800' - Type: AWS::CloudFormation::WaitCondition - NovaCompute1CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute1Config: - Metadata: - completion-handle: - Ref: NovaCompute1CompletionHandle - neutron: - ovs: - local_ip: - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute1 - - networks - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute1Key - path: NovaCompute1Config.Metadata - 
secret_access_key: - Fn::GetAtt: - - NovaCompute1Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - rabbit: - password: guest - username: guest - Type: AWS::AutoScaling::LaunchConfiguration - NovaCompute1Key: - Properties: - UserName: - Ref: ComputeUser - Type: AWS::IAM::AccessKey - NovaCompute2: - Metadata: - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute2Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - Properties: - image: - Ref: ComputeImage - Type: OS::Nova::Server - NovaCompute2CompletionCondition: - DependsOn: notcompute - Properties: - Count: '1' - Handle: - Ref: NovaCompute2CompletionHandle - Timeout: '1800' - Type: AWS::CloudFormation::WaitCondition - NovaCompute2CompletionHandle: - Type: AWS::CloudFormation::WaitConditionHandle - NovaCompute2Config: - Metadata: - completion-handle: - Ref: NovaCompute2CompletionHandle - neutron: - ovs: - local_ip: - Fn::Select: - - 0 - - Fn::Select: - - ctlplane - - Fn::GetAtt: - - NovaCompute2 - - networks - os-collect-config: - cfn: - access_key_id: - Ref: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - Fn::GetAtt: - - NovaCompute2Key - - SecretAccessKey - stack_name: - Ref: AWS::StackName - rabbit: - password: guest - username: guest - Type: AWS::AutoScaling::LaunchConfiguration - NovaCompute2Key: - Properties: - UserName: - Ref: ComputeUser - Type: AWS::IAM::AccessKey diff --git a/examples/scale_result_hot.yaml b/examples/scale_result_hot.yaml deleted file mode 100644 index a1b27095..00000000 --- a/examples/scale_result_hot.yaml +++ /dev/null @@ -1,187 +0,0 @@ -description: examples/scale1_hot.yaml -heat_template_version: '2014-10-16' -resources: - ComputeUser: - properties: - Policies: - - get_param: ComputeAccessPolicy - type: AWS::IAM::User - GlobalAccessPolicy: - type: OS::Heat::AccessPolicy - NovaCompute0: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - get_attr: - - NovaCompute0Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute0CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute0CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute0CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute0Config: - metadata: - completion-handle: - get_resource: NovaCompute0CompletionHandle - neutron: - ovs: - local_ip: - get_attr: - - NovaCompute0 - - networks - - ctlplane - - 0 - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute0Key - path: NovaCompute0Config.Metadata - secret_access_key: - get_attr: - - NovaCompute0Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - rabbit: - password: guest - username: guest - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute0Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey - NovaCompute1: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute1Key - path: NovaCompute1Config.Metadata - secret_access_key: - get_attr: - - NovaCompute1Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute1CompletionCondition: - depends_on: notcompute - 
properties: - Count: '1' - Handle: - get_resource: NovaCompute1CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute1CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute1Config: - metadata: - completion-handle: - get_resource: NovaCompute1CompletionHandle - neutron: - ovs: - local_ip: - get_attr: - - NovaCompute1 - - networks - - ctlplane - - 0 - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute1Key - path: NovaCompute1Config.Metadata - secret_access_key: - get_attr: - - NovaCompute1Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - rabbit: - password: guest - username: guest - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute1Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey - NovaCompute2: - metadata: - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - get_attr: - - NovaCompute2Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - properties: - image: - get_param: ComputeImage - type: OS::Nova::Server - NovaCompute2CompletionCondition: - depends_on: notcompute - properties: - Count: '1' - Handle: - get_resource: NovaCompute2CompletionHandle - Timeout: '1800' - type: AWS::CloudFormation::WaitCondition - NovaCompute2CompletionHandle: - type: AWS::CloudFormation::WaitConditionHandle - NovaCompute2Config: - metadata: - completion-handle: - get_resource: NovaCompute2CompletionHandle - neutron: - ovs: - local_ip: - get_attr: - - NovaCompute2 - - networks - - ctlplane - - 0 - os-collect-config: - cfn: - access_key_id: - get_resource: NovaCompute2Key - path: NovaCompute2Config.Metadata - secret_access_key: - get_attr: - - NovaCompute2Key - - SecretAccessKey - stack_name: - get_param: AWS::StackName - rabbit: - password: guest - username: guest - type: AWS::AutoScaling::LaunchConfiguration - NovaCompute2Key: - properties: - UserName: - get_param: ComputeUser - type: AWS::IAM::AccessKey diff --git a/examples/source.yaml b/examples/source.yaml deleted file mode 100644 index 88f0bde7..00000000 --- a/examples/source.yaml +++ /dev/null @@ -1,16 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - SourceImage: - Type: String - Default: my_image -Resources: - A: - Type: OS::Nova::Server - Properties: - image: {Ref: SourceImage} - B: - Type: FileInclude - Path: examples/lib.yaml - SubKey: Resources.GenericB - Parameters: - ImportantValue: {'Fn::Join': [ '', ['one', 'two', 'three']]} diff --git a/examples/source2.yaml b/examples/source2.yaml deleted file mode 100644 index f59f85ef..00000000 --- a/examples/source2.yaml +++ /dev/null @@ -1,4 +0,0 @@ -__include__: - path: examples/lib.yaml - params: - ImportantValue: Foo diff --git a/examples/source2_hot.yaml b/examples/source2_hot.yaml deleted file mode 100644 index e3861a6c..00000000 --- a/examples/source2_hot.yaml +++ /dev/null @@ -1,4 +0,0 @@ -__include__: - path: examples/lib_hot.yaml - params: - ImportantValue: Foo diff --git a/examples/source2_lib_result.yaml b/examples/source2_lib_result.yaml deleted file mode 100644 index 172dce0f..00000000 --- a/examples/source2_lib_result.yaml +++ /dev/null @@ -1,16 +0,0 @@ -Description: examples/source2.yaml -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - BImage: - Type: String - ImportantValue: - Default: a_default - Type: String -Resources: - GenericB: - Metadata: - my_meta: Foo - Properties: - image: - Ref: BImage - Type: OS::Nova::Server diff --git 
a/examples/source2_lib_result_hot.yaml b/examples/source2_lib_result_hot.yaml deleted file mode 100644 index 294fed89..00000000 --- a/examples/source2_lib_result_hot.yaml +++ /dev/null @@ -1,16 +0,0 @@ -description: examples/source2_hot.yaml -heat_template_version: '2014-10-16' -parameters: - BImage: - type: string - ImportantValue: - default: a_default - type: string -resources: - GenericB: - metadata: - my_meta: Foo - properties: - image: - get_param: BImage - type: OS::Nova::Server diff --git a/examples/source_hot.yaml b/examples/source_hot.yaml deleted file mode 100644 index 15314886..00000000 --- a/examples/source_hot.yaml +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - SourceImage: - type: string - default: my_image -resources: - A: - type: OS::Nova::Server - properties: - image: {get_param: SourceImage} - B: - type: FileInclude - Path: examples/lib_hot.yaml - SubKey: resources.GenericB - parameters: - ImportantValue: {"Fn::Join": [ '', ['one', 'two', 'three']]} diff --git a/examples/source_include_subkey.yaml b/examples/source_include_subkey.yaml deleted file mode 100644 index 37591d80..00000000 --- a/examples/source_include_subkey.yaml +++ /dev/null @@ -1,11 +0,0 @@ -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - Foo: - Type: String -Resources: - __include__: - path: examples/lib.yaml - subkey: Resources - params: - BImage: - Ref: Foo diff --git a/examples/source_include_subkey_hot.yaml b/examples/source_include_subkey_hot.yaml deleted file mode 100644 index 8970db44..00000000 --- a/examples/source_include_subkey_hot.yaml +++ /dev/null @@ -1,11 +0,0 @@ -heat_template_version: 2014-10-16 -parameters: - Foo: - type: string -resources: - __include__: - path: examples/lib_hot.yaml - subkey: resources - params: - BImage: - get_param: Foo diff --git a/examples/source_include_subkey_result.yaml b/examples/source_include_subkey_result.yaml deleted file mode 100644 index 641e8148..00000000 --- a/examples/source_include_subkey_result.yaml +++ /dev/null @@ -1,14 +0,0 @@ -Description: examples/source_include_subkey.yaml -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - Foo: - Type: String -Resources: - GenericB: - Metadata: - my_meta: - Ref: ImportantValue - Properties: - image: - Ref: Foo - Type: OS::Nova::Server diff --git a/examples/source_include_subkey_result_hot.yaml b/examples/source_include_subkey_result_hot.yaml deleted file mode 100644 index ec3bfb6f..00000000 --- a/examples/source_include_subkey_result_hot.yaml +++ /dev/null @@ -1,14 +0,0 @@ -description: examples/source_include_subkey_hot.yaml -heat_template_version: '2014-10-16' -parameters: - Foo: - type: string -resources: - GenericB: - metadata: - my_meta: - get_param: ImportantValue - properties: - image: - get_param: Foo - type: OS::Nova::Server diff --git a/examples/source_lib_result.yaml b/examples/source_lib_result.yaml deleted file mode 100644 index 5844c813..00000000 --- a/examples/source_lib_result.yaml +++ /dev/null @@ -1,24 +0,0 @@ -Description: examples/source.yaml -HeatTemplateFormatVersion: '2012-12-12' -Parameters: - SourceImage: - Default: my_image - Type: String -Resources: - A: - Properties: - image: - Ref: SourceImage - Type: OS::Nova::Server - B: - Metadata: - my_meta: - Fn::Join: - - '' - - - one - - two - - three - Properties: - image: - Ref: BImage - Type: OS::Nova::Server diff --git a/examples/source_lib_result_hot.yaml b/examples/source_lib_result_hot.yaml deleted file mode 100644 index 0235f200..00000000 --- a/examples/source_lib_result_hot.yaml +++ 
/dev/null @@ -1,24 +0,0 @@ -description: examples/source_hot.yaml -heat_template_version: '2014-10-16' -parameters: - SourceImage: - default: my_image - type: string -resources: - A: - properties: - image: - get_param: SourceImage - type: OS::Nova::Server - B: - metadata: - my_meta: - list_join: - - '' - - - one - - two - - three - properties: - image: - get_param: BImage - type: OS::Nova::Server diff --git a/extraconfig/all_nodes/default.yaml b/extraconfig/all_nodes/default.yaml new file mode 100644 index 00000000..68f9eadd --- /dev/null +++ b/extraconfig/all_nodes/default.yaml @@ -0,0 +1,27 @@ +heat_template_version: 2014-10-16 + +description: > + Noop extra config for allnodes extra cluster config + +# Parameters passed from the parent template - note if you maintain +# out-of-tree templates they may require additional parameters if the +# in-tree templates add a new role. +parameters: + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json +# Note extra parameters can be defined, then passed data via the +# environment parameter_defaults, without modifying the parent template + +outputs: + # This value should change if the configuration data has changed + # It is used to e.g re-apply puppet after hieradata values change. + config_identifier: + value: none diff --git a/extraconfig/all_nodes/mac_hostname.yaml b/extraconfig/all_nodes/mac_hostname.yaml new file mode 100644 index 00000000..739cbf0a --- /dev/null +++ b/extraconfig/all_nodes/mac_hostname.yaml @@ -0,0 +1,116 @@ +heat_template_version: 2014-10-16 + +description: > + Example extra config for cluster config + this example collects the hostname and MAC addresses for each node in + the deployment, then distributes that info to all Controller nodes. + +# Parameters passed from the parent template - note if you maintain +# out-of-tree templates they may require additional parameters if the +# in-tree templates add a new role. +parameters: + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json +# Note extra parameters can be defined, then passed data via the +# environment parameter_defaults, without modifying the parent template + +resources: + + CollectMacConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: | + #!/bin/sh + MACS=$(ifconfig | grep ether | awk '{print $2}' | tr "\n" " ") + HOSTNAME=$(hostname -s) + echo "$HOSTNAME $MACS" + + # FIXME(shardy): Long term it'd be better if Heat SoftwareDeployments accepted + # list instead of a map, then we could join the lists of servers into one + # deployment instead of requiring one deployment per-role. 
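For reference while reading the FIXME above: the per-role deployment shape it refers to is OS::Heat::SoftwareDeployments, which takes a map of servers (as supplied by the *_servers json parameters from the parent template) and applies one SoftwareConfig to every entry, hence one deployment resource per role. The following is only a minimal sketch of that shape, mirroring the names used in this file, not the full template:

heat_template_version: 2014-10-16

parameters:
  controller_servers:
    type: json        # map of server names to IDs, passed in by the parent template

resources:
  ExampleConfig:
    type: OS::Heat::SoftwareConfig
    properties:
      group: script
      config: |
        #!/bin/sh
        hostname -s

  # One deployment per role, because the servers property expects a map
  ExampleDeploymentsController:
    type: OS::Heat::SoftwareDeployments
    properties:
      servers: {get_param: controller_servers}
      config: {get_resource: ExampleConfig}
      actions: ['CREATE']   # only run on stack CREATE, as in the deployments below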
+ CollectMacDeploymentsController: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: controller_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsCompute: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: compute_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsBlockStorage: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: blockstorage_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsObjectStorage: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: objectstorage_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsCephStorage: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: cephstorage_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + # Now we distribute all-the-macs to all nodes + DistributeMacConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: controller_mappings + - name: compute_mappings + - name: blockstorage_mappings + - name: objectstorage_mappings + - name: cephstorage_mappings + config: | + #!/bin/sh + echo $controller_mappings > /root/controller_mappings + echo $compute_mappings > /root/compute_mappings + echo $blockstorage_mappings > /root/blockstorage_mappings + echo $objectstorage_mappings > /root/objectstorage_mappings + echo $cephstorage_mappings > /root/cephstorage_mappings + echo "mappings = $(cat /root/*_mappings)" + + DistributeMacDeploymentsController: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: controller_servers} + config: {get_resource: DistributeMacConfig} + input_values: + # FIXME(shardy): It'd be more convenient if we could join these + # items together but because the returned format is a map (not a list) + # we can't use list_join or str_replace. Possible Heat TODO. + controller_mappings: {get_attr: [CollectMacDeploymentsController, deploy_stdouts]} + compute_mappings: {get_attr: [CollectMacDeploymentsCompute, deploy_stdouts]} + blockstorage_mappings: {get_attr: [CollectMacDeploymentsBlockStorage, deploy_stdouts]} + objectstorage_mappings: {get_attr: [CollectMacDeploymentsObjectStorage, deploy_stdouts]} + cephstorage_mappings: {get_attr: [CollectMacDeploymentsCephStorage, deploy_stdouts]} + actions: ['CREATE'] # Only do this on CREATE + +outputs: + # This value should change if the configuration data has changed + # It is used to e.g re-apply puppet after hieradata values change. + config_identifier: + value: {get_attr: [DistributeMacDeploymentsController, deploy_stdouts]} + diff --git a/extraconfig/all_nodes/random_string.yaml b/extraconfig/all_nodes/random_string.yaml new file mode 100644 index 00000000..b4b30274 --- /dev/null +++ b/extraconfig/all_nodes/random_string.yaml @@ -0,0 +1,63 @@ +heat_template_version: 2014-10-16 + +description: > + Example extra config for cluster config + this example deploys a random string to all controller and compute + nodes, showing how data may be shared amongst nodes, vs the + other ExtraConfig interfaces which act only on individual nodes. 
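As with the rhel-registration templates later in this change, these all_nodes templates are meant to be selected through a resource_registry mapping and fed extra data through parameter_defaults, per the comments in the templates themselves. A minimal environment file might look like the sketch below; note that the OS::TripleO::AllNodesExtraConfig key is an assumption (the registry name is not part of this diff), and my_extra_data is a hypothetical parameter that would only take effect if the chosen template were extended with a matching parameter definition:

resource_registry:
  # Assumed registry key; substitute whatever interface the parent templates expose
  OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/random_string.yaml

parameter_defaults:
  # Hypothetical extra parameter, passed without modifying the parent template,
  # assuming the selected extra config template declares it
  my_extra_data: some-value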
+ +# Parameters passed from the parent template - note if you maintain +# out-of-tree templates they may require additional parameters if the +# in-tree templates add a new role. +parameters: + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json +# Note extra parameters can be defined, then passed data via the +# environment parameter_defaults, without modifying the parent template + +resources: + + Random: + type: OS::Heat::RandomString + + RandomConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: random_value + config: | + #!/bin/sh + echo $random_value > /root/random_value + + RandomDeploymentsController: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: controller_servers} + config: {get_resource: RandomConfig} + actions: ['CREATE'] # Only do this on CREATE + input_values: + random_value: {get_attr: [Random, value]} + + RandomDeploymentsCompute: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: compute_servers} + config: {get_resource: RandomConfig} + actions: ['CREATE'] # Only do this on CREATE + input_values: + random_value: {get_attr: [Random, value]} + +outputs: + # This value should change if the configuration data has changed + # It is used to e.g re-apply puppet after hieradata values change. + config_identifier: + value: {get_attr: [Random, value]} diff --git a/extraconfig/controller/cinder-netapp.yaml b/extraconfig/controller/cinder-netapp.yaml deleted file mode 100644 index 223ceacd..00000000 --- a/extraconfig/controller/cinder-netapp.yaml +++ /dev/null @@ -1,38 +0,0 @@ -heat_template_version: 2015-04-30 - -description: > - Configure hieradata for Cinder Netapp configuration - -resources: - CinderNetappConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - cinder_netapp_data: - mapped_data: - cinder_enable_netapp_backend: {get_param: CinderEnableNetappBackend} - cinder::backend::netapp::title: {get_param: CinderNetappBackendName} - cinder::backend::netapp::netapp_login: {get_param: CinderNetappLogin} - cinder::backend::netapp::netapp_password: {get_param: CinderNetappPassword} - cinder::backend::netapp::netapp_hostname: {get_param: CinderNetappServerHostname} - cinder::backend::netapp::netapp_server_port: {get_param: CinderNetappServerPort} - cinder::backend::netapp::netapp_size_multiplier: {get_param: CinderNetappSizeMultiplier} - cinder::backend::netapp::netapp_storage_family: {get_param: CinderNetappStorageFamily} - cinder::backend::netapp::netapp_storage_protocol: {get_param: CinderNetappStorageProtocol} - cinder::backend::netapp::netapp_transport_type: {get_param: CinderNetappTransportType} - cinder::backend::netapp::netapp_vfiler: {get_param: CinderNetappVfiler} - cinder::backend::netapp::netapp_volume_list: {get_param: CinderNetappVolumeList} - cinder::backend::netapp::netapp_vserver: {get_param: CinderNetappVserver} - cinder::backend::netapp::netapp_partner_backend_name: {get_param: CinderNetappPartnerBackendName} - cinder::backend::netapp::nfs_shares: {get_param: CinderNetappNfsShares} - cinder::backend::netapp::nfs_shares_config: {get_param: CinderNetappNfsSharesConfig} - cinder::backend::netapp::nfs_mount_options: {get_param: CinderNetappNfsMountOptions} - cinder::backend::netapp::netapp_copyoffload_tool_path: {get_param: CinderNetappCopyOffloadToolPath} - 
cinder::backend::netapp::netapp_controller_ips: {get_param: CinderNetappControllerIps} - cinder::backend::netapp::netapp_sa_password: {get_param: CinderNetappSaPassword} - cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools} - cinder::backend::netapp::netapp_eseries_host_type: {get_param: CinderNetappEseriesHostType} - cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath} diff --git a/extraconfig/controller/noop.yaml b/extraconfig/controller/noop.yaml deleted file mode 100644 index 2eb35763..00000000 --- a/extraconfig/controller/noop.yaml +++ /dev/null @@ -1,3 +0,0 @@ -heat_template_version: 2015-04-30 - -description: A stack which doesn't configure anything. diff --git a/extraconfig/post_deploy/README b/extraconfig/post_deploy/README new file mode 100644 index 00000000..3c53fa2c --- /dev/null +++ b/extraconfig/post_deploy/README @@ -0,0 +1,2 @@ +This tree contains additional configuration which happens "post deployment", +e.g after the OpenStack service configuration has been completed. diff --git a/extraconfig/post_deploy/rhel-registration/rhel-registration-resource-registry.yaml b/extraconfig/post_deploy/rhel-registration/rhel-registration-resource-registry.yaml deleted file mode 100644 index 7b48392d..00000000 --- a/extraconfig/post_deploy/rhel-registration/rhel-registration-resource-registry.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resource_registry: - OS::TripleO::NodeExtraConfigPost: rhel-registration.yaml diff --git a/extraconfig/post_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml index 70437a8a..70437a8a 100644 --- a/extraconfig/post_deploy/rhel-registration/environment-rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration-resource-registry.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration-resource-registry.yaml new file mode 100644 index 00000000..75453302 --- /dev/null +++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration-resource-registry.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::NodeExtraConfig: rhel-registration.yaml diff --git a/extraconfig/post_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml index bf6c88cd..d5160915 100644 --- a/extraconfig/post_deploy/rhel-registration/rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml @@ -6,8 +6,8 @@ description: > # Note extra parameters can be defined, then passed data via the # environment parameter_defaults, without modifying the parent template parameters: - servers: - type: json + server: + type: string # To be defined via a local or global environment in parameter_defaults rhel_reg_activation_key: type: string @@ -71,9 +71,9 @@ resources: config: {get_file: scripts/rhel-registration} RHELRegistrationDeployment: - type: OS::Heat::SoftwareDeployments + type: OS::Heat::SoftwareDeployment properties: - servers: {get_param: servers} + server: {get_param: server} config: {get_resource: RHELRegistration} actions: ['CREATE'] # Only do this on CREATE input_values: @@ -104,10 +104,16 @@ resources: - name: REG_METHOD RHELUnregistrationDeployment: - type: OS::Heat::SoftwareDeployments + type: OS::Heat::SoftwareDeployment properties: - servers: {get_param: servers} + server: {get_param: server} config: {get_resource: 
RHELUnregistration} actions: ['DELETE'] # Only do this on DELETE input_values: REG_METHOD: {get_param: rhel_reg_method} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [RHELRegistrationDeployment, deploy_stdout]} + diff --git a/extraconfig/post_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration index c2bf1894..cbbd6a1d 100644 --- a/extraconfig/post_deploy/rhel-registration/scripts/rhel-registration +++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration @@ -94,7 +94,7 @@ fi case "${REG_METHOD:-}" in portal) subscription-manager register $opts - if [ -z "${REG_AUTO_ATTACH:-}" ]; then + if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then subscription-manager attach $attach_opts fi subscription-manager $repos diff --git a/extraconfig/post_deploy/rhel-registration/scripts/rhel-unregistration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-unregistration index 1e72e0a6..1e72e0a6 100644 --- a/extraconfig/post_deploy/rhel-registration/scripts/rhel-unregistration +++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-unregistration diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 3d4c772b..3ba13f23 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -8,6 +8,7 @@ # command_arguments - yum command arguments, defaults to "" echo "Started yum_update.sh on server $deploy_server_id at `date`" +echo -n "false" > $heat_outputs_path.update_managed_packages if [[ -z "$update_identifier" ]]; then echo "Not running due to unset update_identifier" @@ -20,6 +21,9 @@ mkdir -p $timestamp_dir # sanitise to remove unusual characters update_identifier=${update_identifier//[^a-zA-Z0-9-_]/} +# seconds to wait for this node to rejoin the cluster after update +cluster_start_timeout=360 + timestamp_file="$timestamp_dir/$update_identifier" if [[ -a "$timestamp_file" ]]; then echo "Not running for already-run timestamp \"$update_identifier\"" @@ -27,6 +31,136 @@ if [[ -a "$timestamp_file" ]]; then fi touch "$timestamp_file" +command_arguments=${command_arguments:-} + +list_updates=$(yum list updates) + +if [[ "$list_updates" == "" ]]; then + echo "No packages require updating" + exit 0 +fi + +pacemaker_status=$(systemctl is-active pacemaker) + +if [[ "$pacemaker_status" == "active" ]] ; then + echo "Checking for and adding missing constraints" + + if ! pcs constraint order show | grep "start openstack-nova-novncproxy-clone then start openstack-nova-api-clone"; then + pcs constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone + fi + + if ! pcs constraint order show | grep "start rabbitmq-clone then start openstack-keystone-clone"; then + pcs constraint order start rabbitmq-clone then openstack-keystone-clone + fi + + if ! pcs constraint order show | grep "promote galera-master then start openstack-keystone-clone"; then + pcs constraint order promote galera-master then openstack-keystone-clone + fi + + if ! pcs constraint order show | grep "start haproxy-clone then start openstack-keystone-clone"; then + pcs constraint order start haproxy-clone then openstack-keystone-clone + fi + + if ! pcs constraint order show | grep "start memcached-clone then start openstack-keystone-clone"; then + pcs constraint order start memcached-clone then openstack-keystone-clone + fi + + if ! 
pcs constraint order show | grep "promote redis-master then start openstack-ceilometer-central-clone"; then + pcs constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false + fi + + if ! pcs resource defaults | grep "resource-stickiness: INFINITY"; then + pcs resource defaults resource-stickiness=INFINITY + fi + + echo "Setting resource start/stop timeouts" + + # timeouts for non-openstack services and special cases + pcs resource update haproxy op start timeout=100s + pcs resource update haproxy op stop timeout=100s + # mongod start timeout is also higher, setting only stop timeout + pcs resource update mongod op stop timeout=100s + # rabbit start timeout is already 100s + pcs resource update rabbitmq op stop timeout=100s + pcs resource update memcached op start timeout=100s + pcs resource update memcached op stop timeout=100s + pcs resource update httpd op start timeout=100s + pcs resource update httpd op stop timeout=100s + # neutron-netns-cleanup stop timeout is 300s, setting only start timeout + pcs resource update neutron-netns-cleanup op start timeout=100s + # neutron-ovs-cleanup stop timeout is 300s, setting only start timeout + pcs resource update neutron-ovs-cleanup op start timeout=100s + + # timeouts for openstack services + pcs resource update neutron-dhcp-agent op start timeout=100s + pcs resource update neutron-dhcp-agent op stop timeout=100s + pcs resource update neutron-l3-agent op start timeout=100s + pcs resource update neutron-l3-agent op stop timeout=100s + pcs resource update neutron-metadata-agent op start timeout=100s + pcs resource update neutron-metadata-agent op stop timeout=100s + pcs resource update neutron-openvswitch-agent op start timeout=100s + pcs resource update neutron-openvswitch-agent op stop timeout=100s + pcs resource update neutron-server op start timeout=100s + pcs resource update neutron-server op stop timeout=100s + pcs resource update openstack-ceilometer-alarm-evaluator op start timeout=100s + pcs resource update openstack-ceilometer-alarm-evaluator op stop timeout=100s + pcs resource update openstack-ceilometer-alarm-notifier op start timeout=100s + pcs resource update openstack-ceilometer-alarm-notifier op stop timeout=100s + pcs resource update openstack-ceilometer-api op start timeout=100s + pcs resource update openstack-ceilometer-api op stop timeout=100s + pcs resource update openstack-ceilometer-central op start timeout=100s + pcs resource update openstack-ceilometer-central op stop timeout=100s + pcs resource update openstack-ceilometer-collector op start timeout=100s + pcs resource update openstack-ceilometer-collector op stop timeout=100s + pcs resource update openstack-ceilometer-notification op start timeout=100s + pcs resource update openstack-ceilometer-notification op stop timeout=100s + pcs resource update openstack-cinder-api op start timeout=100s + pcs resource update openstack-cinder-api op stop timeout=100s + pcs resource update openstack-cinder-scheduler op start timeout=100s + pcs resource update openstack-cinder-scheduler op stop timeout=100s + pcs resource update openstack-cinder-volume op start timeout=100s + pcs resource update openstack-cinder-volume op stop timeout=100s + pcs resource update openstack-glance-api op start timeout=100s + pcs resource update openstack-glance-api op stop timeout=100s + pcs resource update openstack-glance-registry op start timeout=100s + pcs resource update openstack-glance-registry op stop timeout=100s + pcs resource update openstack-heat-api 
op start timeout=100s + pcs resource update openstack-heat-api op stop timeout=100s + pcs resource update openstack-heat-api-cfn op start timeout=100s + pcs resource update openstack-heat-api-cfn op stop timeout=100s + pcs resource update openstack-heat-api-cloudwatch op start timeout=100s + pcs resource update openstack-heat-api-cloudwatch op stop timeout=100s + pcs resource update openstack-heat-engine op start timeout=100s + pcs resource update openstack-heat-engine op stop timeout=100s + pcs resource update openstack-keystone op start timeout=100s + pcs resource update openstack-keystone op stop timeout=100s + pcs resource update openstack-nova-api op start timeout=100s + pcs resource update openstack-nova-api op stop timeout=100s + pcs resource update openstack-nova-conductor op start timeout=100s + pcs resource update openstack-nova-conductor op stop timeout=100s + pcs resource update openstack-nova-consoleauth op start timeout=100s + pcs resource update openstack-nova-consoleauth op stop timeout=100s + pcs resource update openstack-nova-novncproxy op start timeout=100s + pcs resource update openstack-nova-novncproxy op stop timeout=100s + pcs resource update openstack-nova-scheduler op start timeout=100s + pcs resource update openstack-nova-scheduler op stop timeout=100s + + echo "Pacemaker running, stopping cluster node and doing full package update" + node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*") + if [[ "$node_count" == "1" ]] ; then + echo "Active node count is 1, stopping node with --force" + pcs cluster stop --force + else + pcs cluster stop + fi +else + echo "Excluding upgrading packages that are handled by config management tooling" + command_arguments="$command_arguments --skip-broken" + for exclude in $(cat /var/lib/tripleo/installed-packages/* | sort -u); do + command_arguments="$command_arguments --exclude $exclude" + done +fi + command=${command:-update} full_command="yum -y $command $command_arguments" echo "Running: $full_command" @@ -36,6 +170,27 @@ return_code=$? echo "$result" echo "yum return code: $return_code" +if [[ "$pacemaker_status" == "active" ]] ; then + echo "Starting cluster node" + pcs cluster start + + hostname=$(hostname -s) + tstart=$(date +%s) + while [[ "$(pcs status | grep "^Online" | grep -F -o $hostname)" == "" ]]; do + sleep 5 + tnow=$(date +%s) + if (( tnow-tstart > cluster_start_timeout )) ; then + echo "ERROR $hostname failed to join cluster in $cluster_start_timeout seconds" + pcs status + exit 1 + fi + done + pcs status + +else + echo -n "true" > $heat_outputs_path.update_managed_packages +fi + echo "Finished yum_update.sh on server $deploy_server_id at `date`" exit $return_code diff --git a/extraconfig/tasks/yum_update.yaml b/extraconfig/tasks/yum_update.yaml index e918149e..d313ca9f 100644 --- a/extraconfig/tasks/yum_update.yaml +++ b/extraconfig/tasks/yum_update.yaml @@ -20,7 +20,10 @@ resources: - name: command_arguments description: yum command arguments, defaults to "" default: '' + outputs: + - name: update_managed_packages + description: boolean value indicating whether to upgrade managed packages outputs: OS::stack_id: - value: {get_resource: config}
\ No newline at end of file + value: {get_resource: config} diff --git a/firstboot/userdata_heat_admin.yaml b/firstboot/userdata_heat_admin.yaml new file mode 100644 index 00000000..73481c63 --- /dev/null +++ b/firstboot/userdata_heat_admin.yaml @@ -0,0 +1,29 @@ +heat_template_version: 2014-10-16 + +parameters: + # Can be overriden via parameter_defaults in the environment + node_admin_username: + type: string + default: heat-admin + +description: > + Uses cloud-init to create an additional user with a known name, in addition + to the distro-default user created by the cloud-init default. + +resources: + userdata: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: user_config} + + # Note this requires cloud-init >= 0.7.2 ref bug #1100920 + user_config: + type: OS::Heat::CloudConfig + properties: + cloud_config: + user: {get_param: node_admin_username} + +outputs: + OS::stack_id: + value: {get_resource: userdata} diff --git a/nagios3.yaml b/nagios3.yaml deleted file mode 100644 index e2ba8ccf..00000000 --- a/nagios3.yaml +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -heat_template_version: 2013-05-23 -description: Deploy Nagios -parameters: - adm_web_passwd: - type: string - description: Password for initial admin user - hidden: true - external_network: - type: string - description: Network to attach floating ips to. - default: ext-net - flavor: - type: string - description: What flavor to use for the nagios server. - default: m1.small - image: - type: string - description: Image for Nagios. - default: nagios - key_name: - type: string - description: What Nova SSH key to use for the nagios server. - default: default - monitor_networks: - type: json - description: Neutron networks to monitor. - default: [] - nova_os_auth_url: - type: string - default: '' - description: URL for Keystone to access Nova. - nova_os_password: - type: string - hidden: true - description: password to present to nova_host_ip. - default: '' - nova_os_username: - type: string - description: username to present to nova_host_ip. - default: '' - nova_os_tenant_name: - type: string - description: tenant name to present to nova_host_ip. - default: '' - server_network: - type: string - description: Network id for server. 
- default: default-net -resources: - nagios_config: - type: OS::Heat::StructuredConfig - properties: - config: - nagios3: - adm_web_passwd: { get_input: adm_web_passwd } - os_auth_url: { get_input: nova_os_auth_url } - os_password: { get_input: nova_os_password } - os_username: { get_input: nova_os_username } - os_tenant_name: { get_input: nova_os_tenant_name } - monitor_networks: { get_input: monitor_networks } - completion-signal: { get_input: deploy_signal_id } - nagios_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: monitoring - rules: - - direction: ingress - port_range_max: 22 - port_range_min: 22 - protocol: tcp - - direction: ingress - port_range_max: 80 - port_range_min: 80 - protocol: tcp - - direction: ingress - protocol: icmp - - direction: egress - protocol: tcp - - direction: egress - protocol: udp - - direction: egress - protocol: icmp - nagios_net_port: - type: OS::Neutron::Port - properties: - network_id: { get_param: server_network } - security_groups: [ { get_resource: nagios_security_group } ] - nagios_server: - type: OS::Nova::Server - properties: - flavor: { get_param: flavor } - image: { get_param: image } - key_name: { get_param: key_name } - networks: - - network: { get_param: server_network } - port: { get_resource: nagios_net_port } - user_data_format: SOFTWARE_CONFIG - user_data: {get_resource: NodeUserData} - - NodeUserData: - type: OS::TripleO::NodeUserData - - nagios_floating_ip: - type: OS::Neutron::FloatingIP - properties: - floating_network_id: { get_param: external_network } - port_id: { get_resource: nagios_net_port } - nagios_deploy: - type: OS::Heat::StructuredDeployment - properties: - server: { get_resource: nagios_server } - config: { get_resource: nagios_config } - input_values: - adm_web_passwd: { get_param: adm_web_passwd } - nova_os_auth_url: { get_param: nova_os_auth_url } - nova_os_password: { get_param: nova_os_password } - nova_os_username: { get_param: nova_os_username } - nova_os_tenant_name: { get_param: nova_os_tenant_name } - monitor_networks: { get_param: monitor_networks } -outputs: - nagios_address: - description: Address of Nagios admin interface. - value: { get_attr: [ nagios_floating_ip, floating_ip_address ] } diff --git a/net-config-bond.yaml b/net-config-bond.yaml index d74fc0bc..797df4bf 100644 --- a/net-config-bond.yaml +++ b/net-config-bond.yaml @@ -4,6 +4,10 @@ description: > Software Config to drive os-net-config with 2 bonded nics on a bridge. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network diff --git a/net-config-bridge.yaml b/net-config-bridge.yaml index c3416e02..ad16ef0b 100644 --- a/net-config-bridge.yaml +++ b/net-config-bridge.yaml @@ -4,6 +4,10 @@ description: > Software Config to drive os-net-config for a simple bridge. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network diff --git a/net-config-noop.yaml b/net-config-noop.yaml index 3d88dd9c..30de5846 100644 --- a/net-config-noop.yaml +++ b/net-config-noop.yaml @@ -5,6 +5,10 @@ description: > to use the parameter driven (init-neutron-ovs) configuration instead. 
parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network diff --git a/net-config-static-bridge.yaml b/net-config-static-bridge.yaml new file mode 100644 index 00000000..52c8f895 --- /dev/null +++ b/net-config-static-bridge.yaml @@ -0,0 +1,84 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config for a simple bridge configured + with a static IP address for the ctlplane network. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: ovs_bridge + name: {get_input: bridge_name} + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + members: + - + type: interface + name: {get_input: interface_name} + # force the MAC address of the bridge to this interface + primary: true + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml index cd70cbef..620d1f7a 100644 --- a/network/config/bond-with-vlans/ceph-storage.yaml +++ b/network/config/bond-with-vlans/ceph-storage.yaml @@ -5,6 +5,10 @@ description: > with VLANs attached for the ceph storage role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -38,7 +42,20 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number - + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. 
+ type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string resources: OsNetConfigImpl: @@ -49,6 +66,25 @@ resources: os_net_config: network_config: - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + - type: ovs_bridge name: br-bond members: @@ -69,15 +105,15 @@ resources: device: bond1 vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml index 866112cb..f4c6de8f 100644 --- a/network/config/bond-with-vlans/cinder-storage.yaml +++ b/network/config/bond-with-vlans/cinder-storage.yaml @@ -5,6 +5,10 @@ description: > with VLANs attached for the cinder storage role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -42,6 +46,20 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string resources: OsNetConfigImpl: @@ -52,6 +70,25 @@ resources: os_net_config: network_config: - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + - type: ovs_bridge name: br-bond members: @@ -72,22 +109,22 @@ resources: device: bond1 vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml index 5105ee14..8cb3705b 100644 --- a/network/config/bond-with-vlans/compute.yaml +++ b/network/config/bond-with-vlans/compute.yaml @@ -5,6 +5,10 @@ description: > with VLANs attached for the compute role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -42,6 +46,20 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string resources: OsNetConfigImpl: @@ -52,8 +70,27 @@ resources: os_net_config: network_config: - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + - type: ovs_bridge - name: br-bond + name: {get_input: bridge_name} members: - type: ovs_bond @@ -72,22 +109,22 @@ resources: device: bond1 vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: TenantNetworkVlanID} addresses: - - - ip_netmask: {get_param: TenantIpSubnet} + - + ip_netmask: {get_param: TenantIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml index 9d6a6810..eb4399ea 100644 --- a/network/config/bond-with-vlans/controller.yaml +++ b/network/config/bond-with-vlans/controller.yaml @@ -5,6 +5,10 @@ description: > with VLANs attached for the controller role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -26,7 +30,7 @@ parameters: description: IP address/subnet on the tenant network type: string BondInterfaceOvsOptions: - default: '' + default: 'bond_mode=active-backup' description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using this option. type: string @@ -54,6 +58,17 @@ parameters: default: '10.0.0.1' description: default route for the external network type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string resources: OsNetConfigImpl: @@ -64,8 +79,24 @@ resources: os_net_config: network_config: - + type: interface + name: nic1 + use_dhcp: false + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - type: ovs_bridge name: {get_input: bridge_name} + dns_servers: {get_param: DnsServers} members: - type: ovs_bond @@ -95,29 +126,29 @@ resources: device: bond1 vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: TenantNetworkVlanID} addresses: - - - ip_netmask: {get_param: TenantIpSubnet} + - + ip_netmask: {get_param: TenantIpSubnet} outputs: OS::stack_id: diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml index f31ed0e7..f6b2a699 100644 --- a/network/config/bond-with-vlans/swift-storage.yaml +++ b/network/config/bond-with-vlans/swift-storage.yaml @@ -5,6 +5,10 @@ description: > with VLANs attached for the swift storage role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -42,6 +46,20 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string resources: OsNetConfigImpl: @@ -52,6 +70,25 @@ resources: os_net_config: network_config: - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} + - type: ovs_bridge name: br-bond members: @@ -72,22 +109,22 @@ resources: device: bond1 vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan device: bond1 vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} outputs: OS::stack_id: diff --git a/network/config/multiple-nics/README.md b/network/config/multiple-nics/README.md new file mode 100644 index 00000000..3d81f0be --- /dev/null +++ b/network/config/multiple-nics/README.md @@ -0,0 +1,21 @@ +This directory contains Heat templates to help configure +multiple NICs for each Overcloud role, where it is +assumed that each NIC is running a specific network +traffic type and that VLANs are not being used. + +Configuration +------------- + +To make use of these templates create a Heat environment that looks +something like this: + + resource\_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: network/config/multiple-nics/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: network/config/multiple-nics/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: network/config/multiple-nics/controller.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: network/config/multiple-nics/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: network/config/multiple-nics/ceph-storage.yaml + +Or use this Heat environment file: + + environments/net-multiple-nics.yaml diff --git a/network/config/multiple-nics/ceph-storage.yaml b/network/config/multiple-nics/ceph-storage.yaml new file mode 100644 index 00000000..a0508583 --- /dev/null +++ b/network/config/multiple-nics/ceph-storage.yaml @@ -0,0 +1,113 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the ceph storage role. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. 
+ type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: interface + name: nic2 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: interface + name: nic3 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/multiple-nics/cinder-storage.yaml b/network/config/multiple-nics/cinder-storage.yaml new file mode 100644 index 00000000..c84586bb --- /dev/null +++ b/network/config/multiple-nics/cinder-storage.yaml @@ -0,0 +1,120 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the cinder storage role. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. 
+ type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: interface + name: nic2 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: interface + name: nic3 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + - + type: interface + name: nic4 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/multiple-nics/compute.yaml b/network/config/multiple-nics/compute.yaml new file mode 100644 index 00000000..70a18081 --- /dev/null +++ b/network/config/multiple-nics/compute.yaml @@ -0,0 +1,116 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the compute role. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. 
+ type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: interface + name: nic2 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: interface + name: nic4 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + # Create a bridge which can also be used for VLAN-mode bridge mapping + type: ovs_bridge + name: br-tenant + use_dhcp: false + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + members: + - + type: interface + name: nic5 + use_dhcp: false + # force the MAC address of the bridge to this interface + primary: true + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/multiple-nics/controller.yaml b/network/config/multiple-nics/controller.yaml new file mode 100644 index 00000000..63f53a1f --- /dev/null +++ b/network/config/multiple-nics/controller.yaml @@ -0,0 +1,152 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the controller role. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. 
+ type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: interface + name: nic2 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: interface + name: nic3 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + - + type: interface + name: nic4 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + # Create a bridge which can also be used for VLAN-mode bridge mapping + type: ovs_bridge + name: br-tenant + use_dhcp: false + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + members: + - + type: interface + name: nic5 + use_dhcp: false + # force the MAC address of the bridge to this interface + primary: true + - + type: ovs_bridge + name: {get_input: bridge_name} + use_dhcp: false + addresses: + - + ip_netmask: {get_param: ExternalIpSubnet} + routes: + - + ip_netmask: 0.0.0.0/0 + next_hop: {get_param: ExternalInterfaceDefaultRoute} + members: + - + type: interface + name: nic6 + # force the MAC address of the bridge to this interface + primary: true + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/multiple-nics/swift-storage.yaml b/network/config/multiple-nics/swift-storage.yaml new file mode 100644 index 00000000..25ac75f2 --- /dev/null +++ b/network/config/multiple-nics/swift-storage.yaml @@ -0,0 +1,120 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the swift storage role. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. 
+ type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: interface + name: nic2 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: interface + name: nic3 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + - + type: interface + name: nic4 + use_dhcp: false + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/network/config/single-nic-vlans/ceph-storage.yaml b/network/config/single-nic-vlans/ceph-storage.yaml index 4a25f763..5148c520 100644 --- a/network/config/single-nic-vlans/ceph-storage.yaml +++ b/network/config/single-nic-vlans/ceph-storage.yaml @@ -5,6 +5,10 @@ description: > ceph storage role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -33,6 +37,20 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string resources: OsNetConfigImpl: @@ -45,7 +63,22 @@ resources: - type: ovs_bridge name: br-storage - use_dhcp: true + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} members: - type: interface @@ -56,14 +89,14 @@ resources: type: vlan vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/cinder-storage.yaml b/network/config/single-nic-vlans/cinder-storage.yaml index 397b1ecd..e79a9f4b 100644 --- a/network/config/single-nic-vlans/cinder-storage.yaml +++ b/network/config/single-nic-vlans/cinder-storage.yaml @@ -5,6 +5,10 @@ description: > cinder storage role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -37,6 +41,20 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string resources: OsNetConfigImpl: @@ -49,7 +67,22 @@ resources: - type: ovs_bridge name: br-storage - use_dhcp: true + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} members: - type: interface @@ -60,20 +93,20 @@ resources: type: vlan vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/compute.yaml b/network/config/single-nic-vlans/compute.yaml index c73aed5e..4e93b31c 100644 --- a/network/config/single-nic-vlans/compute.yaml +++ b/network/config/single-nic-vlans/compute.yaml @@ -5,6 +5,10 @@ description: > compute role. 
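Selecting any of these per-role single-nic-vlans configs works the same way as the multiple-nics README above describes, by mapping each role's Net::SoftwareConfig resource in a Heat environment; a sketch for the single-NIC-with-VLANs case (the registry keys mirror that README, while the mapping itself is assumed rather than shown in this patch) would be:

    resource_registry:
      OS::TripleO::Controller::Net::SoftwareConfig: network/config/single-nic-vlans/controller.yaml
      OS::TripleO::Compute::Net::SoftwareConfig: network/config/single-nic-vlans/compute.yaml
      OS::TripleO::BlockStorage::Net::SoftwareConfig: network/config/single-nic-vlans/cinder-storage.yaml
      OS::TripleO::ObjectStorage::Net::SoftwareConfig: network/config/single-nic-vlans/swift-storage.yaml
      OS::TripleO::CephStorage::Net::SoftwareConfig: network/config/single-nic-vlans/ceph-storage.yaml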
parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -37,6 +41,20 @@ parameters: default: 50 description: Vlan ID for the tenant network traffic. type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string resources: OsNetConfigImpl: @@ -49,7 +67,22 @@ resources: - type: ovs_bridge name: {get_input: bridge_name} - use_dhcp: true + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} members: - type: interface @@ -60,20 +93,20 @@ resources: type: vlan vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan vlan_id: {get_param: TenantNetworkVlanID} addresses: - - - ip_netmask: {get_param: TenantIpSubnet} + - + ip_netmask: {get_param: TenantIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml index 4cfa1317..3c536d67 100644 --- a/network/config/single-nic-vlans/controller.yaml +++ b/network/config/single-nic-vlans/controller.yaml @@ -5,6 +5,10 @@ description: > controller role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -49,6 +53,17 @@ parameters: default: '10.0.0.1' description: default route for the external network type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string resources: OsNetConfigImpl: @@ -61,7 +76,19 @@ resources: - type: ovs_bridge name: {get_input: bridge_name} - use_dhcp: true + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} members: - type: interface @@ -82,26 +109,26 @@ resources: type: vlan vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} - type: vlan vlan_id: {get_param: TenantNetworkVlanID} addresses: - - - ip_netmask: {get_param: TenantIpSubnet} + - + ip_netmask: {get_param: TenantIpSubnet} outputs: OS::stack_id: diff --git a/network/config/single-nic-vlans/swift-storage.yaml b/network/config/single-nic-vlans/swift-storage.yaml index f033ced7..83b3304f 100644 --- a/network/config/single-nic-vlans/swift-storage.yaml +++ b/network/config/single-nic-vlans/swift-storage.yaml @@ -5,6 +5,10 @@ description: > swift storage role. parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string ExternalIpSubnet: default: '' description: IP address/subnet on the external network @@ -37,6 +41,20 @@ parameters: default: 40 description: Vlan ID for the storage mgmt network traffic. type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string resources: OsNetConfigImpl: @@ -49,7 +67,22 @@ resources: - type: ovs_bridge name: br-storage - use_dhcp: true + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + default: true + next_hop: {get_param: ControlPlaneDefaultRoute} members: - type: interface @@ -60,20 +93,20 @@ resources: type: vlan vlan_id: {get_param: InternalApiNetworkVlanID} addresses: - - - ip_netmask: {get_param: InternalApiIpSubnet} + - + ip_netmask: {get_param: InternalApiIpSubnet} - type: vlan vlan_id: {get_param: StorageNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageIpSubnet} + - + ip_netmask: {get_param: StorageIpSubnet} - type: vlan vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - - - ip_netmask: {get_param: StorageMgmtIpSubnet} + - + ip_netmask: {get_param: StorageMgmtIpSubnet} outputs: OS::stack_id: diff --git a/network/endpoints/endpoint.yaml b/network/endpoints/endpoint.yaml new file mode 100644 index 00000000..6246cfdd --- /dev/null +++ b/network/endpoints/endpoint.yaml @@ -0,0 +1,60 @@ +heat_template_version: 2015-04-30 + +description: > + OpenStack Endpoint + +parameters: + EndpointName: + type: string + description: The name of the Endpoint being evaluated + EndpointMap: + type: json + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + IP: + type: string + description: The IP address of the Neutron Port that the endpoint is attached to + UriSuffix: + type: string + default: '' + description: A suffix attached to the URL + CloudName: + type: string + default: '' + description: The DNS name of this cloud. E.g. 
ci-overcloud.tripleo.org + +outputs: + endpoint: + description: > + A Hash containing a mapping of service endpoints to ports, protocols, uris + assigned IPs, and hostnames for a specific endpoint + value: + port: {get_param: [EndpointMap, {get_param: EndpointName }, port] } + protocol: {get_param: [EndpointMap, {get_param: EndpointName }, protocol] } + ip: {get_param: IP} + host: + str_replace: + template: {get_param: [EndpointMap, {get_param: EndpointName }, host]} + params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName}} + uri: + list_join: + - '' + - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] } + - '://' + - str_replace: + template: {get_param: [EndpointMap, {get_param: EndpointName }, host]} + params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName }} + - ':' + - {get_param: [EndpointMap, {get_param: EndpointName }, port] } + - {get_param: UriSuffix } + uri_no_suffix: + list_join: + - '' + - - {get_param: [EndpointMap, {get_param: EndpointName }, protocol] } + - '://' + - str_replace: + template: {get_param: [EndpointMap, {get_param: EndpointName }, host]} + params: {IP_ADDRESS: {get_param: IP}, CLOUDNAME: {get_param: CloudName} } + - ':' + - {get_param: [EndpointMap, {get_param: EndpointName }, port] } diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml new file mode 100644 index 00000000..05214011 --- /dev/null +++ b/network/endpoints/endpoint_map.yaml @@ -0,0 +1,450 @@ +heat_template_version: 2015-04-30 + +description: > + A Map of OpenStack Endpoints + +parameters: + CeilometerApiVirtualIP: + type: string + default: '' + CinderApiVirtualIP: + type: string + default: '' + GlanceApiVirtualIP: + type: string + default: '' + GlanceRegistryVirtualIP: + type: string + default: '' + HeatApiVirtualIP: + type: string + default: '' + KeystoneAdminApiVirtualIP: + type: string + default: '' + KeystonePublicApiVirtualIP: + type: string + default: '' + MysqlVirtualIP: + type: string + default: '' + NeutronApiVirtualIP: + type: string + default: '' + NovaApiVirtualIP: + type: string + default: '' + PublicVirtualIP: + type: string + default: '' + SwiftProxyVirtualIP: + type: string + default: '' + EndpointMap: + type: json + default: + CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} + CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} + CeilometerPublic: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'} + CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} + CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} + CinderPublic: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} + GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} + GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} + GlancePublic: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} + GlanceRegistryAdmin: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} + GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} + GlanceRegistryPublic: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} + HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'} + HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'} + HeatPublic: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'} + HorizonPublic: {protocol: 'http', port: '80', host: 'IP_ADDRESS'} + KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'} + KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'} + KeystonePublic: 
{protocol: 'http', port: '5000', host: 'IP_ADDRESS'} + NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} + NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} + NeutronPublic: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'} + NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} + NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} + NovaPublic: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} + NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} + NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} + NovaEC2Public: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'} + SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + SwiftPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + CloudName: + type: string + default: '' + description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org + +resources: + + CeilometerInternal: + type: OS::TripleO::Endpoint + properties: + EndpointName: CeilometerInternal + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: CeilometerApiVirtualIP} + CeilometerPublic: + type: OS::TripleO::Endpoint + properties: + EndpointName: CeilometerPublic + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: PublicVirtualIP} + CeilometerAdmin: + type: OS::TripleO::Endpoint + properties: + EndpointName: CeilometerAdmin + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: CeilometerApiVirtualIP} + + CinderInternal: + type: OS::TripleO::Endpoint + properties: + EndpointName: CinderInternal + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: CinderApiVirtualIP} + UriSuffix: '/v1/%(tenant_id)s' + CinderPublic: + type: OS::TripleO::Endpoint + properties: + EndpointName: CinderPublic + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: PublicVirtualIP} + UriSuffix: '/v1/%(tenant_id)s' + CinderAdmin: + type: OS::TripleO::Endpoint + properties: + EndpointName: CinderAdmin + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: CinderApiVirtualIP} + UriSuffix: '/v1/%(tenant_id)s' + + CinderV2Internal: + type: OS::TripleO::Endpoint + properties: + EndpointName: CinderInternal + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: CinderApiVirtualIP} + UriSuffix: '/v2/%(tenant_id)s' + CinderV2Public: + type: OS::TripleO::Endpoint + properties: + EndpointName: CinderPublic + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: PublicVirtualIP} + UriSuffix: '/v2/%(tenant_id)s' + CinderV2Admin: + type: OS::TripleO::Endpoint + properties: + EndpointName: CinderAdmin + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: CinderApiVirtualIP} + UriSuffix: '/v2/%(tenant_id)s' + + GlanceInternal: + type: OS::TripleO::Endpoint + properties: + EndpointName: GlanceInternal + EndpointMap: { get_param: EndpointMap } + CloudName: {get_param: CloudName} + IP: {get_param: GlanceApiVirtualIP} + GlancePublic: + type: OS::TripleO::Endpoint + properties: + EndpointName: GlancePublic + EndpointMap: { get_param: 
EndpointMap }
+      CloudName: {get_param: CloudName}
+      IP: {get_param: PublicVirtualIP}
+  GlanceAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: GlanceAdmin
+      EndpointMap: { get_param: EndpointMap }
+      CloudName: {get_param: CloudName}
+      IP: {get_param: GlanceApiVirtualIP}
+  GlanceRegistryInternal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: GlanceInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: GlanceRegistryVirtualIP}
+  GlanceRegistryPublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: GlancePublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+  GlanceRegistryAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: GlanceAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: GlanceRegistryVirtualIP}
+
+  HeatInternal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: HeatInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: HeatApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1/%(tenant_id)s'
+  HeatPublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: HeatPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1/%(tenant_id)s'
+  HeatAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: HeatAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: HeatApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1/%(tenant_id)s'
+
+  HorizonPublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: HeatPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/dashboard'
+
+  KeystoneInternal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: KeystoneInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: KeystonePublicApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v2.0'
+  KeystonePublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: KeystonePublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v2.0'
+  KeystoneAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: KeystoneAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: KeystoneAdminApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v2.0'
+  KeystoneEC2:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: KeystoneInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: KeystonePublicApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v2.0/ec2tokens'
+
+  NeutronInternal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NeutronInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NeutronApiVirtualIP}
+      CloudName: {get_param: CloudName}
+  NeutronPublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NeutronPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+  NeutronAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NeutronAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NeutronApiVirtualIP}
+      CloudName: {get_param: CloudName}
+
+  NovaInternal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NovaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v2/%(tenant_id)s'
+  NovaPublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v2/%(tenant_id)s'
+  NovaAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NovaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v2/%(tenant_id)s'
+  NovaV3Internal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NovaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v3'
+  NovaV3Public:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v3'
+  NovaV3Admin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NovaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v3'
+
+  NovaEC2Internal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaEC2Internal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NovaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/services/Cloud'
+  NovaEC2Public:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaEC2Public
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/services/Cloud'
+  NovaEC2Admin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: NovaEC2Admin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: NovaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/services/Admin'
+
+  SwiftInternal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SwiftInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: SwiftProxyVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1/AUTH_%(tenant_id)s'
+  SwiftPublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SwiftPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1/AUTH_%(tenant_id)s'
+  SwiftAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SwiftAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: SwiftProxyVirtualIP}
+      CloudName: {get_param: CloudName}
+      # No Suffix for the Admin interface
+  SwiftS3Internal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SwiftInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: SwiftProxyVirtualIP}
+      CloudName: {get_param: CloudName}
+  SwiftS3Public:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SwiftPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: PublicVirtualIP}
+      CloudName: {get_param: CloudName}
+  SwiftS3Admin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SwiftAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: SwiftProxyVirtualIP}
+      CloudName: {get_param: CloudName}
+
+outputs:
+  endpoint_map:
+    value:
+      CeilometerInternal: {get_attr: [ CeilometerInternal, endpoint] }
+      CeilometerPublic: {get_attr: [ CeilometerPublic, endpoint] }
+      CeilometerAdmin: {get_attr: [ CeilometerAdmin, endpoint] }
+      CinderInternal: {get_attr: [ CinderInternal, endpoint] }
+      CinderPublic: {get_attr: [ CinderPublic, endpoint] }
+      CinderAdmin: {get_attr: [ CinderAdmin, endpoint] }
+      CinderV2Internal: {get_attr: [ CinderV2Internal, endpoint] }
+      CinderV2Public: {get_attr: [ CinderV2Public, endpoint] }
+      CinderV2Admin: {get_attr: [ CinderV2Admin, endpoint] }
+      GlanceInternal: {get_attr: [ GlanceInternal, endpoint] }
+      GlancePublic: {get_attr: [ GlancePublic, endpoint] }
+      GlanceAdmin: {get_attr: [ GlanceAdmin, endpoint] }
+      GlanceRegistryInternal: {get_attr: [ GlanceRegistryInternal, endpoint] }
+      GlanceRegistryPublic: {get_attr: [ GlanceRegistryPublic, endpoint] }
+      GlanceRegistryAdmin: {get_attr: [ GlanceRegistryAdmin, endpoint] }
+      HeatInternal: {get_attr: [ HeatInternal, endpoint] }
+      HeatPublic: {get_attr: [ HeatPublic, endpoint] }
+      HeatAdmin: {get_attr: [ HeatAdmin, endpoint] }
+      HorizonPublic: {get_attr: [ HorizonPublic, endpoint] }
+      KeystoneInternal: {get_attr: [ KeystoneInternal, endpoint] }
+      KeystonePublic: {get_attr: [ KeystonePublic, endpoint] }
+      KeystoneAdmin: {get_attr: [ KeystoneAdmin, endpoint] }
+      KeystoneEC2: {get_attr: [ KeystoneEC2, endpoint] }
+      NeutronInternal: {get_attr: [ NeutronInternal, endpoint] }
+      NeutronPublic: {get_attr: [ NeutronPublic, endpoint] }
+      NeutronAdmin: {get_attr: [ NeutronAdmin, endpoint] }
+      NovaInternal: {get_attr: [ NovaInternal, endpoint] }
+      NovaPublic: {get_attr: [ NovaPublic, endpoint] }
+      NovaAdmin: {get_attr: [ NovaAdmin, endpoint] }
+      NovaV3Internal: {get_attr: [ NovaV3Internal, endpoint] }
+      NovaV3Public: {get_attr: [ NovaV3Public, endpoint] }
+      NovaV3Admin: {get_attr: [ NovaV3Admin, endpoint] }
+      NovaEC2Internal: {get_attr: [ NovaEC2Internal, endpoint] }
+      NovaEC2Public: {get_attr: [ NovaEC2Public, endpoint] }
+      NovaEC2Admin: {get_attr: [ NovaEC2Admin, endpoint] }
+      SwiftInternal: {get_attr: [ SwiftInternal, endpoint] }
+      SwiftPublic: {get_attr: [ SwiftPublic, endpoint] }
+      SwiftAdmin: {get_attr: [ SwiftAdmin, endpoint] }
+      SwiftS3Internal: {get_attr: [ SwiftS3Internal, endpoint] }
+      SwiftS3Public: {get_attr: [ SwiftS3Public, endpoint] }
+      SwiftS3Admin: {get_attr: [ SwiftS3Admin, endpoint] }
diff --git a/network/external.yaml b/network/external.yaml
index bf4bdfe7..e8f92a5e 100644
--- a/network/external.yaml
+++ b/network/external.yaml
@@ -12,7 +12,7 @@ parameters:
   ExternalNetValueSpecs:
     default: {'provider:physical_network': 'external', 'provider:network_type': 'flat'}
     description: Value specs for the external network.
-    type: string
+    type: json
   ExternalNetAdminStateUp:
     default: false
     description: This admin state of of the network.
diff --git a/network/internal_api.yaml b/network/internal_api.yaml
index c7e822e9..69154bef 100644
--- a/network/internal_api.yaml
+++ b/network/internal_api.yaml
@@ -12,7 +12,7 @@ parameters:
   InternalApiNetValueSpecs:
     default: {'provider:physical_network': 'internal_api', 'provider:network_type': 'flat'}
     description: Value specs for the internal API network.
-    type: string
+    type: json
   InternalApiNetAdminStateUp:
     default: false
     description: This admin state of of the network.
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index d5001e43..0d2945bc 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -2,6 +2,7 @@ heat_template_version: 2015-04-30
 
 description: >
   Creates a port for a VIP on the undercloud ctlplane network.
+  The IP address will be chosen automatically if FixedIPs is empty.
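Since the *NetValueSpecs parameters above are now json rather than string, overrides belong in an environment file as a real mapping. A minimal sketch reusing the templates' own flow-mapping style (the VLAN type and segmentation ID are illustrative assumptions, not part of this change):

parameter_defaults:
  # illustrative override for a VLAN-backed external network
  ExternalNetValueSpecs: {'provider:physical_network': 'external', 'provider:network_type': 'vlan', 'provider:segmentation_id': 100}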
 
 parameters:
   NetworkName:
@@ -19,15 +20,20 @@ parameters:
     description: The name of the undercloud Neutron control plane
     default: ctlplane
     type: string
-
+  FixedIPs:
+    description: >
+        Control the IP allocation for the VIP port. E.g.
+        [{'ip_address':'1.2.3.4'}]
+    default: []
+    type: json
 
 resources:
-
   VipPort:
     type: OS::Neutron::Port
     properties:
       network: {get_param: ControlPlaneNetwork}
       name: {get_param: PortName}
+      fixed_ips: {get_param: FixedIPs}
       replacement_policy: AUTO
 
 outputs:
@@ -36,7 +42,7 @@ outputs:
     value: {get_attr: [VipPort, fixed_ips, 0, ip_address]}
   ip_subnet:
     # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
-    description: IP/Subnet CIDR for the internal API network IP
+    description: IP/Subnet CIDR for the ctlplane network.
     value:
       list_join:
       - ''
diff --git a/network/ports/external.yaml b/network/ports/external.yaml
index b5c1e5c9..63e3eeb3 100644
--- a/network/ports/external.yaml
+++ b/network/ports/external.yaml
@@ -1,7 +1,8 @@
 heat_template_version: 2015-04-30
 
 description: >
-  Creates a port on the external network.
+  Creates a port on the external network. The IP address will be chosen
+  automatically if FixedIPs is empty.
 
 parameters:
   ExternalNetName:
@@ -15,6 +16,16 @@ parameters:
   ControlPlaneIP: # Here for compatability with noop.yaml
     description: IP address on the control plane
     type: string
+  ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
+    description: The name of the undercloud Neutron control plane
+    default: ctlplane
+    type: string
+  FixedIPs:
+    description: >
+        Control the IP allocation for the VIP port. E.g.
+        [{'ip_address':'1.2.3.4'}]
+    default: []
+    type: json
 
 resources:
 
@@ -23,6 +34,7 @@ resources:
     properties:
       network: {get_param: ExternalNetName}
       name: {get_param: PortName}
+      fixed_ips: {get_param: FixedIPs}
       replacement_policy: AUTO
 
 outputs:
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index 54614ead..257d3f9b 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -1,6 +1,9 @@
 heat_template_version: 2015-04-30
 
 parameters:
+  ControlPlaneIpList:
+    default: []
+    type: comma_delimited_list
   ExternalIpList:
     default: []
     type: comma_delimited_list
@@ -23,6 +26,7 @@ outputs:
       A Hash containing a mapping of network names to assigned lists
       of IP addresses.
     value:
+      ctlplane: {get_param: ControlPlaneIpList}
       external: {get_param: ExternalIpList}
       internal_api: {get_param: InternalApiIpList}
       storage: {get_param: StorageIpList}
diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml
index edc4060f..7aaed160 100644
--- a/network/ports/net_ip_map.yaml
+++ b/network/ports/net_ip_map.yaml
@@ -1,6 +1,9 @@
 heat_template_version: 2015-04-30
 
 parameters:
+  ControlPlaneIp:
+    default: ''
+    type: string
   ExternalIp:
     default: ''
     type: string
@@ -23,6 +26,7 @@ outputs:
       A Hash containing a mapping of network names to assigned IPs
       for a specific machine.
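The FixedIPs plumbing added above lets an operator pin a VIP instead of taking whatever Neutron allocates. A hedged sketch, mirroring the E.g. shown in the parameter description (the address is a placeholder):

parameter_defaults:
  # pin the public VIP rather than letting Neutron pick one
  PublicVirtualFixedIPs: [{'ip_address': '192.0.2.254'}]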
     value:
+      ctlplane: {get_param: ControlPlaneIp}
       external: {get_param: ExternalIp}
       internal_api: {get_param: InternalApiIp}
       storage: {get_param: StorageIp}
diff --git a/network/ports/net_ip_subnet_map.yaml b/network/ports/net_ip_subnet_map.yaml
new file mode 100644
index 00000000..cf59adb3
--- /dev/null
+++ b/network/ports/net_ip_subnet_map.yaml
@@ -0,0 +1,43 @@
+heat_template_version: 2015-04-30
+
+parameters:
+  ControlPlaneIp:
+    default: ''
+    type: string
+  ExternalIpSubnet:
+    default: ''
+    type: string
+  InternalApiIpSubnet:
+    default: ''
+    type: string
+  StorageIpSubnet:
+    default: ''
+    type: string
+  StorageMgmtIpSubnet:
+    default: ''
+    type: string
+  TenantIpSubnet:
+    default: ''
+    type: string
+  ControlPlaneSubnetCidr: # Override this via parameter_defaults
+    default: '24'
+    description: The subnet CIDR of the control plane network.
+    type: string
+
+outputs:
+  net_ip_subnet_map:
+    description: >
+      A Hash containing a mapping of network names to assigned
+      IP/subnet mappings.
+    value:
+      ctlplane:
+        list_join:
+          - ''
+          - - {get_param: ControlPlaneIp}
+            - '/'
+            - {get_param: ControlPlaneSubnetCidr}
+      external: {get_param: ExternalIpSubnet}
+      internal_api: {get_param: InternalApiIpSubnet}
+      storage: {get_param: StorageIpSubnet}
+      storage_mgmt: {get_param: StorageMgmtIpSubnet}
+      tenant: {get_param: TenantIpSubnet}
diff --git a/network/ports/noop.yaml b/network/ports/noop.yaml
index 9e1a1276..31ee6f3c 100644
--- a/network/ports/noop.yaml
+++ b/network/ports/noop.yaml
@@ -19,6 +19,10 @@ parameters:
     description: # Here for compatability with vip.yaml
     default: ''
     type: string
+  FixedIPs:
+    description: # Here for compatibility with vip.yaml
+    default: []
+    type: json
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index b957e132..299579dc 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -2,6 +2,7 @@ heat_template_version: 2015-04-30
 
 description: >
   Creates a port for a VIP on the isolated network NetworkName.
+  The IP address will be chosen automatically if FixedIPs is empty.
 
 parameters:
   NetworkName:
@@ -15,14 +16,24 @@ parameters:
   ControlPlaneIP: # Here for compatability with noop.yaml
     description: IP address on the control plane
     type: string
+  ControlPlaneNetwork:
+    description: The name of the undercloud Neutron control plane
+    default: ctlplane
+    type: string
+  FixedIPs:
+    description: >
+        Control the IP allocation for the VIP port. E.g.
+        [{'ip_address':'1.2.3.4'}]
+    default: []
+    type: json
 
 resources:
-
   VipPort:
     type: OS::Neutron::Port
     properties:
       network: {get_param: NetworkName}
       name: {get_param: PortName}
+      fixed_ips: {get_param: FixedIPs}
       replacement_policy: AUTO
 
 outputs:
@@ -31,7 +42,7 @@ outputs:
     value: {get_attr: [VipPort, fixed_ips, 0, ip_address]}
   ip_subnet:
     # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
-    description: IP/Subnet CIDR for the internal API network IP
+    description: IP/Subnet CIDR for the network associated with this IP
     value:
       list_join:
       - ''
diff --git a/network/storage.yaml b/network/storage.yaml
index d403f9e5..60b779e0 100644
--- a/network/storage.yaml
+++ b/network/storage.yaml
@@ -12,7 +12,7 @@ parameters:
   StorageNetValueSpecs:
     default: {'provider:physical_network': 'storage', 'provider:network_type': 'flat'}
     description: Value specs for the storage network.
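For the new net_ip_subnet_map.yaml introduced above, the ctlplane entry is assembled with list_join while the other networks pass their *IpSubnet parameters through unchanged. A sketch of the resolved output for a placeholder address and the default '24' CIDR:

net_ip_subnet_map:
  ctlplane: 192.0.2.5/24   # ControlPlaneIp '192.0.2.5' (placeholder) + '/' + ControlPlaneSubnetCidr '24'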
-    type: string
+    type: json
   StorageNetAdminStateUp:
     default: false
     description: This admin state of of the network.
diff --git a/network/storage_mgmt.yaml b/network/storage_mgmt.yaml
index d0c919b5..043bc87b 100644
--- a/network/storage_mgmt.yaml
+++ b/network/storage_mgmt.yaml
@@ -12,7 +12,7 @@ parameters:
   StorageMgmtNetValueSpecs:
     default: {'provider:physical_network': 'storage_mgmt', 'provider:network_type': 'flat'}
     description: Value specs for the storage_mgmt network.
-    type: string
+    type: json
   StorageMgmtNetAdminStateUp:
     default: false
     description: This admin state of of the network.
diff --git a/network/tenant.yaml b/network/tenant.yaml
index 055b87b8..daf5cb75 100644
--- a/network/tenant.yaml
+++ b/network/tenant.yaml
@@ -12,7 +12,7 @@ parameters:
   TenantNetValueSpecs:
     default: {'provider:physical_network': 'tenant', 'provider:network_type': 'flat'}
     description: Value specs for the tenant network.
-    type: string
+    type: json
   TenantNetAdminStateUp:
     default: false
     description: This admin state of of the network.
diff --git a/all-nodes-config.yaml b/os-apply-config/all-nodes-config.yaml
index 3f0bd61c..3f0bd61c 100644
--- a/all-nodes-config.yaml
+++ b/os-apply-config/all-nodes-config.yaml
diff --git a/ceph-cluster-config.yaml b/os-apply-config/ceph-cluster-config.yaml
index f44e27c1..115de085 100644
--- a/ceph-cluster-config.yaml
+++ b/os-apply-config/ceph-cluster-config.yaml
@@ -2,6 +2,18 @@ heat_template_version: 2015-04-30
 description: 'Ceph Cluster config data'
 
 parameters:
+  ceph_storage_count:
+    default: 0
+    type: number
+    description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation.
+  ceph_external_mon_ips:
+    default: ''
+    type: string
+    description: List of external Ceph Mon host IPs.
+  ceph_client_key:
+    default: ''
+    type: string
+    description: Ceph key used to create the client user keyring.
   ceph_fsid:
     default: ''
     type: string
@@ -15,6 +27,18 @@ parameters:
     type: comma_delimited_list
   ceph_mon_ips:
     type: comma_delimited_list
+  NovaRbdPoolName:
+    default: vms
+    type: string
+  CinderRbdPoolName:
+    default: volumes
+    type: string
+  GlanceRbdPoolName:
+    default: images
+    type: string
+  CephClientUserName:
+    default: openstack
+    type: string
 
 resources:
   CephClusterConfigImpl:
diff --git a/ceph-storage-post.yaml b/os-apply-config/ceph-storage-post.yaml
index 734f90bd..734f90bd 100644
--- a/ceph-storage-post.yaml
+++ b/os-apply-config/ceph-storage-post.yaml
diff --git a/ceph-storage.yaml b/os-apply-config/ceph-storage.yaml
index 15092bae..fc321d88 100644
--- a/ceph-storage.yaml
+++ b/os-apply-config/ceph-storage.yaml
@@ -20,10 +20,12 @@ parameters:
     default: ''
     description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key.
     type: string
+    hidden: true
   CephAdminKey:
     default: ''
     description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
     type: string
+    hidden: true
   CephMonitors:
     default: ''
     description: The list of ip/names to use as Ceph monitors
@@ -42,6 +44,18 @@ parameters:
   Hostname:
     type: string
     default: '' # Defaults to Heat created hostname
+  ExtraConfig:
+    default: {}
+    description: |
+      Additional configuration to inject into the cluster. Note
+      that CephStorageExtraConfig takes precedence over ExtraConfig.
+    type: json
+  CephStorageExtraConfig:
+    default: {}
+    description: |
+      Role specific additional configuration to inject into the cluster.
+    type: json
+
 resources:
 
   CephStorage:
@@ -73,6 +87,7 @@ resources:
   NetworkConfig:
     type: OS::TripleO::CephStorage::Net::SoftwareConfig
     properties:
+      ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
diff --git a/cinder-storage-post.yaml b/os-apply-config/cinder-storage-post.yaml
index ad4e0460..ad4e0460 100644
--- a/cinder-storage-post.yaml
+++ b/os-apply-config/cinder-storage-post.yaml
diff --git a/cinder-storage.yaml b/os-apply-config/cinder-storage.yaml
index be088d66..7f1164c4 100644
--- a/cinder-storage.yaml
+++ b/os-apply-config/cinder-storage.yaml
@@ -62,6 +62,11 @@ parameters:
       }
     }
     type: json
+  BlockStorageExtraConfig:
+    default: {}
+    description: |
+      Role specific additional configuration to inject into the cluster.
+    type: json
   Flavor:
     description: Flavor for block storage nodes to request when deploying.
     type: string
@@ -82,6 +87,7 @@ parameters:
   RabbitPassword:
     default: 'guest'
     type: string
+    hidden: true
   RabbitUserName:
     default: 'guest'
     type: string
@@ -160,6 +166,7 @@ resources:
   NetworkConfig:
     type: OS::TripleO::BlockStorage::Net::SoftwareConfig
     properties:
+      ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
diff --git a/compute-post.yaml b/os-apply-config/compute-post.yaml
index 695690d4..695690d4 100644
--- a/compute-post.yaml
+++ b/os-apply-config/compute-post.yaml
diff --git a/compute.yaml b/os-apply-config/compute.yaml
index 4a9a92b9..ee55c587 100644
--- a/compute.yaml
+++ b/os-apply-config/compute.yaml
@@ -25,6 +25,14 @@ parameters:
     description: The password for the ceilometer service account.
     type: string
     hidden: true
+  CinderEnableNfsBackend:
+    default: false
+    description: Whether to enable or not the NFS backend for Cinder
+    type: boolean
+  CinderEnableRbdBackend:
+    default: false
+    description: Whether to enable or not the Rbd backend for Cinder
+    type: boolean
   Debug:
     default: ''
     description: Set to True to enable debugging on all services.
@@ -98,7 +106,10 @@ parameters:
     default: default
     constraints:
     - custom_constraint: nova.keypair
-  KeystoneHost:
+  KeystoneAdminApiVirtualIP:
+    type: string
+    default: ''
+  KeystonePublicApiVirtualIP:
     type: string
     default: ''
   NeutronBridgeMappings:
@@ -125,7 +136,7 @@ parameters:
   NeutronNetworkType:
     type: string
     description: The tenant network type for Neutron, either gre or vxlan.
-    default: 'gre'
+    default: 'vxlan'
   NeutronNetworkVLANRanges:
     default: 'datacentre'
     description: >
@@ -151,7 +162,19 @@ parameters:
     description: |
       The tunnel types for the Neutron tenant network. To specify multiple
       values, use a comma separated string, like so: 'gre,vxlan'
-    default: 'gre'
+    default: 'vxlan'
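The role-specific *ExtraConfig parameters introduced above (CephStorageExtraConfig, BlockStorageExtraConfig) take precedence over the global ExtraConfig. A hedged sketch of feeding extra hieradata to just the Ceph nodes (the hiera key and value are illustrative, not from this change):

parameter_defaults:
  CephStorageExtraConfig:
    ceph::profile::params::osd_journal_size: 5120   # illustrative hiera key/value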
+  NeutronTunnelIdRanges:
+    description: |
+        Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
+        of GRE tunnel IDs that are available for tenant network allocation
+    default: ["1:1000", ]
+    type: comma_delimited_list
+  NeutronVniRanges:
+    description: |
+        Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
+        of VXLAN VNI IDs that are available for tenant network allocation
+    default: ["1:1000", ]
+    type: comma_delimited_list
   NeutronPublicInterfaceRawDevice:
     default: ''
     type: string
@@ -162,6 +185,24 @@ parameters:
     default: 'unset'
     description: Shared secret to prevent spoofing
     type: string
+    hidden: true
+  NeutronCorePlugin:
+    default: "ml2"
+    description: |
+        The core plugin for Neutron. The value should be the entrypoint to be loaded
+        from neutron.core_plugins namespace.
+    type: string
+  NeutronServicePlugins:
+    default: "router"
+    description: |
+        Comma-separated list of service plugin entrypoints to be loaded from the
+        neutron.service_plugins namespace.
+    type: comma_delimited_list
+  NeutronTypeDrivers:
+    default: "vxlan,vlan,flat,gre"
+    description: |
+        Comma-separated list of network type driver entrypoints to be loaded.
+    type: comma_delimited_list
   NeutronMechanismDrivers:
     default: 'openvswitch'
     description: |
@@ -296,6 +337,7 @@ resources:
   NetworkConfig:
     type: OS::TripleO::Compute::Net::SoftwareConfig
     properties:
+      ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
@@ -347,6 +389,9 @@ resources:
           agent_mode: {get_input: neutron_agent_mode}
           ovs_db: {get_input: neutron_dsn}
           metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
+          core_plugin: {get_input: neutron_core_plugin}
+          service_plugins: {get_input: neutron_service_plugins}
+          type_drivers: {get_input: neutron_type_drivers}
           mechanism_drivers: {get_input: neutron_mechanism_drivers}
           allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
           l3_ha: {get_input: neutron_l3_ha}
@@ -355,6 +400,8 @@ resources:
             tenant_network_type: {get_input: neutron_tenant_network_type}
             tunnel_types: {get_input: neutron_tunnel_types}
             network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
+            tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
+            vni_ranges: {get_input: neutron_vni_ranges}
            bridge_mappings: {get_input: neutron_bridge_mappings}
             enable_tunneling: {get_input: neutron_enable_tunneling}
             physical_bridge: {get_input: neutron_physical_bridge}
@@ -391,12 +438,28 @@ resources:
         glance_host: {get_param: GlanceHost}
         glance_port: {get_param: GlancePort}
         glance_protocol: {get_param: GlanceProtocol}
-        keystone_host: {get_param: KeystoneHost}
+        keystone_host: {get_param: KeystonePublicApiVirtualIP}
         neutron_flat_networks: {get_param: NeutronFlatNetworks}
         neutron_host: {get_param: NeutronHost}
         neutron_local_ip: {get_attr: [NovaCompute, networks, ctlplane, 0]}
         neutron_tenant_network_type: {get_param: NeutronNetworkType}
         neutron_tunnel_types: {get_param: NeutronTunnelTypes}
+        neutron_tunnel_id_ranges:
+          str_replace:
+            template: "['RANGES']"
+            params:
+              RANGES:
+                list_join:
+                - "','"
+                - {get_param: NeutronTunnelIdRanges}
+        neutron_vni_ranges:
+          str_replace:
+            template: "['RANGES']"
+            params:
+              RANGES:
+                list_join:
+                - "','"
+                - {get_param: NeutronVniRanges}
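The str_replace/list_join pattern above turns the comma_delimited_list into the quoted-list string the element configuration expects. As a sketch with illustrative values:

parameter_defaults:
  NeutronVniRanges: ['10:1000', '2000:2999']
# which the str_replace above renders as the string: "['10:1000','2000:2999']"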
         neutron_network_vlan_ranges: {get_param: NeutronNetworkVLANRanges}
         neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
         neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
@@ -406,6 +469,23 @@ resources:
         neutron_agent_mode: {get_param: NeutronAgentMode}
         neutron_router_distributed: {get_param: NeutronDVR}
         neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
+        neutron_core_plugin: {get_param: NeutronCorePlugin}
+        neutron_service_plugins:
+          str_replace:
+            template: "['PLUGINS']"
+            params:
+              PLUGINS:
+                list_join:
+                - "','"
+                - {get_param: NeutronServicePlugins}
+        neutron_type_drivers:
+          str_replace:
+            template: "['DRIVERS']"
+            params:
+              DRIVERS:
+                list_join:
+                - "','"
+                - {get_param: NeutronTypeDrivers}
         neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
         neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
         neutron_l3_ha: {get_param: NeutronL3HA}
diff --git a/controller-post.yaml b/os-apply-config/controller-post.yaml
index aac96357..aac96357 100644
--- a/controller-post.yaml
+++ b/os-apply-config/controller-post.yaml
diff --git a/controller.yaml b/os-apply-config/controller.yaml
index 6387f24e..09ea49b8 100644
--- a/controller.yaml
+++ b/os-apply-config/controller.yaml
@@ -28,6 +28,10 @@ parameters:
     description: The password for the ceilometer service and db account.
     type: string
     hidden: true
+  CinderEnableNfsBackend:
+    default: false
+    description: Whether to enable or not the NFS backend for Cinder
+    type: boolean
   CinderEnableIscsiBackend:
     default: true
     description: Whether to enable or not the Iscsi backend for Cinder
@@ -44,6 +48,18 @@ parameters:
     default: 5000
     description: The size of the loopback file used by the cinder LVM driver.
     type: number
+  CinderNfsMountOptions:
+    default: ''
+    description: >
+      Mount options for NFS mounts used by Cinder NFS backend. Effective
+      when CinderEnableNfsBackend is true.
+    type: string
+  CinderNfsServers:
+    default: ''
+    description: >
+      NFS servers used by Cinder NFS backend. Effective when
+      CinderEnableNfsBackend is true.
+    type: comma_delimited_list
   CinderPassword:
     default: unset
     description: The password for the cinder service and db account, used by cinder-api.
@@ -67,6 +83,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  EnableFencing:
+    default: false
+    description: Whether to enable fencing in Pacemaker or not.
+    type: boolean
   EnableGalera:
     default: true
     description: Whether to use Galera instead of regular MariaDB.
@@ -117,6 +137,38 @@ parameters:
       }
     }
     type: json
+  FencingConfig:
+    default: {}
+    description: |
+      Pacemaker fencing configuration. The JSON should have
+      the following structure:
+        {
+          "devices": [
+            {
+              "agent": "AGENT_NAME",
+              "host_mac": "HOST_MAC_ADDRESS",
+              "params": {"PARAM_NAME": "PARAM_VALUE"}
+            }
+          ]
+        }
+      For instance:
+        {
+          "devices": [
+            {
+              "agent": "fence_xvm",
+              "host_mac": "52:54:00:aa:bb:cc",
+              "params": {
+                "multicast_address": "225.0.0.12",
+                "port": "baremetal_0",
+                "manage_fw": true,
+                "manage_key_file": true,
+                "key_file": "/etc/fence_xvm.key",
+                "key_file_password": "abcdef"
+              }
+            }
+          ]
+        }
+    type: json
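With CinderEnableNfsBackend, CinderNfsServers and CinderNfsMountOptions now exposed above, an NFS-backed Cinder could be sketched as follows (the export path is a placeholder, and turning off the iSCSI backend is an assumption about the intended setup):

parameter_defaults:
  CinderEnableIscsiBackend: false
  CinderEnableNfsBackend: true
  CinderNfsServers: ['192.0.2.20:/export/cinder']   # placeholder NFS export
  CinderNfsMountOptions: 'rw,sync'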
   Flavor:
     description: Flavor for control nodes to request when deploying.
     type: string
@@ -163,9 +215,11 @@ parameters:
   HeatAuthEncryptionKey:
     description: Auth encryption key for heat-engine
     type: string
+    hidden: true
   HorizonSecret:
     description: Secret key for Django
     type: string
+    hidden: true
   Image:
     type: string
     default: overcloud-control
@@ -203,6 +257,16 @@ parameters:
     description: Keystone key for signing tokens.
     type: string
     hidden: true
+  KeystoneNotificationDriver:
+    description: Comma-separated list of Oslo notification drivers used by Keystone
+    default: ['messaging']
+    type: comma_delimited_list
+  KeystoneNotificationFormat:
+    description: The Keystone notification format
+    default: 'basic'
+    type: string
+    constraints:
+      - allowed_values: [ 'basic', 'cadf' ]
   MysqlClusterUniquePart:
     description: A unique identifier of the MySQL cluster the controller is in.
     type: string
@@ -217,6 +281,10 @@ parameters:
       lower level default.
     type: number
     default: 0
+  MysqlMaxConnections:
+    description: Configures MySQL max_connections config setting
+    type: number
+    default: 4096
   MysqlRootPassword:
     type: string
     hidden: true
@@ -251,6 +319,24 @@ parameters:
     default: 'unset'
     description: Shared secret to prevent spoofing
     type: string
+    hidden: true
+  NeutronCorePlugin:
+    default: 'ml2'
+    description: |
+        The core plugin for Neutron. The value should be the entrypoint to be loaded
+        from neutron.core_plugins namespace.
+    type: string
+  NeutronServicePlugins:
+    default: "router"
+    description: |
+        Comma-separated list of service plugin entrypoints to be loaded from the
+        neutron.service_plugins namespace.
+    type: comma_delimited_list
+  NeutronTypeDrivers:
+    default: "vxlan,vlan,flat,gre"
+    description: |
+        Comma-separated list of network type driver entrypoints to be loaded.
+    type: comma_delimited_list
   NeutronMechanismDrivers:
     default: 'openvswitch'
     description: |
@@ -265,6 +351,10 @@ parameters:
     default: 'False'
     description: Whether to enable l3-agent HA
     type: string
+  NeutronDhcpAgentsPerNetwork:
+    type: number
+    default: 3
+    description: The number of neutron dhcp agents to schedule per network
   NeutronEnableTunnelling:
     type: string
     default: "True"
@@ -273,7 +363,7 @@ parameters:
     default: 'datacentre'
     description: If set, flat networks to configure in neutron plugins.
   NeutronNetworkType:
-    default: 'gre'
+    default: 'vxlan'
     description: The tenant network type for Neutron, either gre or vxlan.
     type: string
   NeutronNetworkVLANRanges:
@@ -315,22 +405,39 @@ parameters:
     description: If set, the public interface is a vlan with this device as the raw device.
     type: string
   NeutronTunnelTypes:
-    default: 'gre'
+    default: 'vxlan'
     description: |
       The tunnel types for the Neutron tenant network. To specify multiple
       values, use a comma separated string, like so: 'gre,vxlan'
     type: string
+  NeutronTunnelIdRanges:
+    description: |
+        Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
+        of GRE tunnel IDs that are available for tenant network allocation
+    default: ["1:1000", ]
+    type: comma_delimited_list
+  NeutronVniRanges:
+    description: |
+        Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
+        of VXLAN VNI IDs that are available for tenant network allocation
+    default: ["1:1000", ]
+    type: comma_delimited_list
   NovaPassword:
     default: unset
     description: The password for the nova service and db account, used by nova-api.
     type: string
     hidden: true
+  MongoDbNoJournal:
+    default: false
+    description: Should MongoDb journaling be disabled
+    type: boolean
   NtpServer:
     type: string
     default: ''
   PcsdPassword:
     type: string
     description: The password for the 'pcsd' user.
+    hidden: true
   PublicVirtualInterface:
     default: 'br-ex'
     description: >
@@ -363,6 +470,10 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  RabbitFDLimit:
+    default: 16384
+    description: Configures RabbitMQ FD limit
+    type: string
   RedisVirtualIP:
     type: string
     default: ''  # Has to be here because of the ignored empty value bug
@@ -429,6 +540,9 @@ parameters:
   MysqlVirtualIP:
     type: string
     default: ''
+  KeystoneAdminApiVirtualIP:
+    type: string
+    default: ''
   KeystonePublicApiVirtualIP:
     type: string
     default: ''
@@ -496,6 +610,7 @@ resources:
   NetworkConfig:
     type: OS::TripleO::Controller::Net::SoftwareConfig
     properties:
+      ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]}
       ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
@@ -558,6 +673,7 @@ resources:
           debug: {get_input: debug}
           host: {get_input: controller_virtual_ip}
           port: {get_input: glance_port}
+          uri: {get_input: glance_uri}
           protocol: {get_input: glance_protocol}
           service-password: {get_input: glance_password}
           swift-store-user: service:glance
@@ -597,9 +713,13 @@ resources:
           metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
           agent_mode: {get_input: neutron_agent_mode}
           router_distributed: {get_input: neutron_router_distributed}
+          core_plugin: {get_input: neutron_core_plugin}
+          service_plugins: {get_input: neutron_service_plugins}
+          type_drivers: {get_input: neutron_type_drivers}
           mechanism_drivers: {get_input: neutron_mechanism_drivers}
           allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
           l3_ha: {get_input: neutron_l3_ha}
+          dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
           ovs:
             enable_tunneling: {get_input: neutron_enable_tunneling}
             local_ip: {get_input: controller_host}
@@ -612,6 +732,8 @@ resources:
             physical_bridge: br-ex
             tenant_network_type: {get_input: neutron_tenant_network_type}
             tunnel_types: {get_input: neutron_tunnel_types}
+            tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
+            vni_ranges: {get_input: neutron_vni_ranges}
            ovs_db: {get_input: neutron_dsn}
           service-password: {get_input: neutron_password}
           dnsmasq-options: {get_input: neutron_dnsmasq_options}
@@ -632,6 +754,8 @@ resources:
           host: {get_input: controller_virtual_ip}
           metadata-proxy: true
           service-password: {get_input: nova_password}
+        mongodb:
+          nojournal: {get_input: mongodb_no_journal}
         rabbit:
           host: {get_input: controller_virtual_ip}
           username: {get_input: rabbit_username}
@@ -827,6 +951,7 @@ resources:
             - '@'
             - {get_param: VirtualIP}
             - '/keystone'
+        mongodb_no_journal: {get_param: MongoDbNoJournal}
        mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
         mysql_root_password: {get_param: MysqlRootPassword}
         mysql_cluster_name:
@@ -838,9 +963,27 @@ resources:
         neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
         neutron_agent_mode: {get_param: NeutronAgentMode}
         neutron_router_distributed: {get_param: NeutronDVR}
+        neutron_core_plugin: {get_param: NeutronCorePlugin}
+        neutron_service_plugins:
+          str_replace:
+            template: "['PLUGINS']"
+            params:
+              PLUGINS:
+                list_join:
+                - "','"
+                - {get_param: NeutronServicePlugins}
+        neutron_type_drivers:
+          str_replace:
+            template: "['DRIVERS']"
+            params:
+              DRIVERS:
+                list_join:
+                - "','"
+                - {get_param: NeutronTypeDrivers}
         neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
         neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
         neutron_l3_ha: {get_param: NeutronL3HA}
+        neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
         neutron_network_vlan_ranges: {get_param: NeutronNetworkVLANRanges}
         neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
         neutron_public_interface: {get_param: NeutronPublicInterface}
@@ -849,6 +992,22 @@ resources:
         neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag}
         neutron_tenant_network_type: {get_param: NeutronNetworkType}
         neutron_tunnel_types: {get_param: NeutronTunnelTypes}
+        neutron_tunnel_id_ranges:
+          str_replace:
+            template: "['RANGES']"
+            params:
+              RANGES:
+                list_join:
+                - "','"
+                - {get_param: NeutronTunnelIdRanges}
+        neutron_vni_ranges:
+          str_replace:
+            template: "['RANGES']"
+            params:
+              RANGES:
+                list_join:
+                - "','"
+                - {get_param: NeutronVniRanges}
         neutron_password: {get_param: NeutronPassword}
         neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
         neutron_dsn:
diff --git a/swift-devices-and-proxy-config.yaml b/os-apply-config/swift-devices-and-proxy-config.yaml
index 4f01dbea..4f01dbea 100644
--- a/swift-devices-and-proxy-config.yaml
+++ b/os-apply-config/swift-devices-and-proxy-config.yaml
diff --git a/swift-storage-post.yaml b/os-apply-config/swift-storage-post.yaml
index 1b1c406d..1b1c406d 100644
--- a/swift-storage-post.yaml
+++ b/os-apply-config/swift-storage-post.yaml
diff --git a/swift-storage.yaml b/os-apply-config/swift-storage.yaml
index e4cacf3c..d62d7d1a 100644
--- a/swift-storage.yaml
+++ b/os-apply-config/swift-storage.yaml
@@ -39,6 +44,11 @@ parameters:
       }
     }
     type: json
+  ObjectStorageExtraConfig:
+    default: {}
+    description: |
+      Role specific additional configuration to inject into the cluster.
+    type: json
   Flavor:
     description: Flavor for Swift storage nodes to request when deploying.
     type: string
@@ -144,6 +149,7 @@ resources:
   NetworkConfig:
     type: OS::TripleO::ObjectStorage::Net::SoftwareConfig
     properties:
+      ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
diff --git a/vip-config.yaml b/os-apply-config/vip-config.yaml
index 8f984ab7..8f984ab7 100644
--- a/vip-config.yaml
+++ b/os-apply-config/vip-config.yaml
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index c9041c6b..4cfed6b4 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -1,29 +1,49 @@
 resource_registry:
-  OS::TripleO::BlockStorage: puppet/cinder-storage-puppet.yaml
+  OS::TripleO::BlockStorage: puppet/cinder-storage.yaml
   OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::Compute: puppet/compute-puppet.yaml
+  OS::TripleO::Compute: puppet/compute.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml
   OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
-  OS::TripleO::Controller: puppet/controller-puppet.yaml
+  OS::TripleO::Controller: puppet/controller.yaml
   OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
-  OS::TripleO::ObjectStorage: puppet/swift-storage-puppet.yaml
+  OS::TripleO::ObjectStorage: puppet/swift-storage.yaml
   OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::CephStorage: puppet/ceph-storage-puppet.yaml
+  OS::TripleO::CephStorage: puppet/ceph-storage.yaml
   OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::ControllerPostDeployment: puppet/controller-post-puppet.yaml
+  OS::TripleO::ControllerPostDeployment: puppet/controller-post.yaml
   # set to controller-config-pacemaker.yaml to enable pacemaker
   OS::TripleO::ControllerConfig: puppet/controller-config.yaml
-  OS::TripleO::ComputePostDeployment: puppet/compute-post-puppet.yaml
+  OS::TripleO::ComputePostDeployment: puppet/compute-post.yaml
   OS::TripleO::ObjectStoragePostDeployment: puppet/swift-storage-post.yaml
   OS::TripleO::BlockStoragePostDeployment: puppet/cinder-storage-post.yaml
-  OS::TripleO::CephStoragePostDeployment: puppet/ceph-storage-post-puppet.yaml
+  OS::TripleO::CephStoragePostDeployment: puppet/ceph-storage-post.yaml
   OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: puppet/swift-devices-and-proxy-config.yaml
   OS::TripleO::CephClusterConfig::SoftwareConfig: puppet/ceph-cluster-config.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
   OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml
+  OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
+
+  # This creates the "heat-admin" user for all OS images by default
+  # To disable, replace with firstboot/userdata_default.yaml
+  OS::TripleO::NodeAdminUserData: firstboot/userdata_heat_admin.yaml
+
+  # Hooks for operator extra config
+  # NodeUserData == Cloud-init additional user-data, e.g cloud-config
+  # ControllerExtraConfigPre == Controller configuration pre service deployment
+  # NodeExtraConfig == All nodes configuration pre service deployment
+  # NodeExtraConfigPost == All nodes configuration post service deployment
   OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
+  OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
+  OS::TripleO::ComputeExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
+  OS::TripleO::CephStorageExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
+  OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
   OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
-  OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
+
+  # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
+  # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
+  # configuration with knowledge of all nodes in the cluster is required vs single
+  # node configuration in the pre_deploy step.
+  OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml
 
   # TripleO overcloud networks
   OS::TripleO::Network: network/networks.yaml
@@ -37,6 +57,7 @@ resource_registry:
   OS::TripleO::Network::Tenant: network/noop.yaml
 
   OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
+  OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
   OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
 
   # Port assignments for the controller role
@@ -65,11 +86,15 @@ resource_registry:
   OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml
   OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
 
-  # Cinder backend config for the controller role
-  OS::TripleO::Controller::CinderBackend: extraconfig/controller/noop.yaml
-
   # Port assignments for service virtual IPs for the controller role
   OS::TripleO::Controller::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
 
+  # Service Endpoint Mappings
+  OS::TripleO::Endpoint: network/endpoints/endpoint.yaml
+  OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml
+
+  # validation resources
+  OS::TripleO::AllNodes::Validation: all-nodes-validation.yaml
+
 parameter_defaults:
   EnablePackageInstall: false
diff --git a/overcloud-resource-registry.yaml b/overcloud-resource-registry.yaml
index 78607b51..ed02551b 100644
--- a/overcloud-resource-registry.yaml
+++ b/overcloud-resource-registry.yaml
@@ -1,31 +1,37 @@
 resource_registry:
-  OS::TripleO::BlockStorage: cinder-storage.yaml
+  OS::TripleO::BlockStorage: os-apply-config/cinder-storage.yaml
   OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::Compute: compute.yaml
+  OS::TripleO::Compute: os-apply-config/compute.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml
   OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
-  OS::TripleO::Controller: controller.yaml
+  OS::TripleO::Controller: os-apply-config/controller.yaml
   OS::TripleO::Controller::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::ObjectStorage: swift-storage.yaml
+  OS::TripleO::ObjectStorage: os-apply-config/swift-storage.yaml
   OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::CephStorage: ceph-storage.yaml
+  OS::TripleO::CephStorage: os-apply-config/ceph-storage.yaml
   OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::ControllerPostDeployment: controller-post.yaml
-  OS::TripleO::ComputePostDeployment: compute-post.yaml
-  OS::TripleO::ObjectStoragePostDeployment: swift-storage-post.yaml
-  OS::TripleO::BlockStoragePostDeployment: cinder-storage-post.yaml
-  OS::TripleO::CephStoragePostDeployment: ceph-storage-post.yaml
-  OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: swift-devices-and-proxy-config.yaml
-  OS::TripleO::CephClusterConfig::SoftwareConfig: ceph-cluster-config.yaml
-  OS::TripleO::AllNodes::SoftwareConfig: all-nodes-config.yaml
+  OS::TripleO::ControllerPostDeployment: os-apply-config/controller-post.yaml
+  OS::TripleO::ComputePostDeployment: os-apply-config/compute-post.yaml
+  OS::TripleO::ObjectStoragePostDeployment: os-apply-config/swift-storage-post.yaml
+  OS::TripleO::BlockStoragePostDeployment: os-apply-config/cinder-storage-post.yaml
+  OS::TripleO::CephStoragePostDeployment: os-apply-config/ceph-storage-post.yaml
+  OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig: os-apply-config/swift-devices-and-proxy-config.yaml
+  OS::TripleO::CephClusterConfig::SoftwareConfig: os-apply-config/ceph-cluster-config.yaml
+  OS::TripleO::AllNodes::SoftwareConfig: os-apply-config/all-nodes-config.yaml
   OS::TripleO::BootstrapNode::SoftwareConfig: bootstrap-config.yaml
   OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
   OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
   OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
 
+  # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
+  # phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
+  # configuration with knowledge of all nodes in the cluster is required vs single
+  # node configuration in the pre_deploy step.
+  OS::TripleO::AllNodesExtraConfig: extraconfig/all_nodes/default.yaml
+
   # TripleO overcloud networks
   OS::TripleO::Network: network/networks.yaml
-  OS::TripleO::VipConfig: vip-config.yaml
+  OS::TripleO::VipConfig: os-apply-config/vip-config.yaml
 
   OS::TripleO::Network::External: network/noop.yaml
   OS::TripleO::Network::InternalApi: network/noop.yaml
@@ -34,6 +40,7 @@ resource_registry:
   OS::TripleO::Network::Tenant: network/noop.yaml
 
   OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
+  OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
   OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
 
   # Port assignments for the controller role
@@ -64,3 +71,10 @@ resource_registry:
 
   # Port assignments for service virtual IPs for the controller role
   OS::TripleO::Controller::Ports::RedisVipPort: network/ports/noop.yaml
+
+  # Service Endpoint Mappings
+  OS::TripleO::Endpoint: network/endpoints/endpoint.yaml
+  OS::TripleO::EndpointMap: network/endpoints/endpoint_map.yaml
+
+  # validation resources
+  OS::TripleO::AllNodes::Validation: os-apply-config/all-nodes-validation.yaml
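The *ExtraConfigPre/NodeExtraConfig hooks registered above are the intended graft points for operator-specific configuration. As a hedged sketch, an environment file could point one hook at a local template (the path is hypothetical):

resource_registry:
  # hypothetical local template implementing extra pre-deployment controller config
  OS::TripleO::ControllerExtraConfigPre: /home/stack/templates/controller_pre.yaml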
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index add10b53..3e09cc5d 100644
--- a/overcloud-without-mergepy.yaml
+++ b/overcloud-without-mergepy.yaml
@@ -1,8 +1,10 @@
 heat_template_version: 2015-04-30
 
 description: >
-  Nova API,Keystone,Heat Engine and API,Glance,Neutron,Dedicated MySQL
-  server,Dedicated RabbitMQ Server,Group of Nova Computes
+  Deploy an OpenStack environment, consisting of several node types (roles),
+  Controller, Compute, BlockStorage, SwiftStorage and CephStorage.  The Storage
+  roles enable independent scaling of the storage components, but the minimal
+  deployment is one Controller and one Compute node.
 
 
 # TODO(shadower): we should probably use the parameter groups to put
@@ -38,14 +40,33 @@ parameters:
     default: ''
     description: The Ceph monitors key. Can be created with ceph-authtool --gen-print-key.
     type: string
+    hidden: true
   CephAdminKey:
     default: ''
     description: The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
     type: string
+    hidden: true
+  CinderEnableNfsBackend:
+    default: false
+    description: Whether to enable or not the NFS backend for Cinder
+    type: boolean
+  CephClientKey:
+    default: ''
+    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+    type: string
+    hidden: true
+  CephExternalMonHost:
+    default: ''
+    type: string
+    description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
   CinderEnableIscsiBackend:
     default: true
     description: Whether to enable or not the Iscsi backend for Cinder
     type: boolean
+  CinderEnableRbdBackend:
+    default: false
+    description: Whether to enable or not the Rbd backend for Cinder
+    type: boolean
   CloudName:
     default: ''
     description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
@@ -58,14 +79,14 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
-  GlancePort:
-    default: "9292"
-    description: Glance port.
-    type: string
-  GlanceProtocol:
-    default: http
-    description: Protocol to use when connecting to glance, set to https for SSL.
+  HAProxySyslogAddress:
+    default: /dev/log
+    description: Syslog address where HAproxy will send its log
     type: string
+  HorizonAllowedHosts:
+    default: '*'
+    description: A list of IP/Hostname allowed to connect to horizon
+    type: comma_delimited_list
   ImageUpdatePolicy:
     default: 'REBUILD_PRESERVE_EPHEMERAL'
     description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
@@ -91,9 +112,9 @@ parameters:
     type: string
     default: "datacentre:br-ex"
   NeutronControlPlaneID:
-    default: ''
+    default: 'ctlplane'
     type: string
-    description: Neutron ID for ctlplane network.
+    description: Neutron ID or name for ctlplane network.
   NeutronEnableTunnelling:
     type: string
     default: "True"
@@ -104,7 +125,7 @@ parameters:
       If set, flat networks to configure in neutron plugins. Defaults to
      'datacentre' to permit external network creation.
   NeutronNetworkType:
-    default: 'gre'
+    default: 'vxlan'
     description: The tenant network type for Neutron, either gre or vxlan.
     type: string
   NeutronPassword:
@@ -142,12 +163,42 @@ parameters:
     default: 'unset'
     description: Shared secret to prevent spoofing
     type: string
+    hidden: true
   NeutronTunnelTypes:
-    default: 'gre'
+    default: 'vxlan'
     description: |
       The tunnel types for the Neutron tenant network. To specify multiple
       values, use a comma separated string, like so: 'gre,vxlan'
     type: string
+  NeutronTunnelIdRanges:
+    description: |
+        Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
+        of GRE tunnel IDs that are available for tenant network allocation
+    default: ["1:1000", ]
+    type: comma_delimited_list
+  NeutronVniRanges:
+    description: |
+        Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges
+        of VXLAN VNI IDs that are available for tenant network allocation
+    default: ["1:1000", ]
+    type: comma_delimited_list
+  NeutronCorePlugin:
+    default: 'ml2'
+    description: |
+        The core plugin for Neutron. The value should be the entrypoint to be loaded
+        from neutron.core_plugins namespace.
+    type: string
+  NeutronServicePlugins:
+    default: "router"
+    description: |
+        Comma-separated list of service plugin entrypoints to be loaded from the
+        neutron.service_plugins namespace.
+    type: comma_delimited_list
+  NeutronTypeDrivers:
+    default: "vxlan,vlan,flat,gre"
+    description: |
+        Comma-separated list of network type driver entrypoints to be loaded.
+    type: comma_delimited_list
   NeutronMechanismDrivers:
     default: 'openvswitch'
     description: |
@@ -155,32 +206,36 @@ parameters:
       values, use a comma separated string, like so: 'openvswitch,l2_population'
     type: string
   NeutronAllowL3AgentFailover:
-    default: 'True'
+    default: 'False'
     description: Allow automatic l3-agent failover
     type: string
   NeutronL3HA:
     default: 'False'
     description: Whether to enable l3-agent HA
     type: string
+  NeutronDhcpAgentsPerNetwork:
+    type: number
+    default: 1
+    description: The number of neutron dhcp agents to schedule per network
   NovaPassword:
     default: unset
     description: The password for the nova service account, used by nova-api.
     type: string
     hidden: true
   NtpServer:
-    type: string
     default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
+  MongoDbNoJournal:
+    default: false
+    description: Should MongoDb journaling be disabled
+    type: boolean
   PublicVirtualFixedIPs:
     default: []
     description: >
        Control the IP allocation for the PublicVirtualInterface port. E.g.
        [{'ip_address':'1.2.3.4'}]
     type: json
-  PublicVirtualNetwork:
-    default: 'ctlplane'
-    type: string
-    description: >
-        Neutron network to allocate public virtual IP port on.
   RabbitCookieSalt:
     type: string
     default: unset
@@ -205,6 +260,11 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  # We need to set this as string because 'unlimited' is a valid setting
+  RabbitFDLimit:
+    default: 16384
+    description: Configures RabbitMQ FD limit
+    type: string
   SnmpdReadonlyUserName:
     default: ro_snmp_user
     description: The user name for SNMPd with readonly rights running on all Overcloud nodes
@@ -221,14 +281,22 @@ parameters:
     description: The keystone auth secret.
     type: string
     hidden: true
-  CinderEnableRbdBackend:
-    default: false
-    description: Whether to enable or not the Rbd backend for Cinder
-    type: boolean
   CinderLVMLoopDeviceSize:
     default: 5000
     description: The size of the loopback file used by the cinder LVM driver.
     type: number
+  CinderNfsMountOptions:
+    default: ''
+    description: >
+      Mount options for NFS mounts used by Cinder NFS backend. Effective
+      when CinderEnableNfsBackend is true.
+    type: string
+  CinderNfsServers:
+    default: ''
+    description: >
+      NFS servers used by Cinder NFS backend. Effective when
+      CinderEnableNfsBackend is true.
+    type: comma_delimited_list
   CinderPassword:
     default: unset
     description: The password for the cinder service account, used by cinder-api.
@@ -241,6 +309,8 @@ parameters:
   ControllerCount:
     type: number
     default: 1
+    constraints:
+      - range: {min: 1}
   controllerExtraConfig:
     default: {}
     description: |
@@ -261,6 +331,10 @@ parameters:
     default: 'br-ex'
     description: Interface where virtual ip will be assigned.
     type: string
+  EnableFencing:
+    default: false
+    description: Whether to enable fencing in Pacemaker or not.
+    type: boolean
   EnableGalera:
     default: true
     description: Whether to use Galera instead of regular MariaDB.
@@ -276,39 +350,40 @@ parameters:
   ExtraConfig:
     default: {}
     description: |
-      Additional configuration to inject into the cluster. The JSON should have
+      Additional configuration to inject into the cluster. The format required
+      may be implementation specific, e.g puppet hieradata. Any role specific
+      ExtraConfig, e.g controllerExtraConfig takes precedence over ExtraConfig.
+    type: json
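Several parameters above switch to comma_delimited_list (NtpServer, HorizonAllowedHosts), so list syntax is now accepted in environment files; a sketch with placeholder values:

parameter_defaults:
  NtpServer: ['0.pool.ntp.org', '1.pool.ntp.org']
  HorizonAllowedHosts: ['horizon.example.com', '192.0.2.30']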
+  FencingConfig:
+    default: {}
+    description: |
+      Pacemaker fencing configuration. The JSON should have
       the following structure:
-        {"FILEKEY":
-          {"config":
-            [{"section": "SECTIONNAME",
-              "values":
-                [{"option": "OPTIONNAME",
-                  "value": "VALUENAME"
-                 }
-                ]
-             }
-            ]
-          }
-        }
+        {
+          "devices": [
+            {
+              "agent": "AGENT_NAME",
+              "host_mac": "HOST_MAC_ADDRESS",
+              "params": {"PARAM_NAME": "PARAM_VALUE"}
+            }
+          ]
+        }
       For instance:
-        {"nova":
-          {"config":
-            [{"section": "default",
-              "values":
-                [{"option": "force_config_drive",
-                  "value": "always"
-                 }
-                ]
-             },
-             {"section": "cells",
-              "values":
-                [{"option": "driver",
-                  "value": "nova.cells.rpc_driver.CellsRPCDriver"
-                 }
-                ]
-             }
-            ]
-          }
-        }
+        {
+          "devices": [
+            {
+              "agent": "fence_xvm",
+              "host_mac": "52:54:00:aa:bb:cc",
+              "params": {
+                "multicast_address": "225.0.0.12",
+                "port": "baremetal_0",
+                "manage_fw": true,
+                "manage_key_file": true,
+                "key_file": "/etc/fence_xvm.key",
+                "key_file_password": "abcdef"
+              }
+            }
+          ]
+        }
     type: json
   GlanceLogFile:
@@ -363,6 +438,16 @@ parameters:
     description: Keystone key for signing tokens.
     type: string
     hidden: true
+  KeystoneNotificationDriver:
+    description: Comma-separated list of Oslo notification drivers used by Keystone
+    default: ['messaging']
+    type: comma_delimited_list
+  KeystoneNotificationFormat:
+    description: The Keystone notification format
+    default: 'basic'
+    type: string
+    constraints:
+      - allowed_values: [ 'basic', 'cadf' ]
   MysqlInnodbBufferPoolSize:
     description: >
         Specifies the size of the buffer pool in megabytes. Setting to
@@ -370,6 +455,10 @@ parameters:
         lower level default.
     type: number
     default: 0
+  MysqlMaxConnections:
+    description: Configures MySQL max_connections config setting
+    type: number
+    default: 4096
   NeutronDnsmasqOptions:
     default: 'dhcp-option-force=26,1400'
     description: Dnsmasq options for neutron-dhcp-agent. The default value here forces MTU to be set to 1400 to account for the tunnel overhead.
@@ -496,7 +585,7 @@ parameters:
       CinderIscsiNetwork: storage
       GlanceApiNetwork: storage
       GlanceRegistryNetwork: internal_api
-      KeystoneAdminApiNetwork: internal_api
+      KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints
       KeystonePublicApiNetwork: internal_api
       NeutronApiNetwork: internal_api
       HeatApiNetwork: internal_api
@@ -512,6 +601,11 @@ parameters:
       MysqlNetwork: internal_api
       CephClusterNetwork: storage_mgmt
       CephPublicNetwork: storage
+      ControllerHostnameResolveNetwork: internal_api
+      ComputeHostnameResolveNetwork: internal_api
+      BlockStorageHostnameResolveNetwork: internal_api
+      ObjectStorageHostnameResolveNetwork: internal_api
+      CephStorageHostnameResolveNetwork: storage
     description: Mapping of service_name -> network name. Typically set
                  via parameter_defaults in the resource registry.
     type: json
@@ -528,6 +622,12 @@ parameters:
     type: string
     constraints:
       - custom_constraint: nova.flavor
+  BlockStorageExtraConfig:
+    default: {}
+    description: |
+      BlockStorage specific configuration to inject into the cluster. Same
+      structure as ExtraConfig.
+    type: json
 
   # Object storage specific parameters
   ObjectStorageCount:
@@ -541,6 +641,13 @@ parameters:
   SwiftStorageImage:
     default: overcloud-swift-storage
     type: string
+  ObjectStorageExtraConfig:
+    default: {}
+    description: |
+      ObjectStorage specific configuration to inject into the cluster. Same
+      structure as ExtraConfig.
+    type: json
+
 
   # Ceph storage specific parameters
   CephStorageCount:
@@ -555,6 +662,12 @@ parameters:
     type: string
     constraints:
       - custom_constraint: nova.flavor
+  CephStorageExtraConfig:
+    default: {}
+    description: |
+      CephStorage specific configuration to inject into the cluster. Same
+      structure as ExtraConfig.
+    type: json
 
   # Hostname format for each role
   # Note %index% is translated into the index of the node, e.g 0/1/2 etc
@@ -588,7 +701,46 @@ parameters:
     description: >
       Setting to a previously unused value during stack-update will trigger
       package update on all nodes
+  DeployIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+
+  # If you want to remove a specific node from a resource group, you can pass
+  # the node name or id as a <Group>RemovalPolicies parameter, for example:
+  # ComputeRemovalPolicies: [{'resource_list': ['0']}]
+  ControllerRemovalPolicies:
+    default: []
+    type: json
+    description: >
+      List of resources to be removed from ControllerResourceGroup when
+      doing an update which requires removal of specific resources.
+  ComputeRemovalPolicies:
+    default: []
+    type: json
+    description: >
+      List of resources to be removed from ComputeResourceGroup when
+      doing an update which requires removal of specific resources.
+  BlockStorageRemovalPolicies:
+    default: []
+    type: json
+    description: >
+      List of resources to be removed from BlockStorageResourceGroup when
+      doing an update which requires removal of specific resources.
+  ObjectStorageRemovalPolicies:
+    default: []
+    type: json
+    description: >
+      List of resources to be removed from ObjectStorageResourceGroup when
+      doing an update which requires removal of specific resources.
+  CephStorageRemovalPolicies:
+    default: []
+    type: json
+    description: >
+      List of resources to be removed from CephStorageResourceGroup when
+      doing an update which requires removal of specific resources.
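Combined with the <Group>RemovalPolicies parameters above, scaling down a specific node follows the pattern already hinted at in the inline comment; as a sketch (the index and count are illustrative):

parameter_defaults:
  # remove the compute node at index 0 on the next stack-update
  ComputeRemovalPolicies: [{'resource_list': ['0']}]
  ComputeCount: 1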
resources: @@ -606,11 +758,29 @@ resources: properties: length: 10 + EndpointMap: + type: OS::TripleO::EndpointMap + properties: + CloudName: {get_param: CloudName} + CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} + CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} + GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} + GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} + HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} + KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} + MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} + NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} + SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} + PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]} + Controller: type: OS::Heat::ResourceGroup depends_on: Networks properties: count: {get_param: ControllerCount} + removal_policies: {get_param: ControllerRemovalPolicies} resource_def: type: OS::TripleO::Controller properties: @@ -620,28 +790,33 @@ resources: CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret} CeilometerPassword: {get_param: CeilometerPassword} CinderLVMLoopDeviceSize: {get_param: CinderLVMLoopDeviceSize} + CinderNfsMountOptions: {get_param: CinderNfsMountOptions} + CinderNfsServers: {get_param: CinderNfsServers} CinderPassword: {get_param: CinderPassword} CinderISCSIHelper: {get_param: CinderISCSIHelper} + CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend} CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend} CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend} CloudName: {get_param: CloudName} ControlVirtualInterface: {get_param: ControlVirtualInterface} ControllerExtraConfig: {get_param: controllerExtraConfig} Debug: {get_param: Debug} + EnableFencing: {get_param: EnableFencing} EnableGalera: {get_param: EnableGalera} EnableCephStorage: {get_param: ControllerEnableCephStorage} EnableSwiftStorage: {get_param: ControllerEnableSwiftStorage} ExtraConfig: {get_param: ExtraConfig} + FencingConfig: {get_param: FencingConfig} Flavor: {get_param: OvercloudControlFlavor} - GlancePort: {get_param: GlancePort} - GlanceProtocol: {get_param: GlanceProtocol} GlancePassword: {get_param: GlancePassword} GlanceBackend: {get_param: GlanceBackend} GlanceNotifierStrategy: {get_param: GlanceNotifierStrategy} GlanceLogFile: {get_param: GlanceLogFile} + HAProxySyslogAddress: {get_param: HAProxySyslogAddress} HeatPassword: {get_param: HeatPassword} HeatStackDomainAdminPassword: {get_param: HeatStackDomainAdminPassword} HeatAuthEncryptionKey: {get_resource: HeatAuthEncryptionKey} + HorizonAllowedHosts: {get_param: HorizonAllowedHosts} HorizonSecret: {get_resource: HorizonSecret} Image: {get_param: controllerImage} ImageUpdatePolicy: {get_param: ImageUpdatePolicy} @@ -651,8 +826,11 @@ resources: KeystoneSigningKey: {get_param: KeystoneSigningKey} KeystoneSSLCertificate: {get_param: 
KeystoneSSLCertificate} KeystoneSSLCertificateKey: {get_param: KeystoneSSLCertificateKey} + KeystoneNotificationDriver: {get_param: KeystoneNotificationDriver} + KeystoneNotificationFormat: {get_param: KeystoneNotificationFormat} MysqlClusterUniquePart: {get_attr: [MysqlClusterUniquePart, value]} MysqlInnodbBufferPoolSize: {get_param: MysqlInnodbBufferPoolSize} + MysqlMaxConnections: {get_param: MysqlMaxConnections} MysqlRootPassword: {get_attr: [MysqlRootPassword, value]} NeutronPublicInterfaceIP: {get_param: NeutronPublicInterfaceIP} NeutronFlatNetworks: {get_param: NeutronFlatNetworks} @@ -668,13 +846,18 @@ resources: NeutronDVR: {get_param: NeutronDVR} NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret} NeutronAgentMode: {get_param: NeutronAgentMode} + NeutronCorePlugin: {get_param: NeutronCorePlugin} + NeutronServicePlugins: {get_param: NeutronServicePlugins} + NeutronTypeDrivers: {get_param: NeutronTypeDrivers} NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers} NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover} NeutronL3HA: {get_param: NeutronL3HA} + NeutronDhcpAgentsPerNetwork: {get_param: NeutronDhcpAgentsPerNetwork} NeutronNetworkType: {get_param: NeutronNetworkType} NeutronTunnelTypes: {get_param: NeutronTunnelTypes} NovaPassword: {get_param: NovaPassword} NtpServer: {get_param: NtpServer} + MongoDbNoJournal: {get_param: MongoDbNoJournal} PcsdPassword: {get_resource: PcsdPassword} PublicVirtualInterface: {get_param: PublicVirtualInterface} RabbitPassword: {get_param: RabbitPassword} @@ -682,6 +865,7 @@ resources: RabbitCookie: {get_attr: [RabbitCookie, value]} RabbitClientUseSSL: {get_param: RabbitClientUseSSL} RabbitClientPort: {get_param: RabbitClientPort} + RabbitFDLimit: {get_param: RabbitFDLimit} SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName} SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword} RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]} @@ -695,11 +879,18 @@ resources: SwiftPassword: {get_param: SwiftPassword} SwiftReplicas: { get_param: SwiftReplicas} VirtualIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} # deprecated. Use per service VIP settings instead now. - PublicVirtualIP: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]} # deprecated. Use per service VIP settings instead now. 
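Most of the newly plumbed controller parameters are plain parameter_defaults overrides. A hedged example with illustrative values (these are not recommendations):

parameter_defaults:
  NeutronDhcpAgentsPerNetwork: 2
  MysqlMaxConnections: 8192
  RabbitFDLimit: '65536'
  KeystoneNotificationFormat: 'cadf'
  MongoDbNoJournal: false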
+ PublicVirtualIP: {get_attr: [PublicVirtualIP, ip_address]} ServiceNetMap: {get_param: ServiceNetMap} + EndpointMap: {get_attr: [EndpointMap, endpoint_map]} + CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} + CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} + GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} + NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} + SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} UpdateIdentifier: {get_param: UpdateIdentifier} @@ -714,6 +905,7 @@ resources: depends_on: Networks properties: count: {get_param: ComputeCount} + removal_policies: {get_param: ComputeRemovalPolicies} resource_def: type: OS::TripleO::Compute properties: @@ -721,16 +913,17 @@ resources: CeilometerComputeAgent: {get_param: CeilometerComputeAgent} CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret} CeilometerPassword: {get_param: CeilometerPassword} + CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend} + CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend} Debug: {get_param: Debug} ExtraConfig: {get_param: ExtraConfig} Flavor: {get_param: OvercloudComputeFlavor} GlanceHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} - GlancePort: {get_param: GlancePort} - GlanceProtocol: {get_param: GlanceProtocol} Image: {get_param: NovaImage} ImageUpdatePolicy: {get_param: ImageUpdatePolicy} KeyName: {get_param: KeyName} - KeystoneHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} + KeystoneAdminApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} NeutronBridgeMappings: {get_param: NeutronBridgeMappings} NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling} NeutronFlatNetworks: {get_param: NeutronFlatNetworks} @@ -745,6 +938,9 @@ resources: NeutronMetadataProxySharedSecret: {get_param: NeutronMetadataProxySharedSecret} NeutronAgentMode: {get_param: NeutronComputeAgentMode} NeutronPublicInterfaceRawDevice: {get_param: NeutronPublicInterfaceRawDevice} + NeutronCorePlugin: {get_param: NeutronCorePlugin} + NeutronServicePlugins: {get_param: NeutronServicePlugins} + NeutronTypeDrivers: {get_param: NeutronTypeDrivers} NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers} # L3 HA and Failover is not relevant for Computes, should be removed NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover} @@ -754,7 +950,7 @@ resources: NovaComputeExtraConfig: {get_param: NovaComputeExtraConfig} NovaComputeLibvirtType: {get_param: 
NovaComputeLibvirtType} NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend} - NovaPublicIP: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]} + NovaPublicIP: {get_attr: [PublicVirtualIP, ip_address]} NovaPassword: {get_param: NovaPassword} NtpServer: {get_param: NtpServer} RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} @@ -765,6 +961,7 @@ resources: SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName} SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword} ServiceNetMap: {get_param: ServiceNetMap} + EndpointMap: {get_attr: [EndpointMap, endpoint_map]} UpdateIdentifier: {get_param: UpdateIdentifier} Hostname: str_replace: @@ -777,6 +974,7 @@ resources: depends_on: Networks properties: count: {get_param: BlockStorageCount} + removal_policies: {get_param: BlockStorageRemovalPolicies} resource_def: type: OS::TripleO::BlockStorage properties: @@ -790,8 +988,6 @@ resources: KeyName: {get_param: KeyName} Flavor: {get_param: OvercloudBlockStorageFlavor} VirtualIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} - GlancePort: {get_param: GlancePort} - GlanceProtocol: {get_param: GlanceProtocol} GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} RabbitPassword: {get_param: RabbitPassword} RabbitUserName: {get_param: RabbitUserName} @@ -805,13 +1001,17 @@ resources: params: '%stackname%': {get_param: 'OS::stack_name'} ServiceNetMap: {get_param: ServiceNetMap} + EndpointMap: {get_attr: [EndpointMap, endpoint_map]} MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} + ExtraConfig: {get_param: ExtraConfig} + BlockStorageExtraConfig: {get_param: BlockStorageExtraConfig} ObjectStorage: type: OS::Heat::ResourceGroup depends_on: Networks properties: count: {get_param: ObjectStorageCount} + removal_policies: {get_param: ObjectStorageRemovalPolicies} resource_def: type: OS::TripleO::ObjectStorage properties: @@ -831,12 +1031,15 @@ resources: template: {get_param: ObjectStorageHostnameFormat} params: '%stackname%': {get_param: 'OS::stack_name'} + ExtraConfig: {get_param: ExtraConfig} + ObjectStorageExtraConfig: {get_param: ObjectStorageExtraConfig} CephStorage: type: OS::Heat::ResourceGroup depends_on: Networks properties: count: {get_param: CephStorageCount} + removal_policies: {get_param: CephStorageRemovalPolicies} resource_def: type: OS::TripleO::CephStorage properties: @@ -851,10 +1054,13 @@ resources: template: {get_param: CephStorageHostnameFormat} params: '%stackname%': {get_param: 'OS::stack_name'} + ExtraConfig: {get_param: ExtraConfig} + CephStorageExtraConfig: {get_param: CephStorageExtraConfig} ControllerIpListMap: type: OS::TripleO::Network::Ports::NetIpListMap properties: + ControlPlaneIpList: {get_attr: [Controller, ip_address]} ExternalIpList: {get_attr: [Controller, external_ip_address]} InternalApiIpList: {get_attr: [Controller, internal_api_ip_address]} StorageIpList: {get_attr: [Controller, storage_ip_address]} @@ -914,7 +1120,7 @@ resources: depends_on: Networks properties: name: control_virtual_ip - network_id: {get_param: NeutronControlPlaneID} + network: {get_param: NeutronControlPlaneID} fixed_ips: {get_param: ControlFixedIPs} replacement_policy: AUTO @@ -927,15 +1133,15 @@ resources: PortName: redis_virtual_ip NetworkName: {get_param: [ServiceNetMap, RedisNetwork]} - # same as external + # The public VIP is on the External net, falls back to ctlplane PublicVirtualIP: - type: OS::Neutron::Port depends_on: Networks + 
type: OS::TripleO::Controller::Ports::ExternalPort properties: - name: public_virtual_ip - network: {get_param: PublicVirtualNetwork} - fixed_ips: {get_param: PublicVirtualFixedIPs} - replacement_policy: AUTO + ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} + ControlPlaneNetwork: {get_param: NeutronControlPlaneID} + PortName: public_virtual_ip + FixedIPs: {get_param: PublicVirtualFixedIPs} InternalApiVirtualIP: depends_on: Networks @@ -961,7 +1167,8 @@ resources: VipMap: type: OS::TripleO::Network::Ports::NetIpMap properties: - ExternalIp: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]} + ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} + ExternalIp: {get_attr: [PublicVirtualIP, ip_address]} InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]} StorageIp: {get_attr: [StorageVirtualIP, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]} @@ -994,7 +1201,7 @@ resources: rabbit_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} # direct configuration of Virtual IPs for each network control_virtual_ip: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} - public_virtual_ip: {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]} + public_virtual_ip: {get_attr: [PublicVirtualIP, ip_address]} internal_api_virtual_ip: {get_attr: [InternalApiVirtualIP, ip_address]} storage_virtual_ip: {get_attr: [StorageVirtualIP, ip_address]} storage_mgmt_virtual_ip: {get_attr: [StorageMgmtVirtualIP, ip_address]} @@ -1051,9 +1258,12 @@ resources: CephClusterConfig: type: OS::TripleO::CephClusterConfig::SoftwareConfig properties: + ceph_storage_count: {get_param: CephStorageCount} ceph_fsid: {get_param: CephClusterFSID} ceph_mon_key: {get_param: CephMonKey} ceph_admin_key: {get_param: CephAdminKey} + ceph_client_key: {get_param: CephClientKey} + ceph_external_mon_ips: {get_param: CephExternalMonHost} ceph_mon_names: {get_attr: [Controller, hostname]} ceph_mon_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} @@ -1109,49 +1319,147 @@ resources: config: {get_attr: [allNodesConfig, config_id]} servers: {get_attr: [CephStorage, attributes, nova_server_resource]} + # All Nodes Validations + AllNodesValidationConfig: + type: OS::TripleO::AllNodes::Validation + properties: + PingTestIps: + list_join: + - ' ' + - - {get_attr: [Controller, resource.0.external_ip_address]} + - {get_attr: [Controller, resource.0.internal_api_ip_address]} + - {get_attr: [Controller, resource.0.storage_ip_address]} + - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]} + - {get_attr: [Controller, resource.0.tenant_ip_address]} + + ControllerAllNodesValidationDeployment: + type: OS::Heat::StructuredDeployments + depends_on: ControllerAllNodesDeployment + properties: + config: {get_resource: AllNodesValidationConfig} + servers: {get_attr: [Controller, attributes, nova_server_resource]} + + ComputeAllNodesValidationDeployment: + type: OS::Heat::StructuredDeployments + depends_on: ComputeAllNodesDeployment + properties: + config: {get_resource: AllNodesValidationConfig} + servers: {get_attr: [Compute, attributes, nova_server_resource]} + + BlockStorageAllNodesValidationDeployment: + type: OS::Heat::StructuredDeployments + depends_on: BlockStorageAllNodesDeployment + properties: + config: {get_resource: AllNodesValidationConfig} + servers: {get_attr: [BlockStorage, attributes, nova_server_resource]} + + ObjectStorageAllNodesValidationDeployment: + type: 
OS::Heat::StructuredDeployments + depends_on: ObjectStorageAllNodesDeployment + properties: + config: {get_resource: AllNodesValidationConfig} + servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]} + + CephStorageAllNodesValidationDeployment: + type: OS::Heat::StructuredDeployments + depends_on: CephStorageAllNodesDeployment + properties: + config: {get_resource: AllNodesValidationConfig} + servers: {get_attr: [CephStorage, attributes, nova_server_resource]} + + # Optional ExtraConfig for all nodes - all roles are passed in here, but + # the nested template may configure each role differently (or not at all) + AllNodesExtraConfig: + type: OS::TripleO::AllNodesExtraConfig + properties: + controller_servers: {get_attr: [Controller, attributes, nova_server_resource]} + compute_servers: {get_attr: [Compute, attributes, nova_server_resource]} + blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]} + objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]} + cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]} + # Nested stack deployment runs after all other controller deployments ControllerNodesPostDeployment: type: OS::TripleO::ControllerPostDeployment depends_on: [ControllerBootstrapNodeDeployment, ControllerAllNodesDeployment, ControllerSwiftDeployment, ControllerCephDeployment] properties: servers: {get_attr: [Controller, attributes, nova_server_resource]} - NodeConfigIdentifiers: {get_attr: [Controller, attributes, config_identifier]} + NodeConfigIdentifiers: + allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} + controller_config: {get_attr: [Controller, attributes, config_identifier]} + deployment_identifier: {get_param: DeployIdentifier} ComputeNodesPostDeployment: type: OS::TripleO::ComputePostDeployment depends_on: [ComputeAllNodesDeployment, ComputeCephDeployment] properties: servers: {get_attr: [Compute, attributes, nova_server_resource]} - NodeConfigIdentifiers: {get_attr: [Compute, attributes, config_identifier]} + NodeConfigIdentifiers: + allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} + compute_config: {get_attr: [Compute, attributes, config_identifier]} + deployment_identifier: {get_param: DeployIdentifier} ObjectStorageNodesPostDeployment: type: OS::TripleO::ObjectStoragePostDeployment depends_on: [ObjectStorageSwiftDeployment, ObjectStorageAllNodesDeployment] properties: servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]} - NodeConfigIdentifiers: {get_attr: [ObjectStorage, attributes, config_identifier]} - + NodeConfigIdentifiers: + allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} + objectstorage_config: {get_attr: [ObjectStorage, attributes, config_identifier]} + deployment_identifier: {get_param: DeployIdentifier} BlockStorageNodesPostDeployment: type: OS::TripleO::BlockStoragePostDeployment depends_on: [ControllerNodesPostDeployment, BlockStorageAllNodesDeployment] properties: servers: {get_attr: [BlockStorage, attributes, nova_server_resource]} - NodeConfigIdentifiers: {get_attr: [BlockStorage, attributes, config_identifier]} + NodeConfigIdentifiers: + allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} + blockstorage_config: {get_attr: [BlockStorage, attributes, config_identifier]} + deployment_identifier: {get_param: DeployIdentifier} CephStorageNodesPostDeployment: type: OS::TripleO::CephStoragePostDeployment depends_on: [ControllerNodesPostDeployment, 
CephStorageCephDeployment, CephStorageAllNodesDeployment] properties: servers: {get_attr: [CephStorage, attributes, nova_server_resource]} - NodeConfigIdentifiers: {get_attr: [CephStorage, attributes, config_identifier]} + NodeConfigIdentifiers: + allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]} + cephstorage_config: {get_attr: [CephStorage, attributes, config_identifier]} + deployment_identifier: {get_param: DeployIdentifier} outputs: KeystoneURL: description: URL for the Overcloud Keystone service - value: - list_join: - - '' - - - http:// - - {get_attr: [PublicVirtualIP, fixed_ips, 0, ip_address]} - - :5000/v2.0/ + value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]} + KeystoneAdminVip: + description: Keystone Admin VIP endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + PublicVip: + description: Controller VIP for public API endpoints + value: {get_attr: [PublicVirtualIP, ip_address]} + CeilometerInternalVip: + description: VIP for Ceilometer API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]} + CinderInternalVip: + description: VIP for Cinder API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} + GlanceInternalVip: + description: VIP for Glance API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} + HeatInternalVip: + description: VIP for Heat API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} + KeystoneInternalVip: + description: VIP for Keystone API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} + NeutronInternalVip: + description: VIP for Neutron API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} + NovaInternalVip: + description: VIP for Nova API internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]} + SwiftInternalVip: + description: VIP for Swift Proxy internal endpoint + value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index 060f4c81..2bc519bb 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -238,6 +238,7 @@ resources: heat::rabbit_hosts: *rabbit_nodes_array neutron::rabbit_hosts: *rabbit_nodes_array nova::rabbit_hosts: *rabbit_nodes_array + keystone::rabbit_hosts: *rabbit_nodes_array outputs: config_id: diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml index 33b18574..96198c3f 100644 --- a/puppet/ceph-cluster-config.yaml +++ b/puppet/ceph-cluster-config.yaml @@ -2,6 +2,18 @@ heat_template_version: 2015-04-30 description: 'Ceph Cluster config data for Puppet' parameters: + ceph_storage_count: + default: 0 + type: number + description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation. + ceph_external_mon_ips: + default: '' + type: string + description: List of external Ceph Mon host IPs. + ceph_client_key: + default: '' + type: string + description: Ceph key used to create the client user keyring. 
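The new ceph_storage_count, ceph_external_mon_ips and ceph_client_key inputs come from the top-level CephStorageCount, CephExternalMonHost and CephClientKey parameters, which opens the door to pointing the overcloud at a pre-existing Ceph cluster instead of deploying one. A rough sketch, assuming the registered CephClusterConfig template honours these values (the FSID, key and addresses are placeholders):

parameter_defaults:
  CephStorageCount: 0                        # no managed Ceph OSD nodes
  CephClusterFSID: '4b5c8c0a-...'            # FSID of the external cluster (placeholder)
  CephExternalMonHost: '192.0.2.30,192.0.2.31'
  CephClientKey: 'AQ...=='                   # cephx key for the client user (placeholder)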
ceph_fsid: default: '' type: string @@ -15,6 +27,18 @@ parameters: type: comma_delimited_list ceph_mon_ips: type: comma_delimited_list + NovaRbdPoolName: + default: vms + type: string + CinderRbdPoolName: + default: volumes + type: string + GlanceRbdPoolName: + default: images + type: string + CephClientUserName: + default: openstack + type: string resources: CephClusterConfigImpl: @@ -26,6 +50,7 @@ resources: datafiles: ceph_cluster: mapped_data: + ceph_storage_count: {get_param: ceph_storage_count} ceph_mon_initial_members: list_join: - ',' @@ -52,15 +77,34 @@ resources: keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring', cap_mon: 'allow profile bootstrap-osd' }, - client.openstack: { + client.CLIENT_USER: { secret: 'ADMIN_KEY', mode: '0644', cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images' + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL' } }" params: + CLIENT_USER: {get_param: CephClientUserName} ADMIN_KEY: {get_param: ceph_admin_key} + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} + cinder_rbd_pool_name: {get_param: CinderRbdPoolName} + glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} + nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} + glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName} + nova::compute::rbd::rbd_keyring: + list_join: + - '.' + - - 'client' + - {get_param: CephClientUserName} + ceph_client_user_name: {get_param: CephClientUserName} + ceph_pools: + - {get_param: CinderRbdPoolName} + - {get_param: NovaRbdPoolName} + - {get_param: GlanceRbdPoolName} outputs: config_id: diff --git a/puppet/ceph-storage-post-puppet.yaml b/puppet/ceph-storage-post.yaml index 1b5b944d..0f7dd36f 100644 --- a/puppet/ceph-storage-post-puppet.yaml +++ b/puppet/ceph-storage-post.yaml @@ -4,6 +4,10 @@ description: > OpenStack ceph storage node post deployment for Puppet parameters: + ConfigDebug: + default: false + description: Whether to run config management (e.g. Puppet) in debug mode. + type: boolean servers: type: json NodeConfigIdentifiers: @@ -16,6 +20,8 @@ resources: type: OS::Heat::SoftwareConfig properties: group: puppet + options: + enable_debug: {get_param: ConfigDebug} outputs: - name: result config: diff --git a/puppet/ceph-storage-puppet.yaml b/puppet/ceph-storage.yaml index 245d8ebb..0d968504 100644 --- a/puppet/ceph-storage-puppet.yaml +++ b/puppet/ceph-storage.yaml @@ -22,8 +22,9 @@ parameters: constraints: - custom_constraint: nova.keypair NtpServer: - type: string default: '' + description: Comma-separated list of ntp servers + type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -42,6 +43,18 @@ parameters: Hostname: type: string default: '' # Defaults to Heat created hostname + ExtraConfig: + default: {} + description: | + Additional hiera configuration to inject into the cluster. Note + that CephStorageExtraConfig takes precedence over ExtraConfig. + type: json + CephStorageExtraConfig: + default: {} + description: | + Role specific additional hiera configuration to inject into the cluster. 
+ type: json + resources: CephStorage: @@ -54,9 +67,26 @@ resources: networks: - network: ctlplane user_data_format: SOFTWARE_CONFIG - user_data: {get_resource: NodeUserData} + user_data: {get_resource: UserData} name: {get_param: Hostname} + # Combine the NodeAdminUserData and NodeUserData mime archives + UserData: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: NodeAdminUserData} + type: multipart + - config: {get_resource: NodeUserData} + type: multipart + + # Creates the "heat-admin" user if configured via the environment + # Should return a OS::Heat::MultipartMime reference via OS::stack_id + NodeAdminUserData: + type: OS::TripleO::NodeAdminUserData + + # For optional operator additional userdata + # Should return a OS::Heat::MultipartMime reference via OS::stack_id NodeUserData: type: OS::TripleO::NodeUserData @@ -73,14 +103,23 @@ resources: NetworkConfig: type: OS::TripleO::CephStorage::Net::SoftwareConfig properties: + ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} - NetIpSubnetMap: + NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: - StorageIp: {get_attr: [StoragePort, ip_subnet]} - StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_subnet]} + ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + StorageIp: {get_attr: [StoragePort, ip_address]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + + NetIpSubnetMap: + type: OS::TripleO::Network::Ports::NetIpSubnetMap + properties: + ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} NetworkDeployment: type: OS::TripleO::SoftwareDeployment @@ -95,14 +134,11 @@ resources: config: {get_resource: CephStorageConfig} server: {get_resource: CephStorage} input_values: - ntp_servers: - str_replace: - template: '["server"]' - params: - server: {get_param: NtpServer} + ntp_servers: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} - ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} - ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} + ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} CephStorageConfig: type: OS::Heat::StructuredConfig @@ -111,7 +147,10 @@ resources: config: hiera: hierarchy: + - '"%{::uuid}"' - heat_config_%{::deploy_config_name} + - ceph_extraconfig + - extraconfig - ceph_cluster # provided by CephClusterConfig - ceph - '"%{::osfamily}"' @@ -119,14 +158,34 @@ resources: datafiles: common: raw_data: {get_file: hieradata/common.yaml} + ceph_extraconfig: + mapped_data: {get_param: CephStorageExtraConfig} + extraconfig: + mapped_data: {get_param: ExtraConfig} ceph: raw_data: {get_file: hieradata/ceph.yaml} mapped_data: ntp::servers: {get_input: ntp_servers} - enable_package_install: {get_input: enable_package_install} + tripleo::packages::enable_install: {get_input: enable_package_install} + tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} 
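UserData now concatenates two hooks: OS::TripleO::NodeAdminUserData (creates the "heat-admin" user when enabled via the environment) and OS::TripleO::NodeUserData (free-form operator first-boot data). A sketch of a custom first-boot template that could be registered for OS::TripleO::NodeUserData — the file name and cloud-config payload are made up for illustration:

# firstboot/set-timezone.yaml (hypothetical), mapped in resource_registry as:
#   OS::TripleO::NodeUserData: firstboot/set-timezone.yaml
heat_template_version: 2015-04-30
resources:
  userdata:
    type: OS::Heat::MultipartMime
    properties:
      parts:
      - config: {get_resource: timezone_config}
  timezone_config:
    type: OS::Heat::CloudConfig
    properties:
      cloud_config:
        timezone: UTC
outputs:
  # Parent templates expect an OS::Heat::MultipartMime reference here.
  OS::stack_id:
    value: {get_resource: userdata}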
ceph::profile::params::cluster_network: {get_input: ceph_cluster_network} ceph::profile::params::public_network: {get_input: ceph_public_network} + # Hook for site-specific additional pre-deployment config, e.g extra hieradata + CephStorageExtraConfigPre: + depends_on: CephStorageDeployment + type: OS::TripleO::CephStorageExtraConfigPre + properties: + server: {get_resource: CephStorage} + + # Hook for site-specific additional pre-deployment config, + # applying to all nodes, e.g node registration/unregistration + NodeExtraConfig: + depends_on: CephStorageExtraConfigPre + type: OS::TripleO::NodeExtraConfig + properties: + server: {get_resource: CephStorage} + UpdateConfig: type: OS::TripleO::Tasks::PackageUpdate @@ -145,7 +204,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]} HOST: {get_attr: [CephStorage, name]} nova_server_resource: description: Heat resource handle for the ceph storage server @@ -159,5 +218,9 @@ outputs: value: {get_attr: [StorageMgmtPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying - value: {get_attr: [CephStorageDeployment, deploy_stdout]} - + value: + list_join: + - ',' + - - {get_attr: [CephStorageDeployment, deploy_stdout]} + - {get_attr: [CephStorageExtraConfigPre, deploy_stdout]} + - {get_param: UpdateIdentifier} diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml index 24d2b8a3..c97cfcf9 100644 --- a/puppet/cinder-storage-post.yaml +++ b/puppet/cinder-storage-post.yaml @@ -2,6 +2,10 @@ heat_template_version: 2015-04-30 description: 'OpenStack cinder storage post deployment for Puppet' parameters: + ConfigDebug: + default: false + description: Whether to run config management (e.g. Puppet) in debug mode. + type: boolean servers: type: json NodeConfigIdentifiers: @@ -14,6 +18,8 @@ resources: type: OS::Heat::SoftwareConfig properties: group: puppet + options: + enable_debug: {get_param: ConfigDebug} outputs: - name: result config: diff --git a/puppet/cinder-storage-puppet.yaml b/puppet/cinder-storage.yaml index cc8d17c4..b536418d 100644 --- a/puppet/cinder-storage-puppet.yaml +++ b/puppet/cinder-storage.yaml @@ -31,54 +31,19 @@ parameters: ExtraConfig: default: {} description: | - Additional configuration to inject into the cluster. The JSON should have - the following structure: - {"FILEKEY": - {"config": - [{"section": "SECTIONNAME", - "values": - [{"option": "OPTIONNAME", - "value": "VALUENAME" - } - ] - } - ] - } - } - For instance: - {"nova": - {"config": - [{"section": "default", - "values": - [{"option": "force_config_drive", - "value": "always" - } - ] - }, - {"section": "cells", - "values": - [{"option": "driver", - "value": "nova.cells.rpc_driver.CellsRPCDriver" - } - ] - } - ] - } - } + Additional hiera configuration to inject into the cluster. Note + that BlockStorageExtraConfig takes precedence over ExtraConfig. + type: json + BlockStorageExtraConfig: + default: {} + description: | + Role specific additional hiera configuration to inject into the cluster. type: json Flavor: description: Flavor for block storage nodes to request when deploying. type: string constraints: - custom_constraint: nova.flavor - GlancePort: - default: "9292" - description: Glance port. 
- type: string - GlanceProtocol: - default: http - description: Protocol to use when connecting to glance, set to https for SSL. - type: string KeyName: default: default description: Name of an existing EC2 KeyPair to enable SSH access to the instances @@ -86,6 +51,7 @@ parameters: RabbitPassword: default: 'guest' type: string + hidden: true RabbitUserName: default: 'guest' type: string @@ -109,8 +75,9 @@ parameters: type: string hidden: true NtpServer: - type: string default: '' + description: Comma-separated list of ntp servers + type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -129,6 +96,11 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json GlanceApiVirtualIP: type: string default: '' @@ -147,9 +119,26 @@ resources: networks: - network: ctlplane user_data_format: SOFTWARE_CONFIG - user_data: {get_resource: NodeUserData} + user_data: {get_resource: UserData} name: {get_param: Hostname} + # Combine the NodeAdminUserData and NodeUserData mime archives + UserData: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: NodeAdminUserData} + type: multipart + - config: {get_resource: NodeUserData} + type: multipart + + # Creates the "heat-admin" user if configured via the environment + # Should return a OS::Heat::MultipartMime reference via OS::stack_id + NodeAdminUserData: + type: OS::TripleO::NodeAdminUserData + + # For optional operator additional userdata + # Should return a OS::Heat::MultipartMime reference via OS::stack_id NodeUserData: type: OS::TripleO::NodeUserData @@ -171,6 +160,7 @@ resources: NetworkConfig: type: OS::TripleO::BlockStorage::Net::SoftwareConfig properties: + ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} @@ -178,6 +168,7 @@ resources: NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: + ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} @@ -207,24 +198,14 @@ resources: cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend} cinder_iscsi_helper: {get_param: CinderISCSIHelper} cinder_iscsi_ip_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} - glance_api_servers: - list_join: - - '' - - - {get_param: GlanceProtocol} - - '://' - - {get_param: GlanceApiVirtualIP} - - ':' - - {get_param: GlancePort} + glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} rabbit_username: {get_param: RabbitUserName} rabbit_password: {get_param: RabbitPassword} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: - str_replace: - template: '["server"]' - params: - server: {get_param: NtpServer} + ntp_servers: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} + enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} # Map heat metadata into hiera datafiles 
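NtpServer changes from a single string (formerly wrapped by str_replace into a one-element list) to a comma_delimited_list passed straight through to ntp::servers, so multiple servers can be supplied; BlockStorageExtraConfig adds role-specific hieradata on top of the cluster-wide ExtraConfig. An illustrative environment snippet (server names and the hiera key are examples, not defaults):

parameter_defaults:
  NtpServer: 0.pool.ntp.org,1.pool.ntp.org
  BlockStorageExtraConfig:
    cinder::debug: true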
BlockStorageConfig: @@ -234,7 +215,10 @@ resources: config: hiera: hierarchy: + - '"%{::uuid}"' - heat_config_%{::deploy_config_name} + - volume_extraconfig + - extraconfig - volume - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' @@ -242,6 +226,10 @@ resources: datafiles: common: raw_data: {get_file: hieradata/common.yaml} + volume_extraconfig: + mapped_data: {get_param: BlockStorageExtraConfig} + extraconfig: + mapped_data: {get_param: ExtraConfig} volume: raw_data: {get_file: hieradata/volume.yaml} mapped_data: @@ -258,10 +246,19 @@ resources: cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address} cinder::glance::glance_api_servers: {get_input: glance_api_servers} ntp::servers: {get_input: ntp_servers} - enable_package_install: {get_input: enable_package_install} + tripleo::packages::enable_install: {get_input: enable_package_install} + tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} + # Hook for site-specific additional pre-deployment config, + # applying to all nodes, e.g node registration/unregistration + NodeExtraConfig: + depends_on: BlockStorageDeployment + type: OS::TripleO::NodeExtraConfig + properties: + server: {get_resource: BlockStorage} + UpdateConfig: type: OS::TripleO::Tasks::PackageUpdate @@ -280,7 +277,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]} HOST: {get_attr: [BlockStorage, name]} nova_server_resource: description: Heat resource handle for the block storage server @@ -297,4 +294,8 @@ outputs: value: {get_attr: [StorageMgmtPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying - value: {get_attr: [BlockStorageDeployment, deploy_stdout]} + value: + list_join: + - '' + - - {get_attr: [BlockStorageDeployment, deploy_stdout]} + - {get_param: UpdateIdentifier} diff --git a/puppet/compute-post-puppet.yaml b/puppet/compute-post.yaml index b4a6126b..b63b06b4 100644 --- a/puppet/compute-post-puppet.yaml +++ b/puppet/compute-post.yaml @@ -4,6 +4,10 @@ description: > OpenStack compute node post deployment for Puppet. parameters: + ConfigDebug: + default: false + description: Whether to run config management (e.g. Puppet) in debug mode. + type: boolean servers: type: json NodeConfigIdentifiers: @@ -17,6 +21,8 @@ resources: type: OS::Heat::SoftwareConfig properties: group: puppet + options: + enable_debug: {get_param: ConfigDebug} outputs: - name: result config: diff --git a/puppet/compute-puppet.yaml b/puppet/compute.yaml index 7e49bc22..18547732 100644 --- a/puppet/compute-puppet.yaml +++ b/puppet/compute.yaml @@ -25,6 +25,14 @@ parameters: description: The password for the ceilometer service account. type: string hidden: true + CinderEnableNfsBackend: + default: false + description: Whether to enable or not the NFS backend for Cinder + type: boolean + CinderEnableRbdBackend: + default: false + description: Whether to enable or not the Rbd backend for Cinder + type: boolean Debug: default: '' description: Set to True to enable debugging on all services. @@ -32,40 +40,8 @@ parameters: ExtraConfig: default: {} description: | - Additional configuration to inject into the cluster. 
The JSON should have - the following structure: - {"FILEKEY": - {"config": - [{"section": "SECTIONNAME", - "values": - [{"option": "OPTIONNAME", - "value": "VALUENAME" - } - ] - } - ] - } - } - For instance: - {"nova": - {"config": - [{"section": "default", - "values": - [{"option": "force_config_drive", - "value": "always" - } - ] - }, - {"section": "cells", - "values": - [{"option": "driver", - "value": "nova.cells.rpc_driver.CellsRPCDriver" - } - ] - } - ] - } - } + Additional hiera configuration to inject into the cluster. Note + that NovaComputeExtraConfig takes precedence over ExtraConfig. type: json Flavor: description: Flavor for the nova compute node @@ -75,14 +51,6 @@ parameters: GlanceHost: type: string default: '' # Has to be here because of the ignored empty value bug - GlancePort: - default: "9292" - description: Glance port. - type: string - GlanceProtocol: - default: http - description: Protocol to use when connecting to glance, set to https for SSL. - type: string Image: type: string default: overcloud-compute @@ -98,9 +66,12 @@ parameters: default: default constraints: - custom_constraint: nova.keypair - KeystoneHost: + KeystoneAdminApiVirtualIP: type: string default: '' + KeystonePublicApiVirtualIP: + type: string + default: '' NeutronBridgeMappings: description: > The OVS logical->physical bridge mappings to use. See the Neutron @@ -125,7 +96,7 @@ parameters: NeutronNetworkType: type: string description: The tenant network type for Neutron, either gre or vxlan. - default: 'gre' + default: 'vxlan' NeutronNetworkVLANRanges: default: 'datacentre' description: > @@ -151,7 +122,19 @@ parameters: description: | The tunnel types for the Neutron tenant network. To specify multiple values, use a comma separated string, like so: 'gre,vxlan' - default: 'gre' + default: 'vxlan' + NeutronTunnelIdRanges: + description: | + Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges + of GRE tunnel IDs that are available for tenant network allocation + default: ["1:1000", ] + type: comma_delimited_list + NeutronVniRanges: + description: | + Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges + of VXLAN VNI IDs that are available for tenant network allocation + default: ["1:1000", ] + type: comma_delimited_list NeutronPublicInterfaceRawDevice: default: '' type: string @@ -162,6 +145,24 @@ parameters: default: 'unset' description: Shared secret to prevent spoofing type: string + hidden: true + NeutronCorePlugin: + default: 'ml2' + description: | + The core plugin for Neutron. The value should be the entrypoint to be loaded + from neutron.core_plugins namespace. + type: string + NeutronServicePlugins: + default: "router" + description: | + Comma-separated list of service plugin entrypoints to be loaded from the + neutron.service_plugins namespace. + type: comma_delimited_list + NeutronTypeDrivers: + default: "vxlan,vlan,flat,gre" + description: | + Comma-separated list of network type driver entrypoints to be loaded. + type: comma_delimited_list NeutronMechanismDrivers: default: 'openvswitch' description: | @@ -210,8 +211,9 @@ parameters: type: string default: '' # Has to be here because of the ignored empty value bug NtpServer: - type: string default: '' + description: Comma-separated list of ntp servers + type: comma_delimited_list RabbitHost: type: string default: '' # Has to be here because of the ignored empty value bug @@ -252,6 +254,11 @@ parameters: description: Mapping of service_name -> network name. 
Typically set via parameter_defaults in the resource registry. type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json UpdateIdentifier: default: '' type: string @@ -276,9 +283,26 @@ resources: networks: - network: ctlplane user_data_format: SOFTWARE_CONFIG - user_data: {get_resource: NodeUserData} + user_data: {get_resource: UserData} name: {get_param: Hostname} + # Combine the NodeAdminUserData and NodeUserData mime archives + UserData: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: NodeAdminUserData} + type: multipart + - config: {get_resource: NodeUserData} + type: multipart + + # Creates the "heat-admin" user if configured via the environment + # Should return a OS::Heat::MultipartMime reference via OS::stack_id + NodeAdminUserData: + type: OS::TripleO::NodeAdminUserData + + # For optional operator additional userdata + # Should return a OS::Heat::MultipartMime reference via OS::stack_id NodeUserData: type: OS::TripleO::NodeUserData @@ -300,6 +324,7 @@ resources: NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: + ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} TenantIp: {get_attr: [TenantPort, ip_address]} @@ -307,6 +332,7 @@ resources: NetworkConfig: type: OS::TripleO::Compute::Net::SoftwareConfig properties: + ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} @@ -327,14 +353,22 @@ resources: config: hiera: hierarchy: + - '"%{::uuid}"' - heat_config_%{::deploy_config_name} + - compute_extraconfig + - extraconfig - compute - ceph_cluster # provided by CephClusterConfig - ceph - all_nodes # provided by allNodesConfig - '"%{::osfamily}"' - common + - cisco_n1kv_data # Optionally provided by ComputeExtraConfigPre datafiles: + compute_extraconfig: + mapped_data: {get_param: NovaComputeExtraConfig} + extraconfig: + mapped_data: {get_param: ExtraConfig} common: raw_data: {get_file: hieradata/common.yaml} ceph: @@ -342,6 +376,7 @@ resources: compute: raw_data: {get_file: hieradata/compute.yaml} mapped_data: + cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend} nova::debug: {get_input: debug} nova::rabbit_userid: {get_input: rabbit_username} nova::rabbit_password: {get_input: rabbit_password} @@ -351,7 +386,8 @@ resources: nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type} nova_api_host: {get_input: nova_api_host} nova::compute::vncproxy_host: {get_input: nova_public_ip} - nova_enable_rbd_backend: {get_input: nova_enable_rbd_backend} + nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend} + rbd_persistent_storage: {get_input: cinder_enable_rbd_backend} nova_password: {get_input: nova_password} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} ceilometer::debug: {get_input: debug} @@ -368,7 +404,7 @@ resources: nova::glance_api_servers: {get_input: glance_api_servers} neutron::debug: {get_input: debug} neutron::rabbit_password: {get_input: rabbit_password} - neutron::rabbit_user: {get_input: rabbit_user} + neutron::rabbit_user: {get_input: rabbit_username} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} 
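The compute role gets the same hiera layering: NovaComputeExtraConfig feeds the compute_extraconfig datafile, which sits above extraconfig in the hierarchy, so role-specific keys win over cluster-wide ExtraConfig. A hedged example (the key is a stock puppet-nova parameter; the value is purely illustrative):

parameter_defaults:
  NovaComputeExtraConfig:
    nova::compute::reserved_host_memory: 2048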
neutron::rabbit_port: {get_input: rabbit_client_port} neutron_flat_networks: {get_input: neutron_flat_networks} @@ -378,21 +414,27 @@ resources: neutron_tenant_network_type: {get_input: neutron_tenant_network_type} neutron_tunnel_types: {get_input: neutron_tunnel_types} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} + neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} + neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} neutron_bridge_mappings: {get_input: neutron_bridge_mappings} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron_physical_bridge: {get_input: neutron_physical_bridge} neutron_public_interface: {get_input: neutron_public_interface} nova::network::neutron::neutron_admin_password: {get_input: neutron_password} - nova::network::neutron::neutron_url: {get_input: neutron_url} + nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} neutron_router_distributed: {get_input: neutron_router_distributed} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} + neutron::core_plugin: {get_input: neutron_core_plugin} + neutron::service_plugins: {get_input: neutron_service_plugins} + neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} admin_password: {get_input: admin_password} ntp::servers: {get_input: ntp_servers} - enable_package_install: {get_input: enable_package_install} + tripleo::packages::enable_install: {get_input: enable_package_install} + tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} NovaComputeDeployment: type: OS::TripleO::SoftwareDeployment @@ -401,6 +443,7 @@ resources: config: {get_resource: NovaComputeConfig} server: {get_resource: NovaCompute} input_values: + cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend} debug: {get_param: Debug} nova_compute_driver: {get_param: NovaComputeDriver} nova_compute_libvirt_type: {get_param: NovaComputeLibvirtType} @@ -408,31 +451,36 @@ resources: nova_api_host: {get_param: NovaApiHost} nova_password: {get_param: NovaPassword} nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend} + cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} ceilometer_compute_agent: {get_param: CeilometerComputeAgent} - ceilometer_agent_auth_url: - list_join: - - '' - - - 'http://' - - {get_param: KeystoneHost} - - ':5000/v2.0' + ceilometer_agent_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]} snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} - glance_api_servers: - list_join: - - '' - - - {get_param: GlanceProtocol} - - '://' - - {get_param: GlanceHost} - - ':' - - {get_param: GlancePort} + glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} neutron_flat_networks: {get_param: NeutronFlatNetworks} neutron_host: {get_param: NeutronHost} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: 
[ServiceNetMap, NeutronTenantNetwork]}]} neutron_tenant_network_type: {get_param: NeutronNetworkType} neutron_tunnel_types: {get_param: NeutronTunnelTypes} + neutron_tunnel_id_ranges: + str_replace: + template: "['RANGES']" + params: + RANGES: + list_join: + - "','" + - {get_param: NeutronTunnelIdRanges} + neutron_vni_ranges: + str_replace: + template: "['RANGES']" + params: + RANGES: + list_join: + - "','" + - {get_param: NeutronVniRanges} neutron_network_vlan_ranges: str_replace: template: "['RANGES']" @@ -449,31 +497,50 @@ resources: neutron_agent_mode: {get_param: NeutronAgentMode} neutron_router_distributed: {get_param: NeutronDVR} neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} + neutron_core_plugin: {get_param: NeutronCorePlugin} + neutron_service_plugins: + str_replace: + template: "['PLUGINS']" + params: + PLUGINS: + list_join: + - "','" + - {get_param: NeutronServicePlugins} + neutron_type_drivers: + str_replace: + template: "['DRIVERS']" + params: + DRIVERS: + list_join: + - "','" + - {get_param: NeutronTypeDrivers} neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} - neutron_url: - list_join: - - '' - - - 'http://' - - {get_param: NeutronHost} - - ':9696' - neutron_admin_auth_url: - list_join: - - '' - - - 'http://' - - {get_param: NeutronHost} - - ':35357/v2.0' + neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]} + neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]} admin_password: {get_param: AdminPassword} rabbit_username: {get_param: RabbitUserName} rabbit_password: {get_param: RabbitPassword} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: - str_replace: - template: '["server"]' - params: - server: {get_param: NtpServer} + ntp_servers: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} + enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} + + # Hook for site-specific additional pre-deployment config, e.g extra hieradata + ComputeExtraConfigPre: + depends_on: NovaComputeDeployment + type: OS::TripleO::ComputeExtraConfigPre + properties: + server: {get_resource: NovaCompute} + + # Hook for site-specific additional pre-deployment config, + # applying to all nodes, e.g node registration/unregistration + NodeExtraConfig: + depends_on: ComputeExtraConfigPre + type: OS::TripleO::NodeExtraConfig + properties: + server: {get_resource: NovaCompute} UpdateConfig: type: OS::TripleO::Tasks::PackageUpdate @@ -510,7 +577,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]} HOST: {get_attr: [NovaCompute, name]} nova_server_resource: description: Heat resource handle for the Nova compute server @@ -518,5 +585,9 @@ outputs: {get_resource: NovaCompute} config_identifier: description: identifier which changes if the node configuration may need re-applying - value: {get_attr: [NovaComputeDeployment, deploy_stdout]} - + value: + list_join: + - ',' + - - {get_attr: [NovaComputeDeployment, deploy_stdout]} + - {get_attr: [ComputeExtraConfigPre, deploy_stdout]} + - {get_param: UpdateIdentifier} diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml index 38161cd7..dc81498a 100644 --- 
a/puppet/controller-config-pacemaker.yaml +++ b/puppet/controller-config-pacemaker.yaml @@ -3,6 +3,12 @@ heat_template_version: 2015-04-30 description: > A software config which runs manifests/overcloud_controller_pacemaker.pp +parameters: + ConfigDebug: + default: false + description: Whether to run config management (e.g. Puppet) in debug mode. + type: boolean + resources: ControllerPuppetConfigImpl: @@ -10,6 +16,7 @@ resources: properties: group: puppet options: + enable_debug: {get_param: ConfigDebug} enable_hiera: True enable_facter: False outputs: diff --git a/puppet/controller-config.yaml b/puppet/controller-config.yaml index 4135ffac..f85e1a9e 100644 --- a/puppet/controller-config.yaml +++ b/puppet/controller-config.yaml @@ -3,6 +3,12 @@ heat_template_version: 2015-04-30 description: > A software config which runs manifests/overcloud_controller.pp +parameters: + ConfigDebug: + default: false + description: Whether to run config management (e.g. Puppet) in debug mode. + type: boolean + resources: ControllerPuppetConfigImpl: @@ -10,6 +16,7 @@ resources: properties: group: puppet options: + enable_debug: {get_param: ConfigDebug} enable_hiera: True enable_facter: False outputs: diff --git a/puppet/controller-post-puppet.yaml b/puppet/controller-post.yaml index e88561e6..941e1ac5 100644 --- a/puppet/controller-post-puppet.yaml +++ b/puppet/controller-post.yaml @@ -4,6 +4,10 @@ description: > OpenStack controller node post deployment for Puppet. parameters: + ConfigDebug: + default: false + description: Whether to run config management (e.g. Puppet) in debug mode. + type: boolean servers: type: json NodeConfigIdentifiers: @@ -46,6 +50,7 @@ resources: properties: group: puppet options: + enable_debug: {get_param: ConfigDebug} enable_hiera: True enable_facter: False inputs: @@ -83,6 +88,16 @@ resources: step: 4 update_identifier: {get_param: NodeConfigIdentifiers} + ControllerOvercloudServicesDeployment_Step6: + type: OS::Heat::StructuredDeployments + depends_on: ControllerOvercloudServicesDeployment_Step5 + properties: + servers: {get_param: servers} + config: {get_resource: ControllerPuppetConfig} + input_values: + step: 5 + update_identifier: {get_param: NodeConfigIdentifiers} + # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: diff --git a/puppet/controller-puppet.yaml b/puppet/controller.yaml index c5adeb91..ae2b66e3 100644 --- a/puppet/controller-puppet.yaml +++ b/puppet/controller.yaml @@ -4,6 +4,11 @@ description: > OpenStack controller node configured by Puppet. parameters: + AdminEmail: + default: 'admin@example.com' + description: The email for the keystone admin account. + type: string + hidden: true AdminPassword: default: unset description: The password for the keystone admin account, used for monitoring, querying neutron etc. @@ -14,6 +19,9 @@ parameters: description: The keystone auth secret and db password. type: string hidden: true + CeilometerApiVirtualIP: + type: string + default: '' CeilometerBackend: default: 'mongodb' description: The ceilometer backend type. @@ -28,6 +36,13 @@ parameters: description: The password for the ceilometer service and db account. 
type: string hidden: true + CinderApiVirtualIP: + type: string + default: '' + CinderEnableNfsBackend: + default: false + description: Whether to enable or not the NFS backend for Cinder + type: boolean CinderEnableIscsiBackend: default: true description: Whether to enable or not the Iscsi backend for Cinder @@ -44,6 +59,18 @@ parameters: default: 5000 description: The size of the loopback file used by the cinder LVM driver. type: number + CinderNfsMountOptions: + default: '' + description: > + Mount options for NFS mounts used by Cinder NFS backend. Effective + when CinderEnableNfsBackend is true. + type: string + CinderNfsServers: + default: '' + description: > + NFS servers used by Cinder NFS backend. Effective when + CinderEnableNfsBackend is true. + type: comma_delimited_list CinderPassword: default: unset description: The password for the cinder service and db account, used by cinder-api. @@ -61,8 +88,7 @@ parameters: ControllerExtraConfig: default: {} description: | - Controller specific configuration to inject into the cluster. Same - structure as ExtraConfig. + Controller specific hiera configuration data to inject into the cluster. type: json ControlVirtualInterface: default: 'br-ex' @@ -72,6 +98,10 @@ parameters: default: '' description: Set to True to enable debugging on all services. type: string + EnableFencing: + default: false + description: Whether to enable fencing in Pacemaker or not. + type: boolean EnableGalera: default: true description: Whether to use Galera instead of regular MariaDB. @@ -87,39 +117,39 @@ parameters: ExtraConfig: default: {} description: | - Additional configuration to inject into the cluster. The JSON should have + Additional hieradata to inject into the cluster, note that + ControllerExtraConfig takes precedence over ExtraConfig. + type: json + FencingConfig: + default: {} + description: | + Pacemaker fencing configuration. The JSON should have the following structure: - {"FILEKEY": - {"config": - [{"section": "SECTIONNAME", - "values": - [{"option": "OPTIONNAME", - "value": "VALUENAME" - } - ] - } - ] - } + { + "devices": [ + { + "agent": "AGENT_NAME", + "host_mac": "HOST_MAC_ADDRESS", + "params": {"PARAM_NAME": "PARAM_VALUE"} + } + ] } For instance: - {"nova": - {"config": - [{"section": "default", - "values": - [{"option": "compute_manager", - "value": "ironic.nova.compute.manager.ClusterComputeManager" - } - ] - }, - {"section": "cells", - "values": - [{"option": "driver", - "value": "nova.cells.rpc_driver.CellsRPCDriver" - } - ] - } - ] - } + { + "devices": [ + { + "agent": "fence_xvm", + "host_mac": "52:54:00:aa:bb:cc", + "params": { + "multicast_address": "225.0.0.12", + "port": "baremetal_0", + "manage_fw": true, + "manage_key_file": true, + "key_file": "/etc/fence_xvm.key", + "key_file_password": "abcdef" + } + } + ] } type: json Flavor: @@ -140,14 +170,6 @@ parameters: description: The password for the glance service and db account, used by the glance services. type: string hidden: true - GlancePort: - default: "9292" - description: Glance port. - type: string - GlanceProtocol: - default: http - description: Protocol to use when connecting to glance, set to https for SSL. - type: string GlanceBackend: default: swift description: The short name of the Glance backend to use. Should be one @@ -155,6 +177,34 @@ parameters: type: string constraints: - allowed_values: ['swift', 'file', 'rbd'] + GlanceFilePcmkDevice: + default: '' + description: > + An exported storage device that should be mounted by Pacemaker + as Glance storage. 
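The Cinder NFS knobs are meant to be used together. A minimal sketch that swaps the default iSCSI/LVM backend for NFS (the export path is a placeholder):

parameter_defaults:
  CinderEnableIscsiBackend: false
  CinderEnableNfsBackend: true
  CinderNfsServers: '192.0.2.20:/export/cinder'
  CinderNfsMountOptions: 'rw,sync'

CinderNfsServers is a comma_delimited_list, so multiple exports can be given as one comma-separated string.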
Effective when GlanceFilePcmkManage is true. + type: string + GlanceFilePcmkFstype: + default: 'nfs' + description: > + Filesystem type for Pacemaker mount used as Glance storage. + Effective when GlanceFilePcmkManage is true. + type: string + GlanceFilePcmkManage: + default: false + description: > + Whether to make Glance file backend a mount managed by Pacemaker. + Effective when GlanceBackend is 'file'. + type: boolean + GlanceFilePcmkOptions: + default: '' + description: > + Mount options for Pacemaker mount used as Glance storage. + Effective when GlanceFilePcmkManage is true. + type: string + HAProxySyslogAddress: + default: /dev/log + description: Syslog address where HAproxy will send its log + type: string HeatPassword: default: unset description: The password for the Heat service and db account, used by the Heat services. @@ -168,9 +218,15 @@ parameters: HeatAuthEncryptionKey: description: Auth encryption key for heat-engine type: string + hidden: true + HorizonAllowedHosts: + default: '*' + description: A list of IP/Hostname allowed to connect to horizon + type: comma_delimited_list HorizonSecret: description: Secret key for Django type: string + hidden: true Image: type: string default: overcloud-control @@ -208,6 +264,20 @@ parameters: description: Keystone key for signing tokens. type: string hidden: true + KeystoneNotificationDriver: + description: Comma-separated list of Oslo notification drivers used by Keystone + default: ['messaging'] + type: comma_delimited_list + KeystoneNotificationFormat: + description: The Keystone notification format + default: 'basic' + type: string + constraints: + - allowed_values: [ 'basic', 'cadf' ] + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint MysqlClusterUniquePart: description: A unique identifier of the MySQL cluster the controller is in. type: string @@ -222,6 +292,10 @@ parameters: lower level default. type: number default: 0 + MysqlMaxConnections: + description: Configures MySQL max_connections config setting + type: number + default: 4096 MysqlRootPassword: type: string hidden: true @@ -252,6 +326,10 @@ parameters: default: 'False' description: Whether to enable l3-agent HA type: string + NeutronDhcpAgentsPerNetwork: + type: number + default: 3 + description: The number of neutron dhcp agents to schedule per network NeutronDVR: default: 'False' description: Whether to configure Neutron Distributed Virtual Routers @@ -260,6 +338,24 @@ parameters: default: 'unset' description: Shared secret to prevent spoofing type: string + hidden: true + NeutronCorePlugin: + default: 'ml2' + description: | + The core plugin for Neutron. The value should be the entrypoint to be loaded + from neutron.core_plugins namespace. + type: string + NeutronServicePlugins: + default: "router" + description: | + Comma-separated list of service plugin entrypoints to be loaded from the + neutron.service_plugins namespace. + type: comma_delimited_list + NeutronTypeDrivers: + default: "vxlan,vlan,flat,gre" + description: | + Comma-separated list of network type driver entrypoints to be loaded. + type: comma_delimited_list NeutronMechanismDrivers: default: 'openvswitch' description: | @@ -282,7 +378,7 @@ parameters: description: Whether to enable l3-agent HA type: string NeutronNetworkType: - default: 'gre' + default: 'vxlan' description: The tenant network type for Neutron, either gre or vxlan. 
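# Illustrative example (not part of this diff): the GlanceFilePcmk* parameters
# introduced above let Pacemaker manage a mount backing the 'file' Glance
# backend; the NFS export below is a placeholder:
parameter_defaults:
  GlanceBackend: file
  GlanceFilePcmkManage: true
  GlanceFilePcmkDevice: '192.0.2.20:/export/glance'
  GlanceFilePcmkFstype: nfs
  GlanceFilePcmkOptions: 'rw'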
type: string NeutronNetworkVLANRanges: @@ -324,29 +420,50 @@ parameters: description: If set, the public interface is a vlan with this device as the raw device. type: string NeutronTunnelTypes: - default: 'gre' + default: 'vxlan' description: | The tunnel types for the Neutron tenant network. To specify multiple values, use a comma separated string, like so: 'gre,vxlan' type: string + NeutronTunnelIdRanges: + description: | + Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges + of GRE tunnel IDs that are available for tenant network allocation + default: ["1:1000", ] + type: comma_delimited_list + NeutronVniRanges: + description: | + Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges + of VXLAN VNI IDs that are available for tenant network allocation + default: ["1:1000", ] + type: comma_delimited_list + NovaApiVirtualIP: + type: string + default: '' NovaPassword: default: unset description: The password for the nova service and db account, used by nova-api. type: string hidden: true + MongoDbNoJournal: + default: false + description: Should MongoDb journaling be disabled + type: boolean NtpServer: - type: string default: '' + description: Comma-separated list of ntp servers + type: comma_delimited_list PcsdPassword: type: string description: The password for the 'pcsd' user. + hidden: true PublicVirtualInterface: default: 'br-ex' description: > Specifies the interface where the public-facing virtual ip will be assigned. This should be int_public when a VLAN is being used. type: string - PublicVirtualIP: # DEPRECATED: use per service settings instead + PublicVirtualIP: type: string default: '' # Has to be here because of the ignored empty value bug RabbitCookie: @@ -372,6 +489,10 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + RabbitFDLimit: + default: 16384 + description: Configures RabbitMQ FD limit + type: string RedisVirtualIP: type: string default: '' # Has to be here because of the ignored empty value bug @@ -422,6 +543,9 @@ parameters: services. hidden: true type: string + SwiftProxyVirtualIP: + type: string + default: '' SwiftReplicas: type: number default: 3 @@ -435,9 +559,15 @@ parameters: GlanceApiVirtualIP: type: string default: '' + GlanceRegistryVirtualIP: + type: string + default: '' MysqlVirtualIP: type: string default: '' + KeystoneAdminApiVirtualIP: + type: string + default: '' KeystonePublicApiVirtualIP: type: string default: '' @@ -453,6 +583,11 @@ parameters: description: Mapping of service_name -> network name. Typically set via parameter_defaults in the resource registry. type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
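# Illustrative example (not part of this diff): several parameters in this
# hunk become comma_delimited_list (NtpServer, NeutronTunnelIdRanges,
# NeutronVniRanges); one possible environment entry, with placeholder values:
parameter_defaults:
  NeutronNetworkType: vxlan
  NeutronTunnelTypes: vxlan
  NeutronTunnelIdRanges: ['1:1000']
  NeutronVniRanges: ['10000:20000']
  NtpServer: ['0.pool.ntp.org', '1.pool.ntp.org']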
+ type: json UpdateIdentifier: default: '' type: string @@ -475,9 +610,26 @@ resources: networks: - network: ctlplane user_data_format: SOFTWARE_CONFIG - user_data: {get_resource: NodeUserData} + user_data: {get_resource: UserData} name: {get_param: Hostname} + # Combine the NodeAdminUserData and NodeUserData mime archives + UserData: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: NodeAdminUserData} + type: multipart + - config: {get_resource: NodeUserData} + type: multipart + + # Creates the "heat-admin" user if configured via the environment + # Should return a OS::Heat::MultipartMime reference via OS::stack_id + NodeAdminUserData: + type: OS::TripleO::NodeAdminUserData + + # For optional operator additional userdata + # Should return a OS::Heat::MultipartMime reference via OS::stack_id NodeUserData: type: OS::TripleO::NodeUserData @@ -509,6 +661,7 @@ resources: NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: + ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} ExternalIp: {get_attr: [ExternalPort, ip_address]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} @@ -516,17 +669,19 @@ resources: TenantIp: {get_attr: [TenantPort, ip_address]} NetIpSubnetMap: - type: OS::TripleO::Network::Ports::NetIpMap + type: OS::TripleO::Network::Ports::NetIpSubnetMap properties: - ExternalIp: {get_attr: [ExternalPort, ip_subnet]} - InternalApiIp: {get_attr: [InternalApiPort, ip_subnet]} - StorageIp: {get_attr: [StoragePort, ip_subnet]} - StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_subnet]} - TenantIp: {get_attr: [TenantPort, ip_subnet]} + ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} + ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} + InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} + StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} + StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]} NetworkConfig: type: OS::TripleO::Controller::Net::SoftwareConfig properties: + ControlPlaneIp: {get_attr: [Controller, networks, ctlplane, 0]} ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} @@ -551,6 +706,7 @@ resources: input_values: bootstack_nodeid: {get_attr: [Controller, name]} neutron_enable_tunneling: {get_param: NeutronEnableTunnelling} + haproxy_log_address: {get_param: HAProxySyslogAddress} heat.watch_server_url: list_join: - '' @@ -570,12 +726,24 @@ resources: - {get_param: HeatApiVirtualIP} - ':8000/v1/waitcondition' heat_auth_encryption_key: {get_param: HeatAuthEncryptionKey} + horizon_allowed_hosts: {get_param: HorizonAllowedHosts} horizon_secret: {get_param: HorizonSecret} + admin_email: {get_param: AdminEmail} admin_password: {get_param: AdminPassword} admin_token: {get_param: AdminToken} neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP} debug: {get_param: Debug} + cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend} cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend} + cinder_nfs_mount_options: {get_param: CinderNfsMountOptions} + cinder_nfs_servers: + str_replace: + template: "['SERVERS']" + params: + SERVERS: + list_join: + - "','" + - {get_param: CinderNfsServers} cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} cinder_password: {get_param: CinderPassword} cinder_enable_iscsi_backend: {get_param: 
CinderEnableIscsiBackend} @@ -589,9 +757,13 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/cinder' - glance_port: {get_param: GlancePort} + glance_port: {get_param: [EndpointMap, GlanceInternal, port]} glance_password: {get_param: GlancePassword} glance_backend: {get_param: GlanceBackend} + glance_file_pcmk_device: {get_param: GlanceFilePcmkDevice} + glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype} + glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage} + glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions} glance_notifier_strategy: {get_param: GlanceNotifierStrategy} glance_log_file: {get_param: GlanceLogFile} glance_dsn: @@ -612,12 +784,13 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/heat' - keystone_auth_address: {list_join: ['', ['http://', {get_param: KeystonePublicApiVirtualIP} , ':5000/v2.0']]} keystone_ca_certificate: {get_param: KeystoneCACertificate} keystone_signing_key: {get_param: KeystoneSigningKey} keystone_signing_certificate: {get_param: KeystoneSigningCertificate} keystone_ssl_certificate: {get_param: KeystoneSSLCertificate} keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey} + keystone_notification_driver: {get_param: KeystoneNotificationDriver} + keystone_notification_format: {get_param: KeystoneNotificationFormat} keystone_dsn: list_join: - '' @@ -626,22 +799,17 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/keystone' - keystone_identity_uri: - list_join: - - '' - - - 'http://' - - {get_param: KeystonePublicApiVirtualIP} - - ':35357/' - keystone_auth_uri: - list_join: - - '' - - - 'http://' - - {get_param: KeystonePublicApiVirtualIP} - - ':5000/v2.0/' + keystone_identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + keystone_public_url: { get_param: [EndpointMap, KeystonePublic, uri_no_suffix] } + keystone_internal_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } + keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] } + enable_fencing: {get_param: EnableFencing} enable_galera: {get_param: EnableGalera} enable_ceph_storage: {get_param: EnableCephStorage} enable_swift_storage: {get_param: EnableSwiftStorage} mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize} + mysql_max_connections: {get_param: MysqlMaxConnections} mysql_root_password: {get_param: MysqlRootPassword} mysql_cluster_name: str_replace: @@ -652,9 +820,27 @@ resources: neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron_agent_mode: {get_param: NeutronAgentMode} neutron_router_distributed: {get_param: NeutronDVR} + neutron_core_plugin: {get_param: NeutronCorePlugin} + neutron_service_plugins: + str_replace: + template: "['PLUGINS']" + params: + PLUGINS: + list_join: + - "','" + - {get_param: NeutronServicePlugins} + neutron_type_drivers: + str_replace: + template: "['DRIVERS']" + params: + DRIVERS: + list_join: + - "','" + - {get_param: NeutronTypeDrivers} neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron_l3_ha: {get_param: NeutronL3HA} + neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} neutron_network_vlan_ranges: str_replace: template: "['RANGES']" @@ -671,6 +857,22 @@ resources: neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag} neutron_tenant_network_type: {get_param: NeutronNetworkType} neutron_tunnel_types: {get_param: 
NeutronTunnelTypes} + neutron_tunnel_id_ranges: + str_replace: + template: "['RANGES']" + params: + RANGES: + list_join: + - "','" + - {get_param: NeutronTunnelIdRanges} + neutron_vni_ranges: + str_replace: + template: "['RANGES']" + params: + RANGES: + list_join: + - "','" + - {get_param: NeutronVniRanges} neutron_password: {get_param: NeutronPassword} neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions} neutron_dsn: @@ -681,18 +883,10 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/ovs_neutron?charset=utf8' - neutron_url: - list_join: - - '' - - - 'http://' - - {get_param: NeutronApiVirtualIP} - - ':9696' - neutron_admin_auth_url: - list_join: - - '' - - - 'http://' - - {get_param: KeystonePublicApiVirtualIP} - - ':35357/v2.0' + neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] } + neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] } + neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] } + neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri ] } ceilometer_backend: {get_param: CeilometerBackend} ceilometer_metering_secret: {get_param: CeilometerMeteringSecret} ceilometer_password: {get_param: CeilometerPassword} @@ -705,7 +899,9 @@ resources: ceilometer_dsn: list_join: - '' - - - 'mysql://ceilometer:unset@' + - - 'mysql://ceilometer:' + - {get_param: CeilometerPassword} + - '@' - {get_param: MysqlVirtualIP} - '/ceilometer' snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} @@ -719,17 +915,23 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova' + fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} rabbit_password: {get_param: RabbitPassword} rabbit_cookie: {get_param: RabbitCookie} rabbit_client_use_ssl: {get_param: RabbitClientUseSSL} rabbit_client_port: {get_param: RabbitClientPort} - ntp_servers: + mongodb_no_journal: {get_param: MongoDbNoJournal} + # We need to force this into quotes or hiera will return integer causing + # the puppet module validation regexp to fail. 
+ # Remove when: https://github.com/puppetlabs/puppetlabs-rabbitmq/pull/401 + rabbit_fd_limit: str_replace: - template: '["server"]' + template: "'LIMIT'" params: - server: {get_param: NtpServer} + LIMIT: {get_param: RabbitFDLimit} + ntp_servers: {get_param: NtpServer} control_virtual_interface: {get_param: ControlVirtualInterface} public_virtual_interface: {get_param: PublicVirtualInterface} swift_hash_suffix: {get_param: SwiftHashSuffix} @@ -739,23 +941,19 @@ resources: swift_min_part_hours: {get_param: SwiftMinPartHours} swift_mount_check: {get_param: SwiftMountCheck} enable_package_install: {get_param: EnablePackageInstall} + enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]} cinder_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]} glance_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]} glance_registry_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} - glance_api_servers: - list_join: - - '' - - - {get_param: GlanceProtocol} - - '://' - - {get_param: GlanceApiVirtualIP} - - ':' - - {get_param: GlancePort} + glance_api_servers: { get_param: [EndpointMap, GlanceInternal, uri]} + glance_registry_host: {get_param: GlanceRegistryVirtualIP} heat_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]} keystone_public_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]} keystone_admin_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]} + keystone_region: {get_param: KeystoneRegion} mongo_db_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MongoDbNetwork]}]} neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]} neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} @@ -768,8 +966,9 @@ resources: redis_vip: {get_param: RedisVirtualIP} memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]} - ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} - ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} + mysql_virtual_ip: {get_param: MysqlVirtualIP} + ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]} + ceph_public_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} ceph_public_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephPublicNetwork]}]} # Map heat metadata into hiera datafiles @@ -780,8 +979,12 @@ resources: config: hiera: hierarchy: + - '"%{::uuid}"' - heat_config_%{::deploy_config_name} + - controller_extraconfig + - extraconfig - controller + - database - object - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig - ceph_cluster # provided by 
CephClusterConfig @@ -791,7 +994,15 @@ resources: - vip_data # provided by vip-config - '"%{::osfamily}"' - common + - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre + - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre + - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre + - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre datafiles: + controller_extraconfig: + mapped_data: {get_param: ControllerExtraConfig} + extraconfig: + mapped_data: {get_param: ExtraConfig} common: raw_data: {get_file: hieradata/common.yaml} ceph: @@ -800,6 +1011,8 @@ resources: ceph::profile::params::cluster_network: {get_input: ceph_cluster_network} ceph::profile::params::public_network: {get_input: ceph_public_network} ceph::mon::public_addr: {get_input: ceph_public_ip} + database: + raw_data: {get_file: hieradata/database.yaml} object: raw_data: {get_file: hieradata/object.yaml} controller: @@ -808,7 +1021,9 @@ resources: bootstack_nodeid: {get_input: bootstack_nodeid} # Pacemaker + enable_fencing: {get_input: enable_fencing} hacluster_pwd: {get_input: pcsd_password} + tripleo::fencing::config: {get_input: fencing_config} # Swift swift::proxy::proxy_local_net_ip: {get_input: swift_proxy_network} @@ -827,7 +1042,10 @@ resources: tripleo::ringbuilder::build_ring: True # Cinder + cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend} cinder_enable_rbd_backend: {get_input: cinder_enable_rbd_backend} + cinder_nfs_mount_options: {get_input: cinder_nfs_mount_options} + cinder_nfs_servers: {get_input: cinder_nfs_servers} cinder_lvm_loop_device_size: {get_input: cinder_lvm_loop_device_size} cinder_iscsi_helper: {get_input: cinder_iscsi_helper} cinder_iscsi_ip_address: {get_input: cinder_iscsi_network} @@ -844,13 +1062,14 @@ resources: cinder_enable_iscsi_backend: {get_input: cinder_enable_iscsi_backend} cinder::glance::glance_api_servers: {get_input: glance_api_servers} cinder_backend_config: {get_input: CinderBackendConfig} + cinder::db::mysql::password: {get_input: cinder_password} # Glance glance::api::bind_port: {get_input: glance_port} glance::api::bind_host: {get_input: glance_api_network} glance::api::auth_uri: {get_input: keystone_auth_uri} glance::api::identity_uri: {get_input: keystone_identity_uri} - glance::api::registry_host: {get_input: glance_registry_network} + glance::api::registry_host: {get_input: glance_registry_host} glance::api::keystone_password: {get_input: glance_password} glance::api::debug: {get_input: debug} glance_notifier_strategy: {get_input: glance_notifier_strategy} @@ -859,14 +1078,19 @@ resources: glance::api::database_connection: {get_input: glance_dsn} glance::registry::keystone_password: {get_input: glance_password} glance::registry::database_connection: {get_input: glance_dsn} - glance::registry::bind_host: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]} + glance::registry::bind_host: {get_input: glance_registry_network} glance::registry::auth_uri: {get_input: keystone_auth_uri} glance::registry::identity_uri: {get_input: keystone_identity_uri} glance::registry::debug: {get_input: debug} - glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_address} + glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri} glance::backend::swift::swift_store_user: service:glance glance::backend::swift::swift_store_key: {get_input: glance_password} glance_backend: {get_input: glance_backend} + glance::db::mysql::password: 
{get_input: glance_password} + glance_file_pcmk_device: {get_input: glance_file_pcmk_device} + glance_file_pcmk_fstype: {get_input: glance_file_pcmk_fstype} + glance_file_pcmk_manage: {get_input: glance_file_pcmk_manage} + glance_file_pcmk_options: {get_input: glance_file_pcmk_options} # Heat heat_stack_domain_admin_password: {get_input: heat_stack_domain_admin_password} @@ -879,14 +1103,15 @@ resources: heat::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} heat::rabbit_port: {get_input: rabbit_client_port} heat::auth_uri: {get_input: keystone_auth_uri} + heat::keystone_ec2_uri: {get_input: keystone_ec2_uri} heat::identity_uri: {get_input: keystone_identity_uri} heat::keystone_password: {get_input: heat_password} heat::api::bind_host: {get_input: heat_api_network} heat::api_cloudwatch::bind_host: {get_input: heat_api_network} heat::api_cfn::bind_host: {get_input: heat_api_network} heat::database_connection: {get_input: heat_dsn} - heat::instance_user: heat-admin heat::debug: {get_input: debug} + heat::db::mysql::password: {get_input: heat_password} # Keystone keystone::admin_token: {get_input: admin_token} @@ -899,22 +1124,38 @@ resources: keystone::public_bind_host: {get_input: keystone_public_api_network} keystone::admin_bind_host: {get_input: keystone_admin_api_network} keystone::debug: {get_input: debug} + keystone::db::mysql::password: {get_input: admin_token} + keystone::rabbit_userid: {get_input: rabbit_username} + keystone::rabbit_password: {get_input: rabbit_password} + keystone::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} + keystone::rabbit_port: {get_input: rabbit_client_port} + keystone::notification_driver: {get_input: keystone_notification_driver} + keystone::notification_format: {get_input: keystone_notification_format} + keystone::roles::admin::email: {get_input: admin_email} + keystone::roles::admin::password: {get_input: admin_password} + keystone::endpoint::public_url: {get_input: keystone_public_url} + keystone::endpoint::internal_url: {get_input: keystone_internal_url} + keystone::endpoint::admin_url: {get_input: keystone_identity_uri} + keystone::endpoint::region: {get_input: keystone_region} # MongoDB mongodb::server::bind_ip: {get_input: mongo_db_network} + mongodb::server::nojournal: {get_input: mongodb_no_journal} # MySQL admin_password: {get_input: admin_password} enable_galera: {get_input: enable_galera} enable_ceph_storage: {get_input: enable_ceph_storage} enable_swift_storage: {get_input: enable_swift_storage} mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size} + mysql_max_connections: {get_input: mysql_max_connections} mysql::server::root_password: {get_input: mysql_root_password} mysql_cluster_name: {get_input: mysql_cluster_name} mysql_bind_host: {get_input: mysql_network} + mysql_virtual_ip: {get_input: mysql_virtual_ip} # Neutron neutron::bind_host: {get_input: neutron_api_network} neutron::rabbit_password: {get_input: rabbit_password} - neutron::rabbit_user: {get_input: rabbit_user} + neutron::rabbit_user: {get_input: rabbit_username} neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} neutron::rabbit_port: {get_input: rabbit_client_port} neutron::debug: {get_input: debug} @@ -929,10 +1170,16 @@ resources: neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network} neutron_agent_mode: {get_input: neutron_agent_mode} neutron_router_distributed: {get_input: neutron_router_distributed} + neutron::core_plugin: {get_input: neutron_core_plugin} + neutron::service_plugins: {get_input: neutron_service_plugins} 
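# Illustrative example (not part of this diff): because controller_extraconfig
# is listed above 'controller' in the hiera hierarchy added by this change,
# individual keys in the mapping above can be overridden from an environment
# file without editing the template; the values below are examples only:
parameter_defaults:
  ControllerExtraConfig:
    mysql_max_connections: 8192
    neutron::dhcp_agents_per_network: 2
# (the controller hieradata mapping continues below)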
+ neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers} neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover} neutron::server::l3_ha: {get_input: neutron_l3_ha} + neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} + neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges} + neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges} neutron_bridge_mappings: {get_input: neutron_bridge_mappings} neutron_public_interface: {get_input: neutron_public_interface} neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device} @@ -945,6 +1192,7 @@ resources: neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options} neutron_dsn: {get_input: neutron_dsn} neutron::agents::metadata::auth_url: {get_input: keystone_identity_uri} + neutron::db::mysql::password: {get_input: neutron_password} # Ceilometer ceilometer_backend: {get_input: ceilometer_backend} @@ -960,8 +1208,9 @@ resources: ceilometer::api::keystone_auth_uri: {get_input: keystone_auth_uri} ceilometer::api::keystone_identity_uri: {get_input: keystone_identity_uri} ceilometer::agent::auth::auth_password: {get_input: ceilometer_password} - ceilometer::agent::auth::auth_url: {get_input: keystone_auth_address} + ceilometer::agent::auth::auth_url: {get_input: keystone_auth_uri} ceilometer::agent::central::coordination_url: {get_input: ceilometer_coordination_url} + ceilometer::db::mysql::password: {get_input: ceilometer_password} snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} @@ -980,12 +1229,14 @@ resources: nova::glance_api_servers: {get_input: glance_api_servers} nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret} nova::network::neutron::neutron_admin_password: {get_input: neutron_password} - nova::network::neutron::neutron_url: {get_input: neutron_url} + nova::network::neutron::neutron_url: {get_input: neutron_internal_url} nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url} nova::vncproxy::host: {get_input: nova_api_network} + nova::db::mysql::password: {get_input: nova_password} # Horizon apache::ip: {get_input: horizon_network} + horizon::allowed_hosts: {get_input: horizon_allowed_hosts} horizon::django_debug: {get_input: debug} horizon::secret_key: {get_input: horizon_secret} horizon::bind_address: {get_input: horizon_network} @@ -994,6 +1245,9 @@ resources: # Rabbit rabbitmq::node_ip_address: {get_input: rabbitmq_network} rabbitmq::erlang_cookie: {get_input: rabbit_cookie} + rabbitmq::file_limit: {get_input: rabbit_fd_limit} + rabbitmq::default_user: {get_input: rabbit_username} + rabbitmq::default_pass: {get_input: rabbit_password} # Redis redis::bind: {get_input: redis_network} redis_vip: {get_input: redis_vip} @@ -1005,7 +1259,24 @@ resources: public_virtual_interface: {get_input: public_virtual_interface} tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface} tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface} - enable_package_install: {get_input: enable_package_install} + tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address} + tripleo::packages::enable_install: {get_input: 
enable_package_install} + tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} + + # Hook for site-specific additional pre-deployment config, e.g extra hieradata + ControllerExtraConfigPre: + depends_on: ControllerDeployment + type: OS::TripleO::ControllerExtraConfigPre + properties: + server: {get_resource: Controller} + + # Hook for site-specific additional pre-deployment config, + # applying to all nodes, e.g node registration/unregistration + NodeExtraConfig: + depends_on: ControllerExtraConfigPre + type: OS::TripleO::NodeExtraConfig + properties: + server: {get_resource: Controller} UpdateConfig: type: OS::TripleO::Tasks::PackageUpdate @@ -1055,7 +1326,7 @@ outputs: str_replace: template: IP HOST.localdomain HOST CLOUDNAME params: - IP: {get_attr: [Controller, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} HOST: {get_attr: [Controller, name]} CLOUDNAME: {get_param: CloudName} nova_server_resource: @@ -1078,4 +1349,9 @@ outputs: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} config_identifier: description: identifier which changes if the controller configuration may need re-applying - value: {get_attr: [ControllerDeployment, deploy_stdout]} + value: + list_join: + - ',' + - - {get_attr: [ControllerDeployment, deploy_stdout]} + - {get_attr: [ControllerExtraConfigPre, deploy_stdout]} + - {get_param: UpdateIdentifier} diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml new file mode 100644 index 00000000..2413f5a4 --- /dev/null +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -0,0 +1,338 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Network Cisco configuration + +parameters: + # Parameters passed from the parent template + controller_servers: + type: json + compute_servers: + type: json + blockstorage_servers: + type: json + objectstorage_servers: + type: json + cephstorage_servers: + type: json + + # extra parameters passed via parameter_defaults + NetworkUCSMIp: + type: string + description: Cisco UCSM IP + default: 127.0.0.1 + NetworkUCSMUsername: + type: string + description: Cisco UCSM username + default: admin + NetworkUCSMPassword: + type: string + description: Cisco UCSM password + default: password + NetworkUCSMHostList: + type: string + description: > + Mac address to service profile mapping for UCSM-controlled hosts + The format is + '<host1-mac>:<profile>, <host2-mac>:<profile>, ...' 
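# Illustrative example (not part of this diff): a NetworkUCSMHostList value in
# the 'MAC:service-profile' format described above; the MAC addresses and
# profile names are placeholders:
parameter_defaults:
  NetworkUCSMIp: '192.0.2.100'
  NetworkUCSMUsername: admin
  NetworkUCSMPassword: secret
  NetworkUCSMHostList: '52:54:00:aa:bb:01:profile-node-1, 52:54:00:aa:bb:02:profile-node-2'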
+ default: '' + NetworkUCSMSupportedPciDevs: + type: string + description: Cisco UCSM SR-IOV and VM-FEX vendors supported + default: '' + NetworkNexusConfig: + type: json + description: Nexus switch configuration + default: {} + NetworkNexusManagedPhysicalNetwork: + type: string + description: The name of the physical_network + default: '' + NetworkNexusVlanNamePrefix: + type: string + description: A short prefix to prepend to the VLAN name + default: 'q-' + NetworkNexusSviRoundRobin: + type: boolean + description: A flag to enable round robin scheduling + default: false + NetworkNexusProviderVlanNamePrefix: + type: string + description: A short prefix to prepend to the VLAN name + default: 'p-' + NetworkNexusPersistentSwitchConfig: + type: string + description: To make Nexus device persistent + default: false + NetworkNexusSwitchHeartbeatTime: + type: number + description: Time interval to check the state of the Nexus device + default: 0 + NetworkNexusSwitchReplayCount: + type: number + description: Number of times to attempt config replay + default: 3 + NetworkNexusProviderVlanAutoCreate: + type: boolean + description: A flag whether to manage the creation and removal of VLANs + default: true + NetworkNexusProviderVlanAutoTrunk: + type: boolean + description: A flag whether to manage the trunk ports on the Nexus + default: true + NetworkNexusVxlanGlobalConfig: + type: boolean + description: A flag whether to manage the VXLAN global settings + default: true + NetworkNexusHostKeyChecks: + type: boolean + description: enable strict host key checks when connecting to Nexus switches + default: false + NetworkNexusVxlanVniRanges: + type: string + description: VXLAN Network IDs that are available for tenant network + default: '' + NetworkNexusVxlanMcastRanges: + type: string + description: Multicast groups for the VXLAN interface. 
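# Illustrative sketch (not part of this diff): NetworkNexusConfig is keyed by
# switch; only its 'servers' sub-map (MAC -> ports) is consumed by the
# MAC-mapping script added further below, so any other per-switch keys are
# omitted here. The switch name, MAC and port value are placeholders:
parameter_defaults:
  NetworkNexusConfig:
    N9K-1:
      servers:
        '52:54:00:aa:bb:01':
          ports: 'port-channel:2'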
+ default: '' + + +resources: + # First we lay down the base configuration via the static hieradata mappings + NetworkCiscoConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_cisco_data: + mapped_data: + neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip} + neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username} + neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password} + neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list} + neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs} + neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig} + neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork} + neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin} + neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig} + neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime} + neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk} + neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig} + neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks} + neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges} + neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges} + + NetworkCiscoDeployment: + type: OS::Heat::StructuredDeployments + properties: + config: {get_resource: NetworkCiscoConfig} + servers: {get_param: controller_servers} + input_values: + UCSM_ip: {get_param: NetworkUCSMIp} + UCSM_username: {get_param: NetworkUCSMUsername} + UCSM_password: {get_param: NetworkUCSMPassword} + UCSM_host_list: {get_attr: [MappingToUCSMDeploymentsController, deploy_stdout]} + UCSMSupportedPciDevs: {get_param: NetworkUCSMSupportedPciDevs} + NexusConfig: {get_attr: [MappingToNexusDeploymentsController, deploy_stdout]} + NexusManagedPhysicalNetwork: {get_param: NetworkNexusManagedPhysicalNetwork} + NexusVlanNamePrefix: {get_param: NetworkNexusVlanNamePrefix} + NexusSviRoundRobin: {get_param: NetworkNexusSviRoundRobin} + NexusProviderVlanNamePrefix: {get_param: NetworkNexusProviderVlanNamePrefix} + NexusPersistentSwitchConfig: {get_param: NetworkNexusPersistentSwitchConfig} + NexusSwitchHeartbeatTime: {get_param: NetworkNexusSwitchHeartbeatTime} + NexusSwitchReplayCount: {get_param: NetworkNexusSwitchReplayCount} + NexusProviderVlanAutoCreate: {get_param: NetworkNexusProviderVlanAutoCreate} + NexusProviderVlanAutoTrunk: {get_param: NetworkNexusProviderVlanAutoTrunk} + NexusVxlanGlobalConfig: {get_param: NetworkNexusVxlanGlobalConfig} + NexusHostKeyChecks: {get_param: NetworkNexusHostKeyChecks} + NexusVxlanVniRanges: {get_param: NetworkNexusVxlanVniRanges} + NexusVxlanMcastRanges: {get_param: NetworkNexusVxlanMcastRanges} + + # Now we collect the Mac->Hostname mappings 
for all nodes, which enables + # calculation of the neutron::plugins::ml2::cisco::nexus::nexus_config data + CollectMacConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: | + #!/bin/sh + MACS=$(ifconfig | grep ether | awk '{print $2}' | tr "\n" " ") + HOST_FQDN=$(hostname -f) + if [ -z "$HOST_FQDN" ]; then + HOSTNAME=$(hostname -s) + # hardcoding the domain name to avoid DNS lookup dependency + # same type of hardcoding appears elsewhere + # --ie. controller-puppet.yaml + # FIXME_HOSTNAME_DOMAIN_HARDCODE + echo "$HOSTNAME.localdomain $MACS" + else + echo "$HOST_FQDN $MACS" + fi + + CollectMacDeploymentsController: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: controller_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsCompute: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: compute_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsBlockStorage: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: blockstorage_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsObjectStorage: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: objectstorage_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + CollectMacDeploymentsCephStorage: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: cephstorage_servers} + config: {get_resource: CollectMacConfig} + actions: ['CREATE'] # Only do this on CREATE + + # Now we calculate the additional nexus config based on the mappings + MappingToNexusConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: controller_mappings + - name: compute_mappings + - name: blockstorage_mappings + - name: objectstorage_mappings + - name: cephstorage_mappings + - name: nexus_config + config: | + #!/bin/python + import ast + import json + import os + from copy import deepcopy + + mappings = ['controller_mappings', + 'compute_mappings', + 'blockstorage_mappings', + 'objectstorage_mappings', + 'cephstorage_mappings', + 'nexus_config'] + mapdict_list = [] + nexus = {} + for map_name in mappings: + f_name = '/root/' + map_name + map_data = os.getenv(map_name, "Nada") + with open(f_name, 'a') as f: + f.write(map_data) + if map_data is not "Nada": + if map_name is not 'nexus_config': + mapdict_list.append(ast.literal_eval(map_data)) + else: + nexus = ast.literal_eval(map_data) + + mac2host = {} + for mapdict in mapdict_list: + for (listnum, host2mac_list) in mapdict.iteritems(): + vals = host2mac_list.rstrip().split() + for mac in vals[1:]: + mac2host[mac.lower()] = vals[0] + + with open('/root/mac2host', 'a') as f: + f.write(str(mac2host)) + + # now we have mac to host, map host to switchport in hieradata + # nexus = ast.literal_eval(os.getenv('nexus_config', None)) + nexus_cp = deepcopy(nexus) + for nexus_switch in nexus: + for (mac,swport) in nexus[nexus_switch]['servers'].iteritems(): + lmac=mac.lower() + if lmac in mac2host: + hostname = mac2host[lmac] + # for puppet we need a unique title even at the 2nd key level + serv_key = nexus_switch + "::" + hostname + if serv_key in nexus_cp[nexus_switch]['servers']: + nexus_cp[nexus_switch]['servers'][serv_key]['ports'] += ',' + swport['ports'] + else: + 
nexus_cp[nexus_switch]['servers'][serv_key] = swport + nexus_cp[nexus_switch]['servers'][serv_key]['hostname'] = hostname + del nexus_cp[nexus_switch]['servers'][mac] + # Note this echo means you can view the data via heat deployment-show + print json.dumps(nexus_cp) + + MappingToNexusDeploymentsController: + type: OS::Heat::SoftwareDeployment + properties: + server: {get_param: [controller_servers, '0']} + config: {get_resource: MappingToNexusConfig} + input_values: + # FIXME(shardy): It'd be more convenient if we could join these + # items together but because the returned format is a map (not a list) + # we can't use list_join or str_replace. Possible Heat TODO. + controller_mappings: {get_attr: [CollectMacDeploymentsController, deploy_stdouts]} + compute_mappings: {get_attr: [CollectMacDeploymentsCompute, deploy_stdouts]} + blockstorage_mappings: {get_attr: [CollectMacDeploymentsBlockStorage, deploy_stdouts]} + objectstorage_mappings: {get_attr: [CollectMacDeploymentsObjectStorage, deploy_stdouts]} + cephstorage_mappings: {get_attr: [CollectMacDeploymentsCephStorage, deploy_stdouts]} + nexus_config: {get_param: NetworkNexusConfig} + actions: ['CREATE'] # Only do this on CREATE + + MappingToUCSMConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: ucsm_config + config: | + #!/bin/python + import ast + import os + with open('/root/mac2host', 'r') as f: + s=f.read() + m2h=ast.literal_eval(s) + ucs_config = os.getenv('ucsm_config', "Nada") + ucs_data = [] + lines = ucs_config.split(',') + for line in lines: + entry=line.rsplit(":",1) + mac = entry[0].lower().strip() + if mac in m2h: + ucs_data.append(m2h[mac] + ":" + entry[1]) + + print ", ".join(ucs_data) + + + MappingToUCSMDeploymentsController: + type: OS::Heat::SoftwareDeployment + depends_on: MappingToNexusDeploymentsController + properties: + server: {get_param: [controller_servers, '0']} + config: {get_resource: MappingToUCSMConfig} + input_values: + ucsm_config: {get_param: NetworkUCSMHostList} + actions: ['CREATE'] # Only do this on CREATE + +outputs: + # The Deployment applying the hieradata outputs the derived config-id, which + # changes if the input_values change, so if the stdouts from + # NetworkCiscoDeployment change, we need to reapply puppet (which will + # happen if we return a different config_identifier) + config_identifier: + value: {get_attr: [NetworkCiscoDeployment, deploy_stdouts]} diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml new file mode 100644 index 00000000..7cefc24b --- /dev/null +++ b/puppet/extraconfig/ceph/ceph-external-config.yaml @@ -0,0 +1,95 @@ +heat_template_version: 2015-04-30 +description: 'Configure parameters for an external Ceph cluster via Puppet.' + +parameters: + ceph_storage_count: + default: 0 + type: number + description: Number of Ceph storage nodes. Used to enable/disable managed Ceph installation. + ceph_external_mon_ips: + default: '' + type: string + description: List of external Ceph Mon host IPs. + ceph_client_key: + default: '' + type: string + description: Ceph key used to create the 'openstack' user keyring. 
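# Illustrative values (not part of this diff), showing one possible shape for
# the external-cluster inputs above; both are placeholders:
#   ceph_external_mon_ips: '192.0.2.30,192.0.2.31,192.0.2.32'
#   ceph_client_key: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='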
+ ceph_fsid: + default: '' + type: string + # The following parameters are unused for external Ceph clusters and + # are here and exist for compatibility + ceph_admin_key: + default: '' + type: string + ceph_mon_key: + default: '' + type: string + ceph_mon_names: + type: comma_delimited_list + ceph_mon_ips: + type: comma_delimited_list + NovaRbdPoolName: + default: vms + type: string + CinderRbdPoolName: + default: volumes + type: string + GlanceRbdPoolName: + default: images + type: string + CephClientUserName: + default: openstack + type: string + +resources: + CephClusterConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + ceph_cluster: + mapped_data: + ceph_storage_count: {get_param: ceph_storage_count} + enable_external_ceph: true + ceph::profile::params::mon_host: {get_param: ceph_external_mon_ips} + ceph::profile::params::fsid: {get_param: ceph_fsid} + ceph::profile::params::client_keys: + str_replace: + template: "{ + client.CLIENT_USER: { + secret: 'CLIENT_KEY', + mode: '0644', + cap_mon: 'allow r', + cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL' + } + }" + params: + CLIENT_USER: {get_param: CephClientUserName} + CLIENT_KEY: {get_param: ceph_client_key} + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName} + cinder_rbd_pool_name: {get_param: CinderRbdPoolName} + glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName} + nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName} + glance::backend::rbd::rbd_store_pool: {get_param: CephClientUserName} + nova::compute::rbd::rbd_keyring: + list_join: + - '.' + - - 'client' + - {get_param: CephClientUserName} + ceph_client_user_name: {get_param: CephClientUserName} + ceph_pools: + - {get_param: CinderRbdPoolName} + - {get_param: NovaRbdPoolName} + - {get_param: GlanceRbdPoolName} + +outputs: + config_id: + description: The ID of the CephClusterConfigImpl resource. + value: + {get_resource: CephClusterConfigImpl} diff --git a/puppet/extraconfig/pre_deploy/README b/puppet/extraconfig/pre_deploy/README new file mode 100644 index 00000000..51fc3406 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/README @@ -0,0 +1,12 @@ +This tree contains additional configuration which happens "pre deployment", +e.g before the OpenStack services themselves are configured but after the +nodes themselves have been provisioned and initially configured. + +Typically for puppet deployments these additional configs will put in place +hieradata which is then consumed by the subsequent puppet configuration +which occurs during the post-deployment phase. 
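# Illustrative example (not part of the README above): a single pre-deployment
# config can be registered from an environment file, here using the
# cinder-netapp template added later in this diff; the parameter values are
# placeholders:
resource_registry:
  OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
parameter_defaults:
  CinderEnableNetappBackend: true
  CinderNetappLogin: admin
  CinderNetappPassword: secret
  CinderNetappServerHostname: netapp.example.com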
+ +If you need to specify multiple configs, you can chain them together in a +template, see the multiple.yaml example: + + OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/controller/multiple.yaml diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml new file mode 100644 index 00000000..7ec2190f --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml @@ -0,0 +1,147 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Cinder Netapp configuration + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + + # Config specific parameters, to be provided via parameter_defaults + CinderEnableNetappBackend: + type: boolean + default: true + CinderNetappBackendName: + type: string + default: 'tripleo_netapp' + CinderNetappLogin: + type: string + CinderNetappPassword: + type: string + hidden: true + CinderNetappServerHostname: + type: string + CinderNetappServerPort: + type: string + default: '80' + CinderNetappSizeMultiplier: + type: string + default: '1.2' + CinderNetappStorageFamily: + type: string + default: 'ontap_cluster' + CinderNetappStorageProtocol: + type: string + default: 'nfs' + CinderNetappTransportType: + type: string + default: 'http' + CinderNetappVfiler: + type: string + default: '' + CinderNetappVolumeList: + type: string + default: '' + CinderNetappVserver: + type: string + default: '' + CinderNetappPartnerBackendName: + type: string + default: '' + CinderNetappNfsShares: + type: string + default: '' + CinderNetappNfsSharesConfig: + type: string + default: '/etc/cinder/shares.conf' + CinderNetappNfsMountOptions: + type: string + default: '' + CinderNetappCopyOffloadToolPath: + type: string + default: '' + CinderNetappControllerIps: + type: string + default: '' + CinderNetappSaPassword: + type: string + default: '' + hidden: true + CinderNetappStoragePools: + type: string + default: '' + CinderNetappEseriesHostType: + type: string + default: 'linux_dm_mp' + CinderNetappWebservicePath: + type: string + default: '/devmgr/v2' + +resources: + CinderNetappConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + cinder_netapp_data: + mapped_data: + cinder_enable_netapp_backend: {get_input: EnableNetappBackend} + cinder::backend::netapp::title: {get_input: NetappBackendName} + cinder::backend::netapp::netapp_login: {get_input: NetappLogin} + cinder::backend::netapp::netapp_password: {get_input: NetappPassword} + cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} + cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} + cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} + cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} + cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} + cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} + cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} + cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} + cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} + cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} + cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} + cinder::backend::netapp::nfs_shares_config: {get_input: 
NetappNfsSharesConfig} + cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} + cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} + cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} + cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} + cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} + cinder::backend::netapp::netapp_eseries_host_type: {get_input: NetappEseriesHostType} + cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} + + CinderNetappDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: CinderNetappConfig} + server: {get_param: server} + input_values: + EnableNetappBackend: {get_param: CinderEnableNetappBackend} + NetappBackendName: {get_param: CinderNetappBackendName} + NetappLogin: {get_param: CinderNetappLogin} + NetappPassword: {get_param: CinderNetappPassword} + NetappServerHostname: {get_param: CinderNetappServerHostname} + NetappServerPort: {get_param: CinderNetappServerPort} + NetappSizeMultiplier: {get_param: CinderNetappSizeMultiplier} + NetappStorageFamily: {get_param: CinderNetappStorageFamily} + NetappStorageProtocol: {get_param: CinderNetappStorageProtocol} + NetappTransportType: {get_param: CinderNetappTransportType} + NetappVfiler: {get_param: CinderNetappVfiler} + NetappVolumeList: {get_param: CinderNetappVolumeList} + NetappVserver: {get_param: CinderNetappVserver} + NetappPartnerBackendName: {get_param: CinderNetappPartnerBackendName} + NetappNfsShares: {get_param: CinderNetappNfsShares} + NetappNfsSharesConfig: {get_param: CinderNetappNfsSharesConfig} + NetappNfsMountOptions: {get_param: CinderNetappNfsMountOptions} + NetappCopyOffloadToolPath: {get_param: CinderNetappCopyOffloadToolPath} + NetappControllerIps: {get_param: CinderNetappControllerIps} + NetappSaPassword: {get_param: CinderNetappSaPassword} + NetappStoragePools: {get_param: CinderNetappStoragePools} + NetappEseriesHostType: {get_param: CinderNetappEseriesHostType} + NetappWebservicePath: {get_param: CinderNetappWebservicePath} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [CinderNetappDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/multiple.yaml b/puppet/extraconfig/pre_deploy/controller/multiple.yaml new file mode 100644 index 00000000..f949a397 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/multiple.yaml @@ -0,0 +1,18 @@ +heat_template_version: 2014-10-16 +description: 'Extra Pre-Deployment Config, multiple' +parameters: + server: + type: string + +resources: + + CinderNetappConfig: + type: cinder-netapp.yaml + properties: + server: {get_param: server} + + # Note depends_on may be used for serialization if ordering is important + OtherConfig: + type: other.yaml + properties: + server: {get_param: server} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml new file mode 100644 index 00000000..bf06d25d --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -0,0 +1,73 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Neutron Big Switch configuration + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + NeutronBigswitchRestproxyServers: + 
description: 'Big Switch controllers ("IP:port,IP:port")' + type: string + NeutronBigswitchRestproxyServerAuth: + description: 'Big Switch controller credentials ("username:password")' + type: string + NeutronBigswitchRestproxyAutoSyncOnFailure: + description: Resynchronize with the new master server on Big Switch failover. + type: boolean + default: true + NeutronBigswitchRestproxyConsistencyInterval: + description: Keepalive message interval (from Neutron to Big Switch controller). + type: number + default: 60 + NeutronBigswitchRestproxyNeutronId: + description: Unique identifier of the Neutron instance for the Big Switch controller. + type: string + default: 'neutron' + NeutronBigswitchRestproxyServerSsl: + description: Whether Neutron should use SSL to talk to the Big Switch controllers. + type: boolean + default: true + NeutronBigswitchRestproxySslCertDirectory: + description: Directory where Big Switch controller certificate will be stored. + type: string + default: '/var/lib/neutron' + + +resources: + NeutronBigswitchConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron_enable_bigswitch_ml2: true + neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} + neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} + neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} + neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval} + neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id} + neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl} + neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory} + + NeutronBigswitchDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: NeutronBigswitchConfig} + server: {get_param: server} + input_values: + restproxy_servers: {get_param: NeutronBigswitchRestproxyServers} + restproxy_server_auth: {get_param: NeutronBigswitchRestproxyServerAuth } + restproxy_auto_sync_on_failure: {get_param: NeutronBigswitchRestproxyAutoSyncOnFailure} + restproxy_consistency_interval: {get_param: NeutronBigswitchRestproxyConsistencyInterval} + restproxy_neutron_id: {get_param: NeutronBigswitchRestproxyNeutronId} + restproxy_server_ssl: {get_param: NeutronBigswitchRestproxyServerSsl} + restproxy_ssl_cert_directory: {get_param: NeutronBigswitchRestproxySslCertDirectory} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [NeutronBigswitchDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml new file mode 100644 index 00000000..6730ddf1 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -0,0 +1,179 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Cisco N1KV configuration + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + + # Config specific parameters, to be provided via parameter_defaults + N1000vVSMIP: + type: string + default: '192.0.2.50' + N1000vVSMDomainID: + type: number + default: 100 + N1000vVSMIPV6: + type: 
string + default: '::1' + N1000vVEMHostMgmtIntf: + type: string + default: 'br-ex' + N1000vUplinkProfile: + type: string + default: '{eth1: system-uplink,}' + N1000vVtepConfig: + type: string + default: '{}' + N1000vVEMSource: + type: string + default: '' + N1000vVEMVersion: + type: string + default: '' + N1000vPortDB: + type: string + default: 'ovs' + N1000vVtepsInSameSub: + type: boolean + default: false + N1000vVEMFastpathFlood: + type: string + default: 'enable' +#VSM Puppet Parameter + N1000vVSMSource: + type: string + default: '' + N1000vVSMVersion: + type: string + default: 'latest' + N1000vVSMHostMgmtIntf: + type: string + default: 'br-ex' + N1000vVSMRole: + type: string + default: 'primary' + N1000vVSMPassword: + type: string + default: 'Password' + N1000vMgmtNetmask: + type: string + default: '255.255.255.0' + N1000vMgmtGatewayIP: + type: string + default: '192.0.2.1' + N1000vPacemakerControl: + type: boolean + default: true + N1000vExistingBridge: + type: boolean + default: true + N1000vVSMHostMgmtIntfVlan: + type: number + default: 0 +#Plugin Parameters + N1000vVSMUser: + type: string + default: 'admin' + N1000vPollDuration: + type: number + default: 60 + N1000vHttpPoolSize: + type: number + default: 5 + N1000vHttpTimeout: + type: number + default: 15 + N1000vSyncInterval: + type: number + default: 300 + N1000vMaxVSMRetries: + type: number + default: 2 + +resources: + CiscoN1kvConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + cisco_n1kv_data: + mapped_data: + #enable_cisco_n1kv: {get_input: EnableCiscoN1kv} + # VEM Parameters + n1kv_vem_source: {get_input: n1kv_vem_source} + n1kv_vem_version: {get_input: n1kv_vem_version} + neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id} + neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6} + neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf} + neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile} + neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config} + neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb} + neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet} + neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood} + #VSM Parameter + n1kv_vsm_source: {get_input: n1kv_vsm_source} + n1kv_vsm_version: {get_input: n1kv_vsm_version} + n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf} + n1k_vsm::vsm_role: {get_input: n1kv_vsm_role} + n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl} + n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br} + n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password} + n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id} + n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip} + n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} + n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} + # Cisco N1KV driver Parameters + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password} + 
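The N1000v* parameters above are plain Heat parameters, so a deployment would normally supply them through an environment file. A minimal sketch follows; the resource_registry hook name is an assumption for illustration (check the controller template for the exact pre-deployment interface), while the parameter names and the IP/domain values are the ones defined in this template:

resource_registry:
  # assumed hook name, not part of this change
  OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml

parameter_defaults:
  N1000vVSMIP: '192.0.2.50'
  N1000vVSMDomainID: 100
  N1000vMgmtGatewayIP: '192.0.2.1'
  N1000vVSMPassword: 'secrete'   # illustrative value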
neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration} + neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size} + neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval} + neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries} + + CiscoN1kvDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: CiscoN1kvConfig} + server: {get_param: server} + input_values: + n1kv_vsm_ip: {get_param: N1000vVSMIP} + n1kv_vsm_domain_id: {get_param: N1000vVSMDomainID} + n1kv_vsm_ip_v6: {get_param: N1000vVSMIPV6} + n1kv_vem_host_mgmt_intf: {get_param: N1000vVEMHostMgmtIntf} + n1kv_vem_uplink_profile: {get_param: N1000vUplinkProfile} + n1kv_vem_vtep_config: {get_param: N1000vVtepConfig} + n1kv_vem_source: {get_param: N1000vVEMSource} + n1kv_vem_version: {get_param: N1000vVEMVersion} + n1kv_vem_portdb: {get_param: N1000vPortDB} + n1kv_vem_vteps_in_same_subnet: {get_param: N1000vVtepsInSameSub} + n1kv_vem_fastpath_flood: {get_param: N1000vVEMFastpathFlood} + n1kv_vsm_source: {get_param: N1000vVSMSource} + n1kv_vsm_version: {get_param: N1000vVSMVersion} + n1kv_vsm_host_mgmt_intf: {get_param: N1000vVSMHostMgmtIntf} + n1kv_vsm_role: {get_param: N1000vVSMRole} + n1kv_vsm_password: {get_param: N1000vVSMPassword} + n1kv_vsm_mgmt_netmask: {get_param: N1000vMgmtNetmask} + n1kv_vsm_gateway_ip: {get_param: N1000vMgmtGatewayIP} + n1kv_phy_brige_vlan: {get_param: N1000vVSMHostMgmtIntfVlan} + n1kv_vsm_pacemaker_ctrl: {get_param: N1000vPacemakerControl} + n1kv_vsm_existing_br: {get_param: N1000vExistingBridge} + n1kv_vsm_username: {get_param: N1000vVSMUser} + n1kv_vsm_poll_duration: {get_param: N1000vPollDuration} + n1kv_vsm_http_pool_size: {get_param: N1000vHttpPoolSize} + n1kv_vsm_http_timeout: {get_param: N1000vHttpTimeout} + n1kv_vsm_sync_interval: {get_param: N1000vSyncInterval} + n1kv_max_vsm_retries: {get_param: N1000vMaxVSMRetries} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [CiscoN1kvDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/default.yaml b/puppet/extraconfig/pre_deploy/default.yaml new file mode 100644 index 00000000..dcbc6811 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/default.yaml @@ -0,0 +1,8 @@ +heat_template_version: 2014-10-16 +description: 'Noop Extra Pre-Deployment Config' +parameters: + server: + type: string +outputs: + deploy_stdout: + value: "None" diff --git a/puppet/extraconfig/pre_deploy/per_node.yaml b/puppet/extraconfig/pre_deploy/per_node.yaml new file mode 100644 index 00000000..80c8ad6e --- /dev/null +++ b/puppet/extraconfig/pre_deploy/per_node.yaml @@ -0,0 +1,56 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata overrides for specific nodes + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + + # Config specific parameters, to be provided via parameter_defaults + # This would be a lookup of the node UUID as provided by dmidecode + # to the json required for the node-specific hieradata + # Note this needs to be a json blob e.g: + # parameter_defaults: + # NodeDataLookup: | + # {"AB4114B1-9C9D-409A-BEFB-D88C151BF2C3": {"foo": "bar"}, + # "8CF1A7EA-7B4B-4433-AC83-17675514B1B8": {"foo2": "bar2"}} + NodeDataLookup: + type: string 
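To make the NodeDataLookup round trip concrete: with the example JSON from the comment above, the script defined later in this file writes a per-node hieradata file on the node whose system UUID matches, containing only that node's entry. A sketch using just the example values:

# /etc/puppet/hieradata/AB4114B1-9C9D-409A-BEFB-D88C151BF2C3.json
{"foo": "bar"}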
+ default: '' + description: json string containing per-node configuration map + +resources: + NodeSpecificConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: node_lookup + config: | + #!/bin/sh + node_id=$(dmidecode --s system-uuid) + + # Create a /etc/puppet/hieradata/UUID.json file to provide + # the data of the NodeDataLookup parameter that matches the + # system UUID + echo $node_lookup | python -c " + import json + import sys + input = sys.stdin.readline() or '{}' + cnt = json.loads(input) + print json.dumps(cnt.get('${node_id}', {})) + " > /etc/puppet/hieradata/${node_id}.json + + NodeSpecificDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: NodeSpecificConfig} + server: {get_param: server} + input_values: + node_lookup: {get_param: NodeDataLookup} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [NodeSpecificDeployment, deploy_stdout]} diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml index 280457df..ca6d3954 100644 --- a/puppet/hieradata/ceph.yaml +++ b/puppet/hieradata/ceph.yaml @@ -7,7 +7,6 @@ ceph::profile::params::osds: {/srv/data: {}} ceph::profile::params::manage_repo: false ceph::profile::params::authentication_type: cephx -ceph_pools: - - volumes - - vms - - images +ceph_classes: [] + +ceph_osd_selinux_permissive: true diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml index 40c44aef..030f661d 100644 --- a/puppet/hieradata/common.yaml +++ b/puppet/hieradata/common.yaml @@ -13,15 +13,7 @@ nova::network::neutron::vif_plugging_is_fatal: false nova::network::neutron::vif_plugging_timeout: 30 nova::network::neutron::dhcp_domain: '' -neutron::plugins::ml2::tunnel_id_ranges: - - '1:1000' -neutron::plugins::ml2::vni_ranges: - - '1:1000' -neutron::plugins::ml2::type_drivers: - - flat - - gre - - vxlan - - vlan +neutron::allow_overlapping_ips: true sysctl_settings: net.ipv4.tcp_keepalive_intvl: @@ -29,4 +21,13 @@ sysctl_settings: net.ipv4.tcp_keepalive_probes: value: 5 net.ipv4.tcp_keepalive_time: - value: 5
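The new ceph_classes list and ceph_osd_selinux_permissive flag introduced in ceph.yaml above are ordinary hiera keys (ceph_classes is consumed via hiera_include in the Ceph storage and controller manifests later in this diff), so they can be overridden with site hieradata. A sketch with illustrative values; the extra class name is hypothetical:

ceph_osd_selinux_permissive: false   # keep SELinux enforcing on OSD nodes
ceph_classes:
  - ::mysite::ceph_extras            # hypothetical class to include on Ceph nodes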
\ No newline at end of file + value: 5 + +nova::rabbit_heartbeat_timeout_threshold: 60 +neutron::rabbit_heartbeat_timeout_threshold: 60 +cinder::rabbit_heartbeat_timeout_threshold: 60 +ceilometer::rabbit_heartbeat_timeout_threshold: 60 +heat::rabbit_heartbeat_timeout_threshold: 60 +keystone::rabbit_heartbeat_timeout_threshold: 60 + +nova::cinder_catalog_info: 'volumev2:cinderv2:internalURL' diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index 4915d3c8..173020f8 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -10,9 +10,9 @@ nova::compute::vnc_enabled: true nova::compute::libvirt::vncserver_listen: '0.0.0.0' nova::compute::libvirt::migration_support: true -nova::compute::rbd::libvirt_rbd_user: 'openstack' -nova::compute::rbd::rbd_keyring: 'client.openstack' -nova::compute::rbd::libvirt_images_rbd_pool: 'vms' nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" ceilometer::agent::auth::auth_tenant_name: 'service' +ceilometer::agent::auth::auth_endpoint_type: 'internalURL' + +compute_classes: [] diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 3de9bd91..a4914c0e 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -22,6 +22,7 @@ rabbitmq_config_variables: cluster_partition_handling: 'pause_minority' mongodb::server::replset: tripleo +mongodb::server::journal: false redis::port: 6379 redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}" @@ -39,6 +40,11 @@ swift::proxy::authtoken::admin_tenant_name: 'service' ceilometer::api::keystone_tenant: 'service' heat::keystone_tenant: 'service' +# keystone +keystone::cron::token_flush::maxdelay: 3600 +keystone::roles::admin::service_tenant: 'service' +keystone::roles::admin::admin_tenant: 'admin' + #swift swift::proxy::pipeline: - 'catch_errors' @@ -47,9 +53,9 @@ swift::proxy::pipeline: - 'ratelimit' - 'tempurl' - 'formpost' - - 'staticweb' - 'authtoken' - 'keystone' + - 'staticweb' - 'proxy-logging' - 'proxy-server' @@ -57,37 +63,45 @@ swift::proxy::account_autocreate: true # glance glance::api::pipeline: 'keystone' +glance::api::show_image_direct_url: true glance::registry::pipeline: 'keystone' glance::backend::swift::swift_store_create_container_on_put: true glance::backend::rbd::rbd_store_user: 'openstack' +glance_file_pcmk_directory: '/var/lib/glance/images' # neutron -neutron::core_plugin: 'ml2' -neutron::service_plugins: - - 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' -neutron::dhcp_agents_per_network: 2 neutron::server::sync_db: true neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf # nova nova::notify_on_state_change: 'vm_and_task_state' +nova::api::default_floating_pool: 'public' nova::api::osapi_v3: true +nova::scheduler::filter::ram_allocation_ratio: '1.0' + +# ceilometer +ceilometer::agent::auth::auth_endpoint_type: 'internalURL' # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler # heat -heat::engine::heat_stack_user_role: '' heat::engine::configure_delegated_roles: false heat::engine::trusts_delegated_roles: [] +heat::instance_user: '' # pacemaker pacemaker::corosync::cluster_name: 'tripleo_cluster' pacemaker::corosync::manage_fw: false +pacemaker::resource_defaults::defaults: + resource-stickiness: { value: INFINITY } # horizon -horizon::allowed_hosts: '*' +horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache horizon::django_session_engine: 
'django.contrib.sessions.backends.cache' +horizon::vhost_extra_params: + add_listen: false + priority: 10 # mysql mysql::server::manage_config_file: true @@ -111,3 +125,5 @@ tripleo::loadbalancer::heat_api: true tripleo::loadbalancer::heat_cloudwatch: true tripleo::loadbalancer::heat_cfn: true tripleo::loadbalancer::horizon: true + +controller_classes: [] diff --git a/puppet/hieradata/database.yaml b/puppet/hieradata/database.yaml new file mode 100644 index 00000000..7e925d90 --- /dev/null +++ b/puppet/hieradata/database.yaml @@ -0,0 +1,55 @@ +# Nova +nova::db::mysql::user: nova +nova::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +nova::db::mysql::dbname: nova +nova::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + +# Glance +glance::db::mysql::user: glance +glance::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +glance::db::mysql::dbname: glance +glance::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + +# Keystone +keystone::db::mysql::user: keystone +keystone::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +keystone::db::mysql::dbname: keystone +keystone::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + +# Neutron +neutron::db::mysql::user: neutron +neutron::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +neutron::db::mysql::dbname: ovs_neutron +neutron::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + +# Cinder +cinder::db::mysql::user: cinder +cinder::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +cinder::db::mysql::dbname: cinder +cinder::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + +# Heat +heat::db::mysql::user: heat +heat::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +heat::db::mysql::dbname: heat +heat::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + +# Ceilometer +ceilometer::db::mysql::user: ceilometer +ceilometer::db::mysql::host: "%{hiera('mysql_virtual_ip')}" +ceilometer::db::mysql::dbname: ceilometer +ceilometer::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/hieradata/object.yaml b/puppet/hieradata/object.yaml index 59a8b1cf..d4a0e81d 100644 --- a/puppet/hieradata/object.yaml +++ b/puppet/hieradata/object.yaml @@ -1,4 +1,7 @@ # Hiera data for swift storage nodes +swift::storage::all::incoming_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' +swift::storage::all::outgoing_chmod: 'Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r' + swift::storage::all::object_pipeline: - healthcheck - recon @@ -14,3 +17,5 @@ swift::proxy::keystone::operator_roles: - admin - swiftoperator - ResellerAdmin + +object_classes: [] diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml index ad9e2c2a..f4cd78a9 100644 --- a/puppet/hieradata/volume.yaml +++ b/puppet/hieradata/volume.yaml @@ -2,3 +2,11 @@ # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler + +cinder::config::cinder_config: + DEFAULT/nova_catalog_info: + value: 'compute:Compute Service:internalURL' + DEFAULT/swift_catalog_info: + value: 'object-store:swift:internalURL' + +volume_classes: []
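The new database.yaml derives every service's DB host and allowed_hosts from two hiera values via interpolation. For illustration only, with hypothetical values mysql_virtual_ip: 192.0.2.10 and mysql_bind_host: 192.0.2.11, the Nova block above resolves to:

nova::db::mysql::user: nova
nova::db::mysql::host: 192.0.2.10
nova::db::mysql::dbname: nova
nova::db::mysql::allowed_hosts:
  - '%'
  - 192.0.2.11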
\ No newline at end of file diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index b645f9fe..51f5e88d 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -13,16 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -if !str2bool(hiera('enable_package_install', 'false')) { - case $::osfamily { - 'RedHat': { - Package { provider => 'norpm' } # provided by tripleo-puppet - } - default: { - warning('enable_package_install option not supported.') - } - } -} +include ::tripleo::packages create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -30,5 +21,22 @@ if count(hiera('ntp::servers')) > 0 { include ::ntp } +if str2bool(hiera('ceph_osd_selinux_permissive', true)) { + exec { 'set selinux to permissive on boot': + command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", + onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", + path => ['/usr/bin', '/usr/sbin'], + } + + exec { 'set selinux to permissive': + command => 'setenforce 0', + onlyif => "which setenforce && getenforce | grep -i 'enforcing'", + path => ['/usr/bin', '/usr/sbin'], + } -> Class['ceph::profile::osd'] +} + include ::ceph::profile::client -include ::ceph::profile::osd
\ No newline at end of file +include ::ceph::profile::osd + +hiera_include('ceph_classes') +package_manifest{'/var/lib/tripleo/installed-packages/overcloud_ceph': ensure => present} diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 00bab7f6..cd41cc79 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -13,16 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -if !str2bool(hiera('enable_package_install', 'false')) { - case $::osfamily { - 'RedHat': { - Package { provider => 'norpm' } # provided by tripleo-puppet - } - default: { - warning('enable_package_install option not supported.') - } - } -} +include ::tripleo::packages create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -33,17 +24,18 @@ if count(hiera('ntp::servers')) > 0 { file { ['/etc/libvirt/qemu/networks/autostart/default.xml', '/etc/libvirt/qemu/networks/default.xml']: ensure => absent, - before => Service['libvirt'] + before => Service['libvirt'], } # in case libvirt has been already running before the Puppet run, make # sure the default network is destroyed exec { 'libvirt-default-net-destroy': command => '/usr/bin/virsh net-destroy default', - onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', - before => Service['libvirt'], + onlyif => '/usr/bin/virsh net-info default | /bin/grep -i "^active:\s*yes"', + before => Service['libvirt'], } include ::nova +include ::nova::config include ::nova::compute nova_config { @@ -51,31 +43,53 @@ nova_config { 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; } -$nova_enable_rbd_backend = hiera('nova_enable_rbd_backend', false) -if $nova_enable_rbd_backend { +$rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false) +$rbd_persistent_storage = hiera('rbd_persistent_storage', false) +if $rbd_ephemeral_storage or $rbd_persistent_storage { include ::ceph::profile::client $client_keys = hiera('ceph::profile::params::client_keys') + $client_user = join(['client.', hiera('ceph_client_user_name')]) class { '::nova::compute::rbd': - libvirt_rbd_secret_key => $client_keys['client.openstack']['secret'], + libvirt_rbd_secret_key => $client_keys[$client_user]['secret'], } } +if hiera('cinder_enable_nfs_backend', false) { + if str2bool($::selinux) { + selboolean { 'virt_use_nfs': + value => on, + persistent => true, + } -> Package['nfs-utils'] + } + + package {'nfs-utils': } -> Service['nova-compute'] +} + include ::nova::compute::libvirt include ::nova::network::neutron include ::neutron -class { 'neutron::plugins::ml2': +class { '::neutron::plugins::ml2': flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], } -class { 'neutron::agents::ml2::ovs': +class { '::neutron::agents::ml2::ovs': bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), tunnel_types => split(hiera('neutron_tunnel_types'), ','), } +if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } +} + + include ::ceilometer +include ::ceilometer::config include ::ceilometer::agent::compute include ::ceilometer::agent::auth @@ -84,7 +98,10 @@ snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } -class { 'snmp': +class { 
'::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + +hiera_include('compute_classes') +package_manifest{'/var/lib/tripleo/installed-packages/overcloud_compute': ensure => present} diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index bc20bad5..34be39f3 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -13,16 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -if !str2bool(hiera('enable_package_install', 'false')) { - case $::osfamily { - 'RedHat': { - Package { provider => 'norpm' } # provided by tripleo-puppet - } - default: { - warning('enable_package_install option not supported.') - } - } -} +include ::tripleo::packages if hiera('step') >= 1 { @@ -79,84 +70,34 @@ if hiera('step') >= 2 { include ::tripleo::redis_notification } - if str2bool(hiera('enable_galera', 'true')) { + if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { $mysql_config_file = '/etc/my.cnf.d/server.cnf' } # TODO Galara - class { 'mysql::server': - config_file => $mysql_config_file, - override_options => { + class { '::mysql::server': + config_file => $mysql_config_file, + override_options => { 'mysqld' => { - 'bind-address' => hiera('mysql_bind_host'), - 'max_connections' => '1024', + 'bind-address' => hiera('mysql_bind_host'), + 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', }, - } + }, + remove_default_accounts => true, } # FIXME: this should only occur on the bootstrap host (ditto for db syncs) # Create all the database schemas - # Example DSN format: mysql://user:password@host/dbname - $allowed_hosts = ['%',hiera('mysql_bind_host')] - $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]') - class { 'keystone::db::mysql': - user => $keystone_dsn[3], - password => $keystone_dsn[4], - host => $keystone_dsn[5], - dbname => $keystone_dsn[6], - allowed_hosts => $allowed_hosts, - } - $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]') - class { 'glance::db::mysql': - user => $glance_dsn[3], - password => $glance_dsn[4], - host => $glance_dsn[5], - dbname => $glance_dsn[6], - allowed_hosts => $allowed_hosts, - } - $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]') - class { 'nova::db::mysql': - user => $nova_dsn[3], - password => $nova_dsn[4], - host => $nova_dsn[5], - dbname => $nova_dsn[6], - allowed_hosts => $allowed_hosts, - } - $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]') - class { 'neutron::db::mysql': - user => $neutron_dsn[3], - password => $neutron_dsn[4], - host => $neutron_dsn[5], - dbname => $neutron_dsn[6], - allowed_hosts => $allowed_hosts, - } - $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]') - class { 'cinder::db::mysql': - user => $cinder_dsn[3], - password => $cinder_dsn[4], - host => $cinder_dsn[5], - dbname => $cinder_dsn[6], - allowed_hosts => $allowed_hosts, - } - $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]') - class { 'heat::db::mysql': - user => $heat_dsn[3], - password => $heat_dsn[4], - host => $heat_dsn[5], - dbname => $heat_dsn[6], - allowed_hosts => $allowed_hosts, - } + 
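With the per-service DSN parsing removed, the bare include ::<service>::db::mysql statements below take all of their parameters from hiera: user, host, dbname and allowed_hosts come from the new database.yaml, and the remaining values, notably passwords, must also be present in hiera. A sketch, assuming the usual puppet-openstack class parameter names:

keystone::db::mysql::password: 'KeystoneDBSecret'   # illustrative
glance::db::mysql::password: 'GlanceDBSecret'       # illustrative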
include ::keystone::db::mysql + include ::glance::db::mysql + include ::nova::db::mysql + include ::neutron::db::mysql + include ::cinder::db::mysql + include ::heat::db::mysql if downcase(hiera('ceilometer_backend')) == 'mysql' { - $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]') - class { 'ceilometer::db::mysql': - user => $ceilometer_dsn[3], - password => $ceilometer_dsn[4], - host => $ceilometer_dsn[5], - dbname => $ceilometer_dsn[6], - allowed_hosts => $allowed_hosts, - } + include ::ceilometer::db::mysql } $rabbit_nodes = hiera('rabbit_node_ips') @@ -182,26 +123,44 @@ if hiera('step') >= 2 { # pre-install swift here so we can build rings include ::swift - $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false) - $enable_ceph = $cinder_enable_rbd_backend + $enable_ceph = hiera('ceph_storage_count', 0) > 0 if $enable_ceph { - class { 'ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')) + class { '::ceph::profile::params': + mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } include ::ceph::profile::mon } - if str2bool(hiera('enable_ceph_storage', 'false')) { - include ::ceph::profile::client + if str2bool(hiera('enable_ceph_storage', false)) { + if str2bool(hiera('ceph_osd_selinux_permissive', true)) { + exec { 'set selinux to permissive on boot': + command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", + onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config", + path => ['/usr/bin', '/usr/sbin'], + } + + exec { 'set selinux to permissive': + command => 'setenforce 0', + onlyif => "which setenforce && getenforce | grep -i 'enforcing'", + path => ['/usr/bin', '/usr/sbin'], + } -> Class['ceph::profile::osd'] + } + include ::ceph::profile::osd } + if str2bool(hiera('enable_external_ceph', false)) { + include ::ceph::profile::client + } + } #END STEP 2 if hiera('step') >= 3 { include ::keystone + include ::keystone::roles::admin + include ::keystone::endpoint #TODO: need a cleanup-keystone-tokens.sh solution here keystone_config { @@ -237,21 +196,26 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $glance_store = 'glance.store.swift.Store' } - file: { $glance_store = 'glance.store.filesystem.Store' } - rbd: { $glance_store = 'glance.store.rbd.Store' } + 'swift': { $backend_store = 'glance.store.swift.Store' } + 'file': { $backend_store = 'glance.store.filesystem.Store' } + 'rbd': { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } + $http_store = ['glance.store.http.Store'] + $glance_store = concat($http_store, $backend_store) # TODO: notifications, scrubber, etc. 
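The HTTP store is now always prepended to the configured backend store, so known_stores ends up as a two-element list. For example, with the default backend:

glance_backend: 'swift'
# => $glance_store = ['glance.store.http.Store', 'glance.store.swift.Store']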
include ::glance - class { 'glance::api': - known_stores => [$glance_store] + class { '::glance::api': + known_stores => $glance_store, } include ::glance::registry include join(['::glance::backend::', $glance_backend]) - include ::nova + class { '::nova' : + memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), + } + include ::nova::config include ::nova::api include ::nova::cert include ::nova::conductor @@ -259,6 +223,7 @@ if hiera('step') >= 3 { include ::nova::network::neutron include ::nova::vncproxy include ::nova::scheduler + include ::nova::scheduler::filter include ::neutron include ::neutron::server @@ -274,13 +239,46 @@ if hiera('step') >= 3 { require => Package['neutron'], } - class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), + class { '::neutron::plugins::ml2': + flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], } - class { 'neutron::agents::ml2::ovs': + class { '::neutron::agents::ml2::ovs': bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + tunnel_types => split(hiera('neutron_tunnel_types'), ','), + } + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v + + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } + + class { '::n1k_vsm': + n1kv_source => hiera('n1kv_vsm_source', undef), + n1kv_version => hiera('n1kv_vsm_version', undef), + pacemaker_control => false, + } + } + + if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::ucsm + } + if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus + include ::neutron::plugins::ml2::cisco::type_nexus_vxlan + } + + if hiera('neutron_enable_bigswitch_ml2', false) { + include ::neutron::plugins::ml2::bigswitch::restproxy + } + neutron_l3_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + neutron_dhcp_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); } Service['neutron-server'] -> Service['neutron-dhcp-service'] @@ -293,7 +291,7 @@ if hiera('step') >= 3 { include ::cinder::glance include ::cinder::scheduler include ::cinder::volume - class {'cinder::setup_test_volume': + class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -317,20 +315,21 @@ if hiera('step') >= 3 { $ceph_pools = hiera('ceph_pools') ceph::pool { $ceph_pools : } + + $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] + + } else { + $cinder_pool_requires = [] } - if $cinder_enable_rbd_backend { + if hiera('cinder_enable_rbd_backend', false) { $cinder_rbd_backend = 'tripleo_ceph' - cinder_config { - "${cinder_rbd_backend}/host": value => 'hostgroup'; - } - cinder::backend::rbd { $cinder_rbd_backend : - rbd_pool => 'volumes', - rbd_user => 'openstack', + rbd_pool => hiera('cinder_rbd_pool_name'), + rbd_user => hiera('ceph_client_user_name'), rbd_secret_uuid => hiera('ceph::profile::params::fsid'), - require => Ceph::Pool['volumes'], + require => $cinder_pool_requires, } } @@ -341,16 +340,53 @@ if hiera('step') >= 3 { "${cinder_netapp_backend}/host": value => 'hostgroup'; } - if hiera('cinder_netapp_nfs_shares', undef) { 
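The RBD backend wiring above is now driven entirely by hiera instead of hard-coded pool and user names. A sketch of the relevant keys; the pool and user values are the previously hard-coded ones, the node count is illustrative:

ceph_storage_count: 3            # > 0 enables the Ceph mon profile on controllers
cinder_enable_rbd_backend: true
cinder_rbd_pool_name: 'volumes'
ceph_client_user_name: 'openstack'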
- $cinder_netapp_nfs_shares = split(hiera('cinder_netapp_nfs_shares', undef), ',') + if hiera('cinder::backend::netapp::nfs_shares', undef) { + $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') } cinder::backend::netapp { $cinder_netapp_backend : - nfs_shares => $cinder_netapp_nfs_shares, + netapp_login => hiera('cinder::backend::netapp::netapp_login', undef), + netapp_password => hiera('cinder::backend::netapp::netapp_password', undef), + netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef), + netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef), + netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef), + netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef), + netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef), + netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef), + netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef), + netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef), + netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef), + netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef), + nfs_shares => $cinder_netapp_nfs_shares, + nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef), + netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef), + netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef), + netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef), + netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef), + netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef), + netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef), } } - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend]) + if hiera('cinder_enable_nfs_backend', false) { + $cinder_nfs_backend = 'tripleo_nfs' + + if str2bool($::selinux) { + selboolean { 'virt_use_nfs': + value => on, + persistent => true, + } -> Package['nfs-utils'] + } + + package {'nfs-utils': } -> + cinder::backend::nfs { $cinder_nfs_backend : + nfs_servers => hiera('cinder_nfs_servers'), + nfs_mount_options => hiera('cinder_nfs_mount_options',''), + nfs_shares_config => '/etc/cinder/shares-nfs.conf', + } + } + + $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend]) class { '::cinder::backends' : enabled_backends => $cinder_enabled_backends, } @@ -370,9 +406,9 @@ if hiera('step') >= 3 { include ::swift::proxy::formpost # swift storage - if str2bool(hiera('enable_swift_storage', 'true')) { - class {'swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) + if str2bool(hiera('enable_swift_storage', true)) { + class { '::swift::storage::all': + mount_check => str2bool(hiera('swift_mount_check')), } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -398,6 +434,7 @@ if hiera('step') >= 3 { } } include ::ceilometer + include ::ceilometer::config include ::ceilometer::api include ::ceilometer::agent::notification include ::ceilometer::agent::central @@ -405,7 +442,7 @@ if hiera('step') >= 3 { 
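The new tripleo_nfs backend added above is gated on cinder_enable_nfs_backend and reads its share list and mount options from hiera. A sketch with illustrative values:

cinder_enable_nfs_backend: true
cinder_nfs_servers:
  - '192.0.2.230:/export/cinder'
cinder_nfs_mount_options: ''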
include ::ceilometer::alarm::evaluator include ::ceilometer::expirer include ::ceilometer::collector - include ceilometer::agent::auth + include ::ceilometer::agent::auth class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, } @@ -420,10 +457,16 @@ if hiera('step') >= 3 { include ::heat::engine # Horizon - $vhost_params = { add_listen => false } - class { 'horizon': - cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), - vhost_extra_params => $vhost_params, + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + $_profile_support = 'cisco' + } else { + $_profile_support = 'None' + } + $neutron_options = {'profile_support' => $_profile_support } + + class { '::horizon': + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), + neutron_options => $neutron_options, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -431,9 +474,18 @@ if hiera('step') >= 3 { authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } - class { 'snmp': + class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + hiera_include('controller_classes') + } #END STEP 3 + +if hiera('step') >= 4 { + include ::keystone::cron::token_flush +} #END STEP 4 + +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 3d693313..b9623714 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -18,16 +18,7 @@ Pcmk_resource <| |> { try_sleep => 3, } -if !str2bool(hiera('enable_package_install', 'false')) { - case $::osfamily { - 'RedHat': { - Package { provider => 'norpm' } # provided by tripleo-puppet - } - default: { - warning('enable_package_install option not supported.') - } - } -} +include ::tripleo::packages if $::hostname == downcase(hiera('bootstrap_nodeid')) { $pacemaker_master = true @@ -37,6 +28,8 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) { $sync_db = false } +$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5 + # When to start and enable services which haven't been Pacemakerized # FIXME: remove when we start all OpenStack services using Pacemaker # (occurences of this variable will be gradually replaced with false) @@ -62,7 +55,7 @@ if hiera('step') >= 1 { $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) user { 'hacluster': - ensure => present, + ensure => present, } -> class { '::pacemaker': hacluster_pwd => hiera('hacluster_pwd'), @@ -72,7 +65,20 @@ if hiera('step') >= 1 { setup_cluster => $pacemaker_master, } class { '::pacemaker::stonith': - disable => true, + disable => !$enable_fencing, + } + if $enable_fencing { + include ::tripleo::fencing + + # enable stonith after all fencing devices have been created + Class['tripleo::fencing'] -> Class['pacemaker::stonith'] + } + + # FIXME(gfidente): sets 100secs as default start timeout op + # param; until we can use pcmk global defaults we'll still + # need to add it to every resource which redefines op params + Pacemaker::Resource::Service { + op_params 
=> 'start timeout=100s stop timeout=100s', } # Only configure RabbitMQ in this step, don't start it yet to @@ -87,7 +93,7 @@ if hiera('step') >= 1 { environment_variables => hiera('rabbitmq_environment'), } -> file { '/var/lib/rabbitmq/.erlang.cookie': - ensure => 'present', + ensure => file, owner => 'rabbitmq', group => 'rabbitmq', mode => '0400', @@ -114,7 +120,7 @@ if hiera('step') >= 1 { } # Galera - if str2bool(hiera('enable_galera', 'true')) { + if str2bool(hiera('enable_galera', true)) { $mysql_config_file = '/etc/my.cnf.d/galera.cnf' } else { $mysql_config_file = '/etc/my.cnf.d/server.cnf' @@ -132,7 +138,7 @@ if hiera('step') >= 1 { 'query_cache_size' => '0', 'query_cache_type' => '0', 'bind-address' => hiera('mysql_bind_host'), - 'max_connections' => '1024', + 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', 'wsrep_cluster_name' => 'galera_cluster', @@ -148,16 +154,17 @@ if hiera('step') >= 1 { 'wsrep_causal_reads' => '0', 'wsrep_notify_cmd' => '', 'wsrep_sst_method' => 'rsync', - } + }, } class { '::mysql::server': - create_root_user => false, - create_root_my_cnf => false, - config_file => $mysql_config_file, - override_options => $mysqld_options, - service_manage => false, - service_enabled => false, + create_root_user => false, + create_root_my_cnf => false, + config_file => $mysql_config_file, + override_options => $mysqld_options, + remove_default_accounts => $pacemaker_master, + service_manage => false, + service_enabled => false, } } @@ -171,17 +178,84 @@ if hiera('step') >= 2 { if $pacemaker_master { + include ::pacemaker::resource_defaults + # FIXME: we should not have to access tripleo::loadbalancer class # parameters here to configure pacemaker VIPs. The configuration # of pacemaker VIPs could move into puppet-tripleo or we should # make use of less specific hiera parameters here for the settings. 
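Two of the new hiera-driven knobs in this manifest, for reference: enable_fencing only takes effect at step >= 5 and relies on tripleo::fencing having its devices configured, and mysql_max_connections replaces the previously hard-coded 1024. Illustrative values:

enable_fencing: false
mysql_max_connections: 4096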
+ pacemaker::resource::service { 'haproxy': + clone_params => true, + } + $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') pacemaker::resource::ip { 'control_vip': ip_address => $control_vip, } + pacemaker::constraint::base { 'control_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${control_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['control_vip']], + } + pacemaker::constraint::colocation { 'control_vip-with-haproxy': + source => "ip-${control_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['control_vip']], + } + $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') - pacemaker::resource::ip { 'public_vip': - ip_address => $public_vip, + if $public_vip and $public_vip != $control_vip { + pacemaker::resource::ip { 'public_vip': + ip_address => $public_vip, + } + pacemaker::constraint::base { 'public_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${public_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['public_vip']], + } + pacemaker::constraint::colocation { 'public_vip-with-haproxy': + source => "ip-${public_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['public_vip']], + } + } + + $redis_vip = hiera('redis_vip') + if $redis_vip and $redis_vip != $control_vip { + pacemaker::resource::ip { 'redis_vip': + ip_address => $redis_vip, + } + pacemaker::constraint::base { 'redis_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${redis_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['redis_vip']], + } + pacemaker::constraint::colocation { 'redis_vip-with-haproxy': + source => "ip-${redis_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['redis_vip']], + } } $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') @@ -189,6 +263,23 @@ if hiera('step') >= 2 { pacemaker::resource::ip { 'internal_api_vip': ip_address => $internal_api_vip, } + pacemaker::constraint::base { 'internal_api_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${internal_api_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['internal_api_vip']], + } + pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy': + source => "ip-${internal_api_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['internal_api_vip']], + } } $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') @@ -196,6 +287,23 @@ if hiera('step') >= 2 { pacemaker::resource::ip { 'storage_vip': ip_address => $storage_vip, } + pacemaker::constraint::base { 
'storage_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${storage_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_vip']], + } + pacemaker::constraint::colocation { 'storage_vip-with-haproxy': + source => "ip-${storage_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_vip']], + } } $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') @@ -203,13 +311,27 @@ if hiera('step') >= 2 { pacemaker::resource::ip { 'storage_mgmt_vip': ip_address => $storage_mgmt_vip, } + pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${storage_mgmt_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_mgmt_vip']], + } + pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy': + source => "ip-${storage_mgmt_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_mgmt_vip']], + } } - pacemaker::resource::service { 'haproxy': - clone_params => true, - } pacemaker::resource::service { $::memcached::params::service_name : - clone_params => true, + clone_params => 'interleave=true', require => Class['::memcached'], } @@ -222,7 +344,7 @@ if hiera('step') >= 2 { if downcase(hiera('ceilometer_backend')) == 'mongodb' { pacemaker::resource::service { $::mongodb::params::service_name : - op_params => 'start timeout=120s', + op_params => 'start timeout=120s stop timeout=100s', clone_params => true, require => Class['::mongodb::server'], } @@ -255,28 +377,6 @@ if hiera('step') >= 2 { resource_params => 'wait_last_known_master=true', require => Class['::redis'], } - $redis_vip = hiera('redis_vip') - if $redis_vip and $redis_vip != $control_vip { - pacemaker::resource::ip { 'vip-redis': - ip_address => $redis_vip, - } - } - pacemaker::constraint::base { 'redis-master-then-vip-redis': - constraint_type => 'order', - first_resource => 'redis-master', - second_resource => "ip-${redis_vip}", - first_action => 'promote', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Ip['vip-redis']], - } - pacemaker::constraint::colocation { 'vip-redis-with-redis-master': - source => "ip-${redis_vip}", - target => 'redis-master', - score => 'INFINITY', - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Ip['vip-redis']], - } } @@ -285,7 +385,7 @@ if hiera('step') >= 2 { timeout => 30, tries => 180, try_sleep => 10, - environment => ["AVAILABLE_WHEN_READONLY=0"], + environment => ['AVAILABLE_WHEN_READONLY=0'], require => File['/etc/sysconfig/clustercheck'], } @@ -310,72 +410,29 @@ MYSQL_HOST=localhost\n", } # Create all the database schemas - # Example DSN format: mysql://user:password@host/dbname if $sync_db { - $allowed_hosts = ['%',hiera('mysql_bind_host')] - $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]') - class { 'keystone::db::mysql': - user => $keystone_dsn[3], - password => $keystone_dsn[4], - host => $keystone_dsn[5], - dbname => $keystone_dsn[6], - allowed_hosts => $allowed_hosts, 
- require => Exec['galera-ready'], - } - $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]') - class { 'glance::db::mysql': - user => $glance_dsn[3], - password => $glance_dsn[4], - host => $glance_dsn[5], - dbname => $glance_dsn[6], - allowed_hosts => $allowed_hosts, - require => Exec['galera-ready'], - } - $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]') - class { 'nova::db::mysql': - user => $nova_dsn[3], - password => $nova_dsn[4], - host => $nova_dsn[5], - dbname => $nova_dsn[6], - allowed_hosts => $allowed_hosts, - require => Exec['galera-ready'], - } - $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]') - class { 'neutron::db::mysql': - user => $neutron_dsn[3], - password => $neutron_dsn[4], - host => $neutron_dsn[5], - dbname => $neutron_dsn[6], - allowed_hosts => $allowed_hosts, - require => Exec['galera-ready'], - } - $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]') - class { 'cinder::db::mysql': - user => $cinder_dsn[3], - password => $cinder_dsn[4], - host => $cinder_dsn[5], - dbname => $cinder_dsn[6], - allowed_hosts => $allowed_hosts, - require => Exec['galera-ready'], - } - $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]') - class { 'heat::db::mysql': - user => $heat_dsn[3], - password => $heat_dsn[4], - host => $heat_dsn[5], - dbname => $heat_dsn[6], - allowed_hosts => $allowed_hosts, - require => Exec['galera-ready'], + class { '::keystone::db::mysql': + require => Exec['galera-ready'], + } + class { '::glance::db::mysql': + require => Exec['galera-ready'], + } + class { '::nova::db::mysql': + require => Exec['galera-ready'], + } + class { '::neutron::db::mysql': + require => Exec['galera-ready'], + } + class { '::cinder::db::mysql': + require => Exec['galera-ready'], } + class { '::heat::db::mysql': + require => Exec['galera-ready'], + } + if downcase(hiera('ceilometer_backend')) == 'mysql' { - $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]') - class { 'ceilometer::db::mysql': - user => $ceilometer_dsn[3], - password => $ceilometer_dsn[4], - host => $ceilometer_dsn[5], - dbname => $ceilometer_dsn[6], - allowed_hosts => $allowed_hosts, - require => Exec['galera-ready'], + class { '::ceilometer::db::mysql': + require => Exec['galera-ready'], } } } @@ -384,30 +441,46 @@ MYSQL_HOST=localhost\n", include ::swift # Ceph - $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false) - $enable_ceph = $cinder_enable_rbd_backend + $enable_ceph = hiera('ceph_storage_count', 0) > 0 if $enable_ceph { - class { 'ceph::profile::params': - mon_initial_members => downcase(hiera('ceph_mon_initial_members')) + class { '::ceph::profile::params': + mon_initial_members => downcase(hiera('ceph_mon_initial_members')), } include ::ceph::profile::mon } - if str2bool(hiera('enable_ceph_storage', 'false')) { - include ::ceph::profile::client + if str2bool(hiera('enable_ceph_storage', false)) { + if str2bool(hiera('ceph_osd_selinux_permissive', true)) { + exec { 'set selinux to permissive on boot': + command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config", + onlyif => "test -f /etc/selinux/config && ! 
grep '^SELINUX=permissive' /etc/selinux/config", + path => ['/usr/bin', '/usr/sbin'], + } + + exec { 'set selinux to permissive': + command => 'setenforce 0', + onlyif => "which setenforce && getenforce | grep -i 'enforcing'", + path => ['/usr/bin', '/usr/sbin'], + } -> Class['ceph::profile::osd'] + } + include ::ceph::profile::osd } + if str2bool(hiera('enable_external_ceph', false)) { + include ::ceph::profile::client + } + } #END STEP 2 if hiera('step') >= 3 { class { '::keystone': - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } #TODO: need a cleanup-keystone-tokens.sh solution here @@ -444,73 +517,90 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $glance_store = 'glance.store.swift.Store' } - file: { $glance_store = 'glance.store.filesystem.Store' } - rbd: { $glance_store = 'glance.store.rbd.Store' } + 'swift': { $backend_store = 'glance.store.swift.Store' } + 'file': { $backend_store = 'glance.store.filesystem.Store' } + 'rbd': { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } + $http_store = ['glance.store.http.Store'] + $glance_store = concat($http_store, $backend_store) + + if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) { + pacemaker::resource::filesystem { 'glance-fs': + device => hiera('glance_file_pcmk_device'), + directory => hiera('glance_file_pcmk_directory'), + fstype => hiera('glance_file_pcmk_fstype'), + fsoptions => hiera('glance_file_pcmk_options', ''), + clone_params => '', + } + } # TODO: notifications, scrubber, etc. include ::glance - class { 'glance::api': - known_stores => [$glance_store], + class { '::glance::api': + known_stores => $glance_store, manage_service => false, - enabled => false, + enabled => false, } class { '::glance::registry' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } include join(['::glance::backend::', $glance_backend]) - include ::nova + class { '::nova' : + memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'), + } + + include ::nova::config class { '::nova::api' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::nova::cert' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::conductor' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::consoleauth' : manage_service => false, - enabled => false, + enabled => false, } class { '::nova::vncproxy' : manage_service => false, - enabled => false, + enabled => false, } + include ::nova::scheduler::filter class { '::nova::scheduler' : manage_service => false, - enabled => false, + enabled => false, } include ::nova::network::neutron # Neutron class definitions include ::neutron class { '::neutron::server' : - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::neutron::agents::dhcp' : manage_service => false, - enabled => false, + enabled => false, } class { '::neutron::agents::l3' : manage_service => false, - enabled => false, + enabled => false, } - class { 'neutron::agents::metadata': + class { '::neutron::agents::metadata': manage_service => false, - enabled => false, + enabled => false, } file { '/etc/neutron/dnsmasq-neutron.conf': content => hiera('neutron_dnsmasq_options'), @@ -519,33 
+609,65 @@ if hiera('step') >= 3 { notify => Service['neutron-dhcp-service'], require => Package['neutron'], } - class { 'neutron::plugins::ml2': - flat_networks => split(hiera('neutron_flat_networks'), ','), + class { '::neutron::plugins::ml2': + flat_networks => split(hiera('neutron_flat_networks'), ','), tenant_network_types => [hiera('neutron_tenant_network_type')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], + } + class { '::neutron::agents::ml2::ovs': + manage_service => false, + enabled => false, + bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), + tunnel_types => split(hiera('neutron_tunnel_types'), ','), + } + + if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::ucsm + } + if 'cisco_nexus' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus + include ::neutron::plugins::ml2::cisco::type_nexus_vxlan + } + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v + + class { '::neutron::agents::n1kv_vem': + n1kv_source => hiera('n1kv_vem_source', undef), + n1kv_version => hiera('n1kv_vem_version', undef), + } + + class { '::n1k_vsm': + n1kv_source => hiera('n1kv_vsm_source', undef), + n1kv_version => hiera('n1kv_vsm_version', undef), + } } - class { 'neutron::agents::ml2::ovs': - # manage_service => false # not implemented - enabled => false, - bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), - tunnel_types => split(hiera('neutron_tunnel_types'), ','), + + if hiera('neutron_enable_bigswitch_ml2', false) { + include ::neutron::plugins::ml2::bigswitch::restproxy + } + neutron_l3_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); + } + neutron_dhcp_agent_config { + 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false); } include ::cinder class { '::cinder::api': - sync_db => $sync_db, + sync_db => $sync_db, manage_service => false, - enabled => false, + enabled => false, } class { '::cinder::scheduler' : manage_service => false, - enabled => false, + enabled => false, } class { '::cinder::volume' : manage_service => false, - enabled => false, + enabled => false, } include ::cinder::glance - class {'cinder::setup_test_volume': + class { '::cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -569,20 +691,21 @@ if hiera('step') >= 3 { $ceph_pools = hiera('ceph_pools') ceph::pool { $ceph_pools : } + + $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]] + + } else { + $cinder_pool_requires = [] } - if $cinder_enable_rbd_backend { + if hiera('cinder_enable_rbd_backend', false) { $cinder_rbd_backend = 'tripleo_ceph' - cinder_config { - "${cinder_rbd_backend}/host": value => 'hostgroup'; - } - cinder::backend::rbd { $cinder_rbd_backend : - rbd_pool => 'volumes', - rbd_user => 'openstack', + rbd_pool => hiera('cinder_rbd_pool_name'), + rbd_user => hiera('ceph_client_user_name'), rbd_secret_uuid => hiera('ceph::profile::params::fsid'), - require => Ceph::Pool['volumes'], + require => $cinder_pool_requires, } } @@ -593,16 +716,53 @@ if hiera('step') >= 3 { "${cinder_netapp_backend}/host": value => 'hostgroup'; } - if hiera('cinder_netapp_nfs_shares', undef) { - $cinder_netapp_nfs_shares = split(hiera('cinder_netapp_nfs_shares', undef), ',') + if hiera('cinder::backend::netapp::nfs_shares', undef) { + $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',') } 
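The NetApp backend now reads every setting from fully-qualified cinder::backend::netapp::* hiera keys rather than the old cinder_netapp_nfs_shares shortcut. A partial sketch with illustrative values, showing only a few of the keys listed above:

cinder::backend::netapp::netapp_login: 'admin'
cinder::backend::netapp::netapp_server_hostname: 'netapp.example.com'
cinder::backend::netapp::netapp_storage_family: 'ontap_cluster'
cinder::backend::netapp::netapp_storage_protocol: 'nfs'
cinder::backend::netapp::nfs_shares: '192.0.2.230:/vol1,192.0.2.230:/vol2'
cinder::backend::netapp::nfs_shares_config: '/etc/cinder/shares.conf'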
cinder::backend::netapp { $cinder_netapp_backend : - nfs_shares => $cinder_netapp_nfs_shares, - } - } - - $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend]) + netapp_login => hiera('cinder::backend::netapp::netapp_login', undef), + netapp_password => hiera('cinder::backend::netapp::netapp_password', undef), + netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef), + netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef), + netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef), + netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef), + netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef), + netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef), + netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef), + netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef), + netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef), + netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef), + nfs_shares => $cinder_netapp_nfs_shares, + nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef), + netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef), + netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef), + netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef), + netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef), + netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef), + netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef), + } + } + + if hiera('cinder_enable_nfs_backend', false) { + $cinder_nfs_backend = 'tripleo_nfs' + + if str2bool($::selinux) { + selboolean { 'virt_use_nfs': + value => on, + persistent => true, + } -> Package['nfs-utils'] + } + + package { 'nfs-utils': } -> + cinder::backend::nfs { $cinder_nfs_backend: + nfs_servers => hiera('cinder_nfs_servers'), + nfs_mount_options => hiera('cinder_nfs_mount_options',''), + nfs_shares_config => '/etc/cinder/shares-nfs.conf', + } + } + + $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend]) class { '::cinder::backends' : enabled_backends => $cinder_enabled_backends, } @@ -610,7 +770,7 @@ if hiera('step') >= 3 { # swift proxy class { '::swift::proxy' : manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } include ::swift::proxy::proxy_logging include ::swift::proxy::healthcheck @@ -618,28 +778,27 @@ if hiera('step') >= 3 { include ::swift::proxy::keystone include ::swift::proxy::authtoken include ::swift::proxy::staticweb - include ::swift::proxy::ceilometer include ::swift::proxy::ratelimit include ::swift::proxy::catch_errors include ::swift::proxy::tempurl include ::swift::proxy::formpost # swift storage - if str2bool(hiera('enable_swift_storage', 'true')) { + if str2bool(hiera('enable_swift_storage', true)) { class {'::swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) + mount_check => str2bool(hiera('swift_mount_check')), } class {'::swift::storage::account': manage_service => 
$non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } class {'::swift::storage::container': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } class {'::swift::storage::object': manage_service => $non_pcmk_start, - enabled => $non_pcmk_start, + enabled => $non_pcmk_start, } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -655,47 +814,47 @@ if hiera('step') >= 3 { } # Ceilometer - $ceilometer_backend = downcase(hiera('ceilometer_backend')) - case $ceilometer_backend { - /mysql/ : { + case downcase(hiera('ceilometer_backend')) { + /mysql/: { $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') } - default : { + default: { $mongo_node_string = join($mongo_node_ips_with_port, ',') $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" } } include ::ceilometer + include ::ceilometer::config class { '::ceilometer::api' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::agent::notification' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::agent::central' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::alarm::notifier' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::alarm::evaluator' : manage_service => false, - enabled => false, + enabled => false, } class { '::ceilometer::collector' : manage_service => false, - enabled => false, + enabled => false, } include ::ceilometer::expirer class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, sync_db => $sync_db, } - include ceilometer::agent::auth + include ::ceilometer::agent::auth Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } @@ -705,33 +864,37 @@ if hiera('step') >= 3 { } class { '::heat::api' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::api_cfn' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::api_cloudwatch' : manage_service => false, - enabled => false, + enabled => false, } class { '::heat::engine' : manage_service => false, - enabled => false, + enabled => false, } # httpd/apache and horizon # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent - include ::apache + class { '::apache' : + service_enable => false, + # service_manage => false, # <-- not supported with horizon&apache mod_wsgi? 
+ } include ::apache::mod::status - $vhost_params = { - add_listen => false, - priority => 10, + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + $_profile_support = 'cisco' + } else { + $_profile_support = 'None' } - class { 'horizon': - cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), - vhost_extra_params => $vhost_params, - server_aliases => $::hostname, + $neutron_options = {'profile_support' => $_profile_support } + class { '::horizon': + cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'), + neutron_options => $neutron_options, } $snmpd_user = hiera('snmpd_readonly_user_name') @@ -739,28 +902,73 @@ if hiera('step') >= 3 { authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } - class { 'snmp': + class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + hiera_include('controller_classes') + } #END STEP 3 if hiera('step') >= 4 { + include ::keystone::cron::token_flush + if $pacemaker_master { # Keystone pacemaker::resource::service { $::keystone::params::service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', + verify_on_create => true, + require => [File['/etc/keystone/ssl/certs/ca.pem'], + File['/etc/keystone/ssl/private/signing_key.pem'], + File['/etc/keystone/ssl/certs/signing_cert.pem']], + } + + pacemaker::constraint::base { 'haproxy-then-keystone-constraint': + constraint_type => 'order', + first_resource => 'haproxy-clone', + second_resource => "${::keystone::params::service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } + pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint': + constraint_type => 'order', + first_resource => 'rabbitmq-clone', + second_resource => "${::keystone::params::service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Ocf['rabbitmq'], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } + pacemaker::constraint::base { 'memcached-then-keystone-constraint': + constraint_type => 'order', + first_resource => 'memcached-clone', + second_resource => "${::keystone::params::service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service['memcached'], + Pacemaker::Resource::Service[$::keystone::params::service_name]], + } + pacemaker::constraint::base { 'galera-then-keystone-constraint': + constraint_type => 'order', + first_resource => 'galera-master', + second_resource => "${::keystone::params::service_name}-clone", + first_action => 'promote', + second_action => 'start', + require => [Pacemaker::Resource::Ocf['galera'], + Pacemaker::Resource::Service[$::keystone::params::service_name]], } # Cinder pacemaker::resource::service { $::cinder::params::api_service : - clone_params => "interleave=true", + clone_params => 'interleave=true', require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::cinder::params::scheduler_service : - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::cinder::params::volume_service : } @@ -774,45 
+982,45 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint': - constraint_type => "order", - first_resource => "${::cinder::params::api_service}-clone", + constraint_type => 'order', + first_resource => "${::cinder::params::api_service}-clone", second_resource => "${::cinder::params::scheduler_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::cinder::params::api_service], - Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::cinder::params::api_service], + Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], } pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation': - source => "${::cinder::params::scheduler_service}-clone", - target => "${::cinder::params::api_service}-clone", - score => "INFINITY", + source => "${::cinder::params::scheduler_service}-clone", + target => "${::cinder::params::api_service}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::cinder::params::api_service], Pacemaker::Resource::Service[$::cinder::params::scheduler_service]], } pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint': - constraint_type => "order", - first_resource => "${::cinder::params::scheduler_service}-clone", - second_resource => "${::cinder::params::volume_service}", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], - Pacemaker::Resource::Service[$::cinder::params::volume_service]], + constraint_type => 'order', + first_resource => "${::cinder::params::scheduler_service}-clone", + second_resource => $::cinder::params::volume_service, + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], + Pacemaker::Resource::Service[$::cinder::params::volume_service]], } pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation': - source => "${::cinder::params::volume_service}", - target => "${::cinder::params::scheduler_service}-clone", - score => "INFINITY", + source => $::cinder::params::volume_service, + target => "${::cinder::params::scheduler_service}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service], Pacemaker::Resource::Service[$::cinder::params::volume_service]], } # Glance pacemaker::resource::service { $::glance::params::registry_service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::glance::params::api_service_name : - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::constraint::base { 'keystone-then-glance-registry-constraint': @@ -825,18 +1033,18 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::glance::params::registry_service_name}-clone", second_resource => "${::glance::params::api_service_name}-clone", - first_action => "start", - second_action => "start", - require => 
[Pacemaker::Resource::Service[$::glance::params::registry_service_name], - Pacemaker::Resource::Service[$::glance::params::api_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], + Pacemaker::Resource::Service[$::glance::params::api_service_name]], } pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation': source => "${::glance::params::api_service_name}-clone", target => "${::glance::params::registry_service_name}-clone", - score => "INFINITY", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name], Pacemaker::Resource::Service[$::glance::params::api_service_name]], } @@ -846,178 +1054,156 @@ if hiera('step') >= 4 { # as soon as neutron-server is started; to avoid races we want to make this # happen only on one node, before normal Pacemaker initialization # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - exec { 'neutron-server-start-wait-stop' : - command => "systemctl start neutron-server && \ - sleep 5s && \ - systemctl stop neutron-server", - path => ["/usr/bin", "/usr/sbin"], - } -> + exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } -> pacemaker::resource::service { $::neutron::params::server_service: - op_params => "start timeout=90", - clone_params => "interleave=true", - require => Pacemaker::Resource::Service[$::keystone::params::service_name] + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::neutron::params::l3_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::neutron::params::dhcp_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::neutron::params::ovs_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::service { $::neutron::params::metadata_agent_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: - ocf_agent_name => "neutron:OVSCleanup", - clone_params => "interleave=true", + ocf_agent_name => 'neutron:OVSCleanup', + clone_params => 'interleave=true', } pacemaker::resource::ocf { 'neutron-netns-cleanup': - ocf_agent_name => "neutron:NetnsCleanup", - clone_params => "interleave=true", - } - pacemaker::resource::ocf { 'neutron-scale': - ocf_agent_name => "neutron:NeutronScale", - clone_params => "globally-unique=true clone-max=3 interleave=true", - } - pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => "order", - first_resource => "${::keystone::params::service_name}-clone", - second_resource => "${::neutron::params::server_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } - pacemaker::constraint::base { 'neutron-server-to-neutron-scale-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::server_service}-clone", - second_resource => "neutron-scale-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::neutron::params::server_service], - 
Pacemaker::Resource::Ocf['neutron-scale']], - } - pacemaker::constraint::base { 'neutron-scale-to-ovs-cleanup-constraint': - constraint_type => "order", - first_resource => "neutron-scale-clone", - second_resource => "${::neutron::params::ovs_cleanup_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Ocf['neutron-scale'], - Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]], - } - pacemaker::constraint::colocation { 'neutron-scale-to-ovs-cleanup-colocation': - source => "${::neutron::params::ovs_cleanup_service}-clone", - target => "neutron-scale-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Ocf['neutron-scale'], - Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]], + ocf_agent_name => 'neutron:NetnsCleanup', + clone_params => 'interleave=true', } + + # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::ovs_cleanup_service}-clone", - second_resource => "neutron-netns-cleanup-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], - Pacemaker::Resource::Ocf['neutron-netns-cleanup']], + constraint_type => 'order', + first_resource => "${::neutron::params::ovs_cleanup_service}-clone", + second_resource => 'neutron-netns-cleanup-clone', + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], + Pacemaker::Resource::Ocf['neutron-netns-cleanup']], } pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': - source => "neutron-netns-cleanup-clone", - target => "${::neutron::params::ovs_cleanup_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], + source => 'neutron-netns-cleanup-clone', + target => "${::neutron::params::ovs_cleanup_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], Pacemaker::Resource::Ocf['neutron-netns-cleanup']], } pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': - constraint_type => "order", - first_resource => "neutron-netns-cleanup-clone", + constraint_type => 'order', + first_resource => 'neutron-netns-cleanup-clone', second_resource => "${::neutron::params::ovs_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], - Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': - source => "${::neutron::params::ovs_agent_service}-clone", - target => "neutron-netns-cleanup-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], - Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], + source => "${::neutron::params::ovs_agent_service}-clone", + target => 'neutron-netns-cleanup-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], + 
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], + } + + #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 + pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", + second_resource => "${::neutron::params::server_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], + } + pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': + constraint_type => 'order', + first_resource => "${::neutron::params::server_service}-clone", + second_resource => "${::neutron::params::ovs_agent_service}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::server_service], + Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], } pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::ovs_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::neutron::params::ovs_agent_service}-clone", second_resource => "${::neutron::params::dhcp_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], } pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': - source => "${::neutron::params::dhcp_agent_service}-clone", - target => "${::neutron::params::ovs_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], + source => "${::neutron::params::dhcp_agent_service}-clone", + target => "${::neutron::params::ovs_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], + Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], } pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::dhcp_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::neutron::params::dhcp_agent_service}-clone", second_resource => "${::neutron::params::l3_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], } pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation': - source => "${::neutron::params::l3_agent_service}-clone", - target => "${::neutron::params::dhcp_agent_service}-clone", - score => "INFINITY", - require => 
[Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]] + source => "${::neutron::params::l3_agent_service}-clone", + target => "${::neutron::params::dhcp_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service], + Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]], } pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint': - constraint_type => "order", - first_resource => "${::neutron::params::l3_agent_service}-clone", + constraint_type => 'order', + first_resource => "${::neutron::params::l3_agent_service}-clone", second_resource => "${::neutron::params::metadata_agent_service}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], } pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation': - source => "${::neutron::params::metadata_agent_service}-clone", - target => "${::neutron::params::l3_agent_service}-clone", - score => "INFINITY", - require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"], - Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]] + source => "${::neutron::params::metadata_agent_service}-clone", + target => "${::neutron::params::l3_agent_service}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service], + Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]], } # Nova pacemaker::resource::service { $::nova::params::api_service_name : - clone_params => "interleave=true", - op_params => "monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::conductor_service_name : - clone_params => "interleave=true", - op_params => "monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::consoleauth_service_name : - clone_params => "interleave=true", - op_params => "monitor start-delay=10s", - require => Pacemaker::Resource::Service[$::keystone::params::service_name], + clone_params => 'interleave=true', + op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], } pacemaker::resource::service { $::nova::params::vncproxy_service_name : - clone_params => "interleave=true", - op_params => "monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', } pacemaker::resource::service { $::nova::params::scheduler_service_name : - clone_params => "interleave=true", - op_params => "monitor start-delay=10s", + clone_params => 'interleave=true', + op_params => 'start timeout=100s stop timeout=100s monitor start-delay=10s', } pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint': @@ -1030,77 +1216,85 @@ 
if hiera('step') >= 4 { Pacemaker::Resource::Service[$::keystone::params::service_name]], } pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::consoleauth_service_name}-clone", second_resource => "${::nova::params::vncproxy_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], - Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], + Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], } pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation': - source => "${::nova::params::vncproxy_service_name}-clone", - target => "${::nova::params::consoleauth_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::vncproxy_service_name}-clone", + target => "${::nova::params::consoleauth_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name], Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]], } - # FIXME(gfidente): novncproxy will not start unless websockify is updated to 0.6 - # which is not the case for f20 nor f21; ucomment when it becomes available - #pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint': - # constraint_type => "order", - # first_resource => "${::nova::params::vncproxy_service_name}-clone", - # second_resource => "${::nova::params::api_service_name}-clone", - # first_action => "start", - # second_action => "start", - # require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - # Pacemaker::Resource::Service[$::nova::params::api_service_name]], - #} - #pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation': - # source => "${::nova::params::api_service_name}-clone", - # target => "${::nova::params::vncproxy_service_name}-clone", - # score => "INFINITY", - # require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], - # Pacemaker::Resource::Service[$::nova::params::api_service_name]], - #} + pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint': + constraint_type => 'order', + first_resource => "${::nova::params::vncproxy_service_name}-clone", + second_resource => "${::nova::params::api_service_name}-clone", + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], + Pacemaker::Resource::Service[$::nova::params::api_service_name]], + } + pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation': + source => "${::nova::params::api_service_name}-clone", + target => "${::nova::params::vncproxy_service_name}-clone", + score => 'INFINITY', + require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name], + Pacemaker::Resource::Service[$::nova::params::api_service_name]], + } pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::api_service_name}-clone", second_resource => "${::nova::params::scheduler_service_name}-clone", - first_action => "start", - second_action => "start", - require => 
[Pacemaker::Resource::Service[$::nova::params::api_service_name], - Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], + Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], } pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation': - source => "${::nova::params::scheduler_service_name}-clone", - target => "${::nova::params::api_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::scheduler_service_name}-clone", + target => "${::nova::params::api_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::api_service_name], Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]], } pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint': - constraint_type => "order", + constraint_type => 'order', first_resource => "${::nova::params::scheduler_service_name}-clone", second_resource => "${::nova::params::conductor_service_name}-clone", - first_action => "start", - second_action => "start", - require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], - Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], + first_action => 'start', + second_action => 'start', + require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], + Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation': - source => "${::nova::params::conductor_service_name}-clone", - target => "${::nova::params::scheduler_service_name}-clone", - score => "INFINITY", + source => "${::nova::params::conductor_service_name}-clone", + target => "${::nova::params::scheduler_service_name}-clone", + score => 'INFINITY', require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name], Pacemaker::Resource::Service[$::nova::params::conductor_service_name]], } # Ceilometer - pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : - clone_params => 'interleave=true', - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::mongodb::params::service_name]], + case downcase(hiera('ceilometer_backend')) { + /mysql/: { + pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : + clone_params => 'interleave=true', + require => Pacemaker::Resource::Service[$::keystone::params::service_name], + } + } + default: { + pacemaker::resource::service { $::ceilometer::params::agent_central_service_name : + clone_params => 'interleave=true', + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::mongodb::params::service_name]], + } + } } pacemaker::resource::service { $::ceilometer::params::collector_service_name : clone_params => 'interleave=true', @@ -1122,6 +1316,22 @@ if hiera('step') >= 4 { clone_params => 'interleave=true', resource_params => 'startdelay=10', } + # Fedora doesn't know `require-all` parameter for constraints yet + if $::operatingsystem == 'Fedora' { + $redis_ceilometer_constraint_params = undef + } else { + $redis_ceilometer_constraint_params = 'require-all=false' + } + pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint': + constraint_type => 'order', + first_resource => 
'redis-master', + second_resource => "${::ceilometer::params::agent_central_service_name}-clone", + first_action => 'promote', + second_action => 'start', + constraint_params => $redis_ceilometer_constraint_params, + require => [Pacemaker::Resource::Ocf['redis'], + Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]], + } pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint': constraint_type => 'order', first_resource => "${::keystone::params::service_name}-clone", @@ -1231,15 +1441,6 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::mongodb::params::service_name]], } } - pacemaker::constraint::base { 'vip-redis-then-ceilometer-central': - constraint_type => 'order', - first_resource => "ip-${redis_vip}", - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ip['vip-redis']], - } # Heat pacemaker::resource::service { $::heat::params::api_service_name : @@ -1269,8 +1470,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::api_cfn_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], + require => [Pacemaker::Resource::Service[$::heat::params::api_service_name], + Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], } pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation': source => "${::heat::params::api_cfn_service_name}-clone", @@ -1285,8 +1486,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::api_cloudwatch_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], + require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], + Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]], } pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation': source => "${::heat::params::api_cloudwatch_service_name}-clone", @@ -1301,8 +1502,8 @@ if hiera('step') >= 4 { second_resource => "${::heat::params::engine_service_name}-clone", first_action => 'start', second_action => 'start', - require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], - Pacemaker::Resource::Service[$::heat::params::engine_service_name]], + require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name], + Pacemaker::Resource::Service[$::heat::params::engine_service_name]], } pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation': source => "${::heat::params::engine_service_name}-clone", @@ -1323,10 +1524,52 @@ if hiera('step') >= 4 { # Horizon pacemaker::resource::service { $::horizon::params::http_service: - clone_params => "interleave=true", + clone_params => 'interleave=true', } + #VSM + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + pacemaker::resource::ocf { 'vsm-p' : + ocf_agent_name => 'heartbeat:VirtualDomain', + resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml', + require => Class['n1k_vsm'], + meta_params => 'resource-stickiness=INFINITY', + } + if 
str2bool(hiera('n1k_vsm::pacemaker_control', true)) { + pacemaker::resource::ocf { 'vsm-s' : + ocf_agent_name => 'heartbeat:VirtualDomain', + resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml', + require => Class['n1k_vsm'], + meta_params => 'resource-stickiness=INFINITY', + } + pacemaker::constraint::colocation { 'vsm-colocation-contraint': + source => 'vsm-p', + target => 'vsm-s', + score => '-INFINITY', + require => [Pacemaker::Resource::Ocf['vsm-p'], + Pacemaker::Resource::Ocf['vsm-s']], + } + } + } } } #END STEP 4 + +if hiera('step') >= 5 { + + if $pacemaker_master { + + class {'::keystone::roles::admin' : + require => Pacemaker::Resource::Service[$::keystone::params::service_name], + } -> + class {'::keystone::endpoint' : + require => Pacemaker::Resource::Service[$::keystone::params::service_name], + } + + } + +} #END STEP 5 + +$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')]) +package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 24799c8c..5f0b4c82 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -13,16 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -if !str2bool(hiera('enable_package_install', 'false')) { - case $::osfamily { - 'RedHat': { - Package { provider => 'norpm' } # provided by tripleo-puppet - } - default: { - warning('enable_package_install option not supported.') - } - } -} +include ::tripleo::packages create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -31,8 +22,8 @@ if count(hiera('ntp::servers')) > 0 { } include ::swift -class {'swift::storage::all': - mount_check => str2bool(hiera('swift_mount_check')) +class { '::swift::storage::all': + mount_check => str2bool(hiera('swift_mount_check')), } if(!defined(File['/srv/node'])) { file { '/srv/node': @@ -52,7 +43,10 @@ snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } -class { 'snmp': +class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + +hiera_include('object_classes') +package_manifest{'/var/lib/tripleo/installed-packages/overcloud_object': ensure => present} diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index edfeaeca..7f24959a 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -13,16 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-if str2bool(hiera('disable_package_install', 'false')) { - case $::osfamily { - 'RedHat': { - Package { provider => 'norpm' } # provided by tripleo-puppet - } - default: { - warning('disable_package_install option not supported.') - } - } -} +include ::tripleo::packages create_resources(sysctl::value, hiera('sysctl_settings'), {}) @@ -31,6 +22,7 @@ if count(hiera('ntp::servers')) > 0 { } include ::cinder +include ::cinder::config include ::cinder::glance include ::cinder::volume include ::cinder::setup_test_volume @@ -55,7 +47,10 @@ snmp::snmpv3_user { $snmpd_user: authtype => 'MD5', authpass => hiera('snmpd_readonly_user_password'), } -class { 'snmp': +class { '::snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + +hiera_include('volume_classes') +package_manifest{'/var/lib/tripleo/installed-packages/overcloud_volume': ensure => present} diff --git a/puppet/manifests/ringbuilder.pp b/puppet/manifests/ringbuilder.pp index 531706d2..4296208b 100644 --- a/puppet/manifests/ringbuilder.pp +++ b/puppet/manifests/ringbuilder.pp @@ -13,16 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -if str2bool(hiera('disable_package_install', 'false')) { - case $::osfamily { - 'RedHat': { - Package { provider => 'norpm' } # provided by tripleo-puppet - } - default: { - warning('disable_package_install option not supported.') - } - } -} +include ::tripleo::packages define add_devices( $swift_zones = '1' @@ -46,31 +37,33 @@ define add_devices( $base = regsubst($name,'^r1.*-(.*)$','\1') $object = regsubst($base, '%PORT%', '6000') ring_object_device { $object: - zone => '1', - weight => 100, + zone => '1', + weight => 100, } $container = regsubst($base, '%PORT%', '6001') ring_container_device { $container: - zone => '1', - weight => 100, + zone => '1', + weight => 100, } $account = regsubst($base, '%PORT%', '6002') ring_account_device { $account: - zone => '1', - weight => 100, + zone => '1', + weight => 100, } } class tripleo::ringbuilder ( $swift_zones = '1', $devices = '', - $build_ring = 'True', + $build_ring = true, $part_power, $replicas, $min_part_hours, ) { - if str2bool(downcase("$build_ring")) { + validate_bool($build_ring) + + if $build_ring { $device_array = strip(split(rstrip($devices), ',')) @@ -83,7 +76,7 @@ class tripleo::ringbuilder ( # add all other devices add_devices {$device_array: - swift_zones => $swift_zones + swift_zones => $swift_zones, } -> # rebalance @@ -99,3 +92,5 @@ class tripleo::ringbuilder ( } include ::tripleo::ringbuilder + +package_manifest{'/var/lib/tripleo/installed-packages/ringbuilder': ensure => present} diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml index ee50c86a..d22f5386 100644 --- a/puppet/swift-storage-post.yaml +++ b/puppet/swift-storage-post.yaml @@ -2,6 +2,10 @@ heat_template_version: 2015-04-30 description: 'OpenStack swift storage node post deployment for Puppet' parameters: + ConfigDebug: + default: false + description: Whether to run config management (e.g. Puppet) in debug mode. 
+ type: boolean servers: type: json NodeConfigIdentifiers: @@ -15,6 +19,8 @@ resources: type: OS::Heat::SoftwareConfig properties: group: puppet + options: + enable_debug: {get_param: ConfigDebug} outputs: - name: result config: @@ -32,6 +38,8 @@ resources: type: OS::Heat::SoftwareConfig properties: group: puppet + options: + enable_debug: {get_param: ConfigDebug} outputs: - name: result config: diff --git a/puppet/swift-storage-puppet.yaml b/puppet/swift-storage.yaml index 82922a87..3d9b9018 100644 --- a/puppet/swift-storage-puppet.yaml +++ b/puppet/swift-storage.yaml @@ -45,8 +45,9 @@ parameters: type: string hidden: true NtpServer: - type: string default: '' + description: Comma-separated list of ntp servers + type: comma_delimited_list EnablePackageInstall: default: 'false' description: Set to true to enable package installation via Puppet @@ -65,6 +66,18 @@ parameters: Hostname: type: string default: '' # Defaults to Heat created hostname + ExtraConfig: + default: {} + description: | + Additional hiera configuration to inject into the cluster. Note + that ObjectStorageExtraConfig takes precedence over ExtraConfig. + type: json + ObjectStorageExtraConfig: + default: {} + description: | + Role specific additional hiera configuration to inject into the cluster. + type: json + resources: @@ -77,9 +90,26 @@ resources: networks: - network: ctlplane user_data_format: SOFTWARE_CONFIG - user_data: {get_resource: NodeUserData} + user_data: {get_resource: UserData} name: {get_param: Hostname} + # Combine the NodeAdminUserData and NodeUserData mime archives + UserData: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: NodeAdminUserData} + type: multipart + - config: {get_resource: NodeUserData} + type: multipart + + # Creates the "heat-admin" user if configured via the environment + # Should return a OS::Heat::MultipartMime reference via OS::stack_id + NodeAdminUserData: + type: OS::TripleO::NodeAdminUserData + + # For optional operator additional userdata + # Should return a OS::Heat::MultipartMime reference via OS::stack_id NodeUserData: type: OS::TripleO::NodeUserData @@ -101,6 +131,7 @@ resources: NetworkConfig: type: OS::TripleO::ObjectStorage::Net::SoftwareConfig properties: + ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} @@ -108,6 +139,7 @@ resources: NetIpMap: type: OS::TripleO::Network::Ports::NetIpMap properties: + ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]} InternalApiIp: {get_attr: [InternalApiPort, ip_address]} StorageIp: {get_attr: [StoragePort, ip_address]} StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} @@ -125,7 +157,10 @@ resources: config: hiera: hierarchy: + - '"%{::uuid}"' - heat_config_%{::deploy_config_name} + - object_extraconfig + - extraconfig - object - swift_devices_and_proxy # provided by SwiftDevicesAndProxyConfig - all_nodes # provided by allNodesConfig @@ -134,6 +169,10 @@ resources: datafiles: common: raw_data: {get_file: hieradata/common.yaml} + object_extraconfig: + mapped_data: {get_param: ObjectStorageExtraConfig} + extraconfig: + mapped_data: {get_param: ExtraConfig} object: raw_data: {get_file: hieradata/object.yaml} mapped_data: # data supplied directly to this deployment configuration, etc @@ -150,7 +189,8 @@ resources: tripleo::ringbuilder::build_ring: True snmpd_readonly_user_name: {get_input: 
snmpd_readonly_user_name} snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password} - enable_package_install: {get_input: enable_package_install} + tripleo::packages::enable_install: {get_input: enable_package_install} + tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade} SwiftStorageHieraDeploy: @@ -168,14 +208,19 @@ resources: swift_min_part_hours: {get_param: MinPartHours} swift_part_power: {get_param: PartPower} swift_replicas: { get_param: Replicas} - ntp_servers: - str_replace: - template: '["server"]' - params: - server: {get_param: NtpServer} + ntp_servers: {get_param: NtpServer} enable_package_install: {get_param: EnablePackageInstall} + enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]} swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]} + # Hook for site-specific additional pre-deployment config, + # applying to all nodes, e.g node registration/unregistration + NodeExtraConfig: + depends_on: SwiftStorageHieraDeploy + type: OS::TripleO::NodeExtraConfig + properties: + server: {get_resource: SwiftStorage} + UpdateConfig: type: OS::TripleO::Tasks::PackageUpdate @@ -194,7 +239,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} HOST: {get_attr: [SwiftStorage, name]} nova_server_resource: description: Heat resource handle for the swift storage server @@ -218,5 +263,8 @@ outputs: value: {get_attr: [StorageMgmtPort, ip_address]} config_identifier: description: identifier which changes if the node configuration may need re-applying - value: {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]} - + value: + list_join: + - ',' + - - {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]} + - {get_param: UpdateIdentifier} @@ -2,7 +2,7 @@ name = tripleo-heat-templates summary = Heat templates for deploying OpenStack with OpenStack. description-file = - README.md + README.rst author = OpenStack author_email = openstack-dev@lists.openstack.org license = Apache License (2.0) diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py new file mode 100755 index 00000000..cb5669a7 --- /dev/null +++ b/tools/yaml-validate.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import sys +import traceback +import yaml + +base_path = sys.argv[1] +exit_val = 0 +failed_files = [] + +def validate(filename): + try: + yaml.load(open(filename).read()) + except Exception: + print(traceback.format_exc()) + return 1 + return 0 + +for subdir, dirs, files in os.walk(base_path): + for f in files: + if f.endswith('.yaml'): + file_path = os.path.join(subdir, f) + failed = validate(file_path) + if failed: + failed_files.append(file_path) + exit_val |= failed + +if failed_files: + print('Validation failed on:') + for f in failed_files: + print(f) +else: + print('Validation successful!') +sys.exit(exit_val) @@ -9,3 +9,6 @@ deps = -r{toxinidir}/requirements.txt [testenv:venv] commands = {posargs} + +[testenv:validate] +commands = python ./tools/yaml-validate.py .
\ No newline at end of file diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh new file mode 100644 index 00000000..8057f201 --- /dev/null +++ b/validation-scripts/all-nodes.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# For each unique remote IP (specified via Heat) we check to +# see if one of the locally configured networks matches and if so we +# attempt a ping test the remote network IP. +function ping_controller_ips() { + local REMOTE_IPS=$1 + + for REMOTE_IP in $(echo $REMOTE_IPS | sed -e "s| |\n|g" | sort -u); do + + for LOCAL_NETWORK in $(ip r | grep -v default | cut -d " " -f 1); do + local LOCAL_CIDR=$(echo $LOCAL_NETWORK | cut -d "/" -f 2) + local LOCAL_NETMASK=$(ipcalc -m $LOCAL_NETWORK | grep NETMASK | cut -d "=" -f 2) + local REMOTE_NETWORK=$(ipcalc -np $REMOTE_IP $LOCAL_NETMASK | grep NETWORK | cut -d "=" -f 2) + + if [ $REMOTE_NETWORK/$LOCAL_CIDR == $LOCAL_NETWORK ]; then + echo -n "Trying to ping $REMOTE_IP for local network $LOCAL_NETWORK..." + if ! ping -W 300 -c 1 $REMOTE_IP &> /dev/null; then + echo "FAILURE" + echo "$REMOTE_IP is not pingable. Local Network: $LOCAL_NETWORK" >&2 + exit 1 + fi + echo "SUCCESS" + fi + done + done +} + +# Ping all default gateways. There should only be one +# if using upstream t-h-t network templates but we test +# all of them should some manual network config have +# multiple gateways. +function ping_default_gateways() { + DEFAULT_GW=$(ip r | grep ^default | cut -d " " -f 3) + for GW in $DEFAULT_GW; do + echo -n "Trying to ping default gateway ${GW}..." + if ! ping -c 1 $GW &> /dev/null; then + echo "FAILURE" + echo "$GW is not pingable." + exit 1 + fi + done + echo "SUCCESS" +} + +ping_controller_ips "$ping_test_ips" +ping_default_gateways |
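
The membership test that validation-scripts/all-nodes.sh performs with ipcalc (only ping a Heat-supplied remote IP when it falls inside one of the locally configured networks, then ping the default gateways separately) can be sketched more directly with the Python standard library. The snippet below is illustrative only and is not part of the patch: it assumes Python 3 on Linux `ip route` output, mirrors the script's ping_controller_ips function, and uses a shorter ping timeout than the shell version's `-W 300`.

    #!/usr/bin/env python3
    # Illustrative sketch of the all-nodes.sh network-membership check,
    # not the shipped validation script (which is bash).
    import ipaddress
    import subprocess
    import sys


    def local_networks():
        """Return the non-default IPv4 networks configured on this host."""
        out = subprocess.check_output(['ip', '-4', 'route']).decode()
        nets = []
        for line in out.splitlines():
            if not line.strip():
                continue
            first = line.split()[0]
            if first != 'default' and '/' in first:
                nets.append(ipaddress.ip_network(first, strict=False))
        return nets


    def ping(ip):
        """Return True if a single ICMP echo to ip succeeds."""
        return subprocess.call(['ping', '-c', '1', '-W', '5', str(ip)],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL) == 0


    def ping_controller_ips(remote_ips):
        """Ping every remote IP that shares a locally configured network."""
        nets = local_networks()
        for raw in sorted(set(remote_ips.split())):
            ip = ipaddress.ip_address(raw)
            for net in nets:
                if ip in net:
                    print('Trying to ping %s for local network %s...' % (ip, net))
                    if not ping(ip):
                        sys.exit('%s is not pingable. Local network: %s' % (ip, net))
                    print('SUCCESS')


    if __name__ == '__main__':
        # In the template the IP list ($ping_test_ips) is injected by Heat;
        # here it is simply taken from the command line.
        ping_controller_ips(' '.join(sys.argv[1:]))

Invocation would look like `python3 check_ping.py 192.0.2.10 192.0.2.11` (file name and addresses are placeholders); remote IPs outside every local network are skipped, exactly as in the shell loop.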