406 files changed, 16890 insertions, 3985 deletions
@@ -59,3 +59,6 @@ puppet/compute-config.yaml puppet/controller-config.yaml puppet/objectstorage-config.yaml puppet/post.yaml + +# Files created by releasenotes build +releasenotes/build diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 302ef415..00000000 --- a/Gemfile +++ /dev/null @@ -1,24 +0,0 @@ -source 'https://rubygems.org' - -group :development, :test do - gem 'puppetlabs_spec_helper', :require => false - - gem 'puppet-lint', '~> 1.1' - gem 'puppet-lint-absolute_classname-check' - gem 'puppet-lint-absolute_template_path' - gem 'puppet-lint-trailing_newline-check' - - # Puppet 4.x related lint checks - gem 'puppet-lint-unquoted_string-check' - gem 'puppet-lint-leading_zero-check' - gem 'puppet-lint-variable_contains_upcase' - gem 'puppet-lint-numericvariable' -end - -if puppetversion = ENV['PUPPET_GEM_VERSION'] - gem 'puppet', puppetversion, :require => false -else - gem 'puppet', :require => false -end - -# vim:ft=ruby @@ -100,6 +100,8 @@ and should be executed according to the following table: +----------------+-------------+-------------+-------------+-------------+-----------------+ | sahara | | | X | | | +----------------+-------------+-------------+-------------+-------------+-----------------+ +| mistral | | | X | | | ++----------------+-------------+-------------+-------------+-------------+-----------------+ | swift | | X | | | X | +----------------+-------------+-------------+-------------+-------------+-----------------+ | aodh | X | | | | | @@ -114,5 +116,15 @@ and should be executed according to the following table: +----------------+-------------+-------------+-------------+-------------+-----------------+ | zaqar | | X | | | | +----------------+-------------+-------------+-------------+-------------+-----------------+ +| ec2api | | X | | | | ++----------------+-------------+-------------+-------------+-------------+-----------------+ | cephrgw | | X | | X | | +----------------+-------------+-------------+-------------+-------------+-----------------+ +| tacker | X | | | | | ++----------------+-------------+-------------+-------------+-------------+-----------------+ +| congress | X | | | | | ++----------------+-------------+-------------+-------------+-------------+-----------------+ +| cephmds | | | | X | | ++----------------+-------------+-------------+-------------+-------------+-----------------+ +| manila | | | | X | | ++----------------+-------------+-------------+-------------+-------------+-----------------+ diff --git a/Rakefile b/Rakefile deleted file mode 100644 index bca6a6c2..00000000 --- a/Rakefile +++ /dev/null @@ -1,6 +0,0 @@ -require 'puppetlabs_spec_helper/rake_tasks' -require 'puppet-lint/tasks/puppet-lint' - -PuppetLint.configuration.fail_on_warnings = true -PuppetLint.configuration.send('disable_80chars') -PuppetLint.configuration.send('disable_autoloader_layout') diff --git a/capabilities-map.yaml b/capabilities-map.yaml index ae747621..66dc1d1d 100644 --- a/capabilities-map.yaml +++ b/capabilities-map.yaml @@ -160,6 +160,16 @@ topics: description: Deploy Mistral service requires: - overcloud-resource-registry-puppet.yaml + - title: Ceilometer Api + description: + environments: + - file: environments/services/disable-ceilometer-api.yaml + title: Ceilometer Api + description: Disable Ceilometer Api service. This service is + deprecated and will be removed in future releases. Please move + to using gnocchi/aodh/panko apis instead. 
+ requires: + - overcloud-resource-registry-puppet.yaml # - title: Network Interface Configuration # description: @@ -325,21 +335,11 @@ topics: description: Enables Neutron Nuage backend on the controller requires: - overcloud-resource-registry-puppet.yaml - - file: environments/neutron-opencontrail.yaml - title: OpenContrail Extensions - description: Enables OpenContrail extensions - requires: - - overcloud-resource-registry-puppet.yaml - file: environments/neutron-opendaylight.yaml title: OpenDaylight description: Enables OpenDaylight requires: - overcloud-resource-registry-puppet.yaml - - file: environments/neutron-opendaylight-l3.yaml - title: OpenDaylight with L3 DVR - description: Enables OpenDaylight with L3 DVR - requires: - - overcloud-resource-registry-puppet.yaml - file: environments/neutron-ovs-dpdk.yaml title: DPDK with OVS description: Deploy DPDK with OVS @@ -355,6 +355,16 @@ topics: description: Enables PLUMgrid extensions requires: - overcloud-resource-registry-puppet.yaml + - file: environments/neutron-ml2-fujitsu-cfab.yaml + title: Fujitsu Neutron plugin for C-Fabric + description: Enable C-Fabric in the overcloud + requires: + - overcloud-resource-registry-puppet.yaml + - file: environments/neutron-ml2-fujitsu-fossw.yaml + title: Fujitsu Neutron plugin for FOS + description: Enable FOS in the overcloud + requires: + - overcloud-resource-registry-puppet.yaml - title: Nova Extensions description: @@ -393,42 +403,90 @@ topics: requires: - overcloud-resource-registry-puppet.yaml - file: environments/cinder-dellsc-config.yaml - title: Cinder Dell Storage Center ISCSI backend + title: Cinder Dell EMC Storage Center ISCSI backend + description: > + Enables a Cinder Dell EMC Storage Center ISCSI backend, + configured via puppet + requires: + - overcloud-resource-registry-puppet.yaml + - file: environments/cinder-hpelefthand-config.yaml + title: Cinder HPELeftHandISCSI backend description: > - Enables a Cinder Dell Storage Center ISCSI backend, configured + Enables a Cinder HPELeftHandISCSI backend, configured via puppet requires: - overcloud-resource-registry-puppet.yaml - - file: environments/cinder-eqlx-config.yaml - title: Cinder EQLX backend + - file: environments/cinder-dellps-config.yaml + title: Cinder Dell EMC PS Series backend description: > - Enables a Cinder EQLX backend, configured via puppet + Enables a Cinder Dell EMC PS Series backend, + configured via puppet requires: - overcloud-resource-registry-puppet.yaml - - title: Externally managed Ceph + - file: environments/cinder-iser.yaml + title: Cinder iSER backend + description: > + Enable a Cinder iSER RDMA backend, configured via puppet + - file: environments/cinder-scaleio-config.yaml + title: Cinder Dell EMC ScaleIO backend + description: > + Enables a Cinder Dell EMC ScaleIO backend, + configured via puppet + requires: + - overcloud-resource-registry-puppet.yaml + - title: Ceph description: > - Enable the use of an externally managed Ceph cluster + Enable the use of Ceph in the overcloud environments: - file: environments/puppet-ceph-external.yaml title: Externally managed Ceph - description: + description: > + Configures the overcloud to use an externally managed Ceph cluster, via RBD driver. 
+ requires: + - overcloud-resource-registry-puppet.yaml + - file: environments/puppet-ceph.yaml + title: TripleO managed Ceph + description: > + Deploys a Ceph cluster via TripleO, requires at least one CephStorage node or + use of hyperconverged-ceph.yaml environment for the HCI scenario, where CephOSD is + colocated with NovaCompute and configures the overcloud to use it, via RBD driver. requires: - overcloud-resource-registry-puppet.yaml - - title: Ceph Devel + - title: CephMDS description: > - Enable a Ceph storage cluster using the controller and 2 ceph nodes. - Rbd backends are enabled for Cinder, Glance, and Nova. + Deploys CephMDS via TripleO, an additional Ceph service needed to create shared + filesystems hosted in Ceph. environments: - - file: environments/puppet-ceph-devel.yaml - title: Ceph Devel + - file: environments/services/ceph-mds.yaml + title: Deploys CephMDS description: requires: + - environments/puppet-ceph.yaml + - title: Ceph Rados Gateway + description: > + Deploys CephRGW via TripleO, transparently replaces Swift providing a compatible API + which stores data in the Ceph cluster. + environments: + - file: environments/ceph-radosgw.yaml + title: Deploys CephRGW + description: + requires: + - environments/puppet-ceph.yaml + - title: Manila with CephFS + description: > + Deploys Manila and configures it with the CephFS driver. This requires the deployment of + Ceph and CephMDS from TripleO or the use of an external Ceph cluster for the overcloud. + environments: + - file: environments/manila-cephfsnative-config.yaml + title: Deploys Manila with CephFS driver + description: Deploys Manila and configures CephFS as its default backend. + requires: - overcloud-resource-registry-puppet.yaml - title: Storage Environment description: > Can be used to set up storage backends. Defaults to Ceph used as a - backend for Cinder, Glance and Nova ephemeral storage. It configures - for example which services will use Ceph, or if any of the services + backend for Cinder, Glance, Nova ephemeral storage and Gnocchi. It + configures which services will use Ceph, or if any of the services will use NFS. And more. Usually requires to be edited by user first.
tags: - no-gui @@ -476,14 +534,6 @@ topics: description: requires: - overcloud-resource-registry-puppet.yaml - - title: Manage Firewall - description: - environments: - - file: environments/manage-firewall.yaml - title: Manage Firewall - description: - requires: - - overcloud-resource-registry-puppet.yaml - title: Operational Tools description: @@ -504,3 +554,36 @@ topics: description: requires: - overcloud-resource-registry-puppet.yaml + + - title: Security Options + description: Security Hardening Options + environment_groups: + - title: SSH Banner Text + description: Enables population of SSH Banner Text + environments: + - file: environments/sshd-banner.yaml + title: SSH Banner Text + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: Horizon Password Validation + description: Enable Horizon Password validation + environments: + - file: environments/horizon_password_validation.yaml + title: Horizon Password Validation + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: AuditD Rules + description: Management of AuditD rules + environments: + - file: environments/auditd.yaml + title: AuditD Rule Management + description: + requires: + - overcloud-resource-registry-puppet.yaml + - title: Keystone CADF auditing + description: Enable CADF notifications in Keystone for auditing + environments: + - file: environments/cadf.yaml + title: Keystone CADF auditing diff --git a/ci/common/net-config-multinode-os-net-config.yaml b/ci/common/net-config-multinode-os-net-config.yaml new file mode 100644 index 00000000..8c50b641 --- /dev/null +++ b/ci/common/net-config-multinode-os-net-config.yaml @@ -0,0 +1,114 @@ +heat_template_version: ocata + +description: > + Software Config to drive os-net-config for a simple bridge configured + with a static IP address for the ctlplane network. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ManagementIpSubnet: + default: '' + description: IP address/subnet on the management network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + OvSBridgeMtu: + default: 1300 + description: The mtu of the OvS bridge + type: number + +resources: + + OsNetConfigImpl: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - | + #!/bin/bash + function network_config_hook { + primary_private_ip=$(cat /etc/nodepool/primary_node_private) + sed -i "s/primary_private_ip/$primary_private_ip/" /etc/os-net-config/config.json + subnode_private_ip=$(cat /etc/nodepool/node_private) + sed -i "s/subnode_private_ip/$subnode_private_ip/" /etc/os-net-config/config.json + # We start with an arbitrarily high vni key so that we don't + # overlap with Neutron created values. These will also match the + # values that we've been using previously from the devstack-gate + # code. 
+ vni=1000002 + subnode_index=$(grep -n $(cat /etc/nodepool/node_private) /etc/nodepool/sub_nodes_private | cut -d: -f1) + let vni+=$subnode_index + sed -i "s/vni/$vni/" /etc/os-net-config/config.json + export interface_name="br-ex_$primary_private_ip" + # Until we are fully migrated to os-net-config we need to clean + # up the old bridge first created by devstack-gate + ovs-vsctl del-br br-ex + } + + - + str_replace: + template: + get_file: ../../network/scripts/run-os-net-config.sh + params: + $network_config: + network_config: + - type: ovs_bridge + name: bridge_name + mtu: + get_param: OvSBridgeMtu + use_dhcp: false + addresses: + - ip_netmask: + list_join: + - "/" + - - get_param: ControlPlaneIp + - get_param: ControlPlaneSubnetCidr + members: + - type: ovs_tunnel + name: interface_name + tunnel_type: vxlan + ovs_options: + - list_join: + - "=" + - - key + - vni + - list_join: + - "=" + - - remote_ip + - primary_private_ip + - list_join: + - "=" + - - local_ip + - subnode_private_ip + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/ci/common/net-config-multinode.yaml b/ci/common/net-config-multinode.yaml index bf947d3e..dc31235a 100644 --- a/ci/common/net-config-multinode.yaml +++ b/ci/common/net-config-multinode.yaml @@ -47,7 +47,9 @@ resources: str_replace: template: | #!/bin/bash - ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name + if ! ip addr show dev $bridge_name | grep CONTROLPLANEIP/CONTROLPLANESUBNETCIDR; then + ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name + fi params: CONTROLPLANEIP: {get_param: ControlPlaneIp} CONTROLPLANESUBNETCIDR: {get_param: ControlPlaneSubnetCidr} diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml new file mode 100644 index 00000000..d6e2376a --- /dev/null +++ b/ci/environments/multinode-3nodes.yaml @@ -0,0 +1,79 @@ +# Specifies which roles (groups of nodes) will be deployed +# Note this is used as an input to the various *.j2.yaml +# jinja2 templates, so that they are converted into *.yaml +# during the plan creation (via a mistral action/workflow). +# +# The format is a list, with the following format: +# +# * name: (string) mandatory, name of the role, must be unique +# +# CountDefault: (number) optional, default number of nodes, defaults to 0 +# sets the default for the {{role.name}}Count parameter in overcloud.yaml +# +# HostnameFormatDefault: (string) optional default format string for hostname +# defaults to '%stackname%-{{role.name.lower()}}-%index%' +# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml +# +# ServicesDefault: (list) optional default list of services to be deployed +# on the role, defaults to an empty list. 
Sets the default for the +# {{role.name}}Services parameter in overcloud.yaml + +- name: ControllerApi + CountDefault: 1 + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::CinderApi + - OS::TripleO::Services::CinderScheduler + - OS::TripleO::Services::Core + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Keystone + - OS::TripleO::Services::GlanceApi + - OS::TripleO::Services::HeatApi + - OS::TripleO::Services::HeatApiCfn + - OS::TripleO::Services::HeatApiCloudwatch + - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::NeutronDhcpAgent + - OS::TripleO::Services::NeutronL3Agent + - OS::TripleO::Services::NeutronMetadataAgent + - OS::TripleO::Services::NeutronApi + - OS::TripleO::Services::NeutronCorePlugin + - OS::TripleO::Services::NeutronOvsAgent + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::NovaConsoleauth + - OS::TripleO::Services::NovaVncProxy + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + - OS::TripleO::Services::MySQLClient + +- name: Controller + CountDefault: 1 + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::CinderBackup + - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::Core + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient + - OS::TripleO::Services::RabbitMQ + - OS::TripleO::Services::HAproxy + - OS::TripleO::Services::Keepalived + - OS::TripleO::Services::Memcached + - OS::TripleO::Services::Pacemaker + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall diff --git a/puppet/services/pacemaker/core.yaml b/ci/environments/multinode-core.yaml index f7e3d984..0c07a1b0 100644 --- a/puppet/services/pacemaker/core.yaml +++ b/ci/environments/multinode-core.yaml @@ -1,7 +1,7 @@ heat_template_version: ocata description: > - OpenStack Core (fake) service with Pacemaker configured with Puppet. + OpenStack Core Service parameters: ServiceNetMap: @@ -18,12 +18,20 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + Debug: + type: string + default: '' + +resources: outputs: role_data: - description: Role data for the Core role. + description: Role data for the multinode firewall configuration value: - service_name: core - config_settings: {} - step_config: | - include ::tripleo::profile::pacemaker::core
\ No newline at end of file + service_name: multinode_core + config_settings: + tripleo.core.firewall_rules: + '999 core': + proto: 'udp' + dport: + - 4789 diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml new file mode 100644 index 00000000..c946ec8a --- /dev/null +++ b/ci/environments/multinode.yaml @@ -0,0 +1,64 @@ +resource_registry: + OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml + +parameter_defaults: + ControllerServices: + - OS::TripleO::Services::CephMon + - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::CinderApi + - OS::TripleO::Services::CinderScheduler + - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Keystone + - OS::TripleO::Services::GlanceApi + - OS::TripleO::Services::HeatApi + - OS::TripleO::Services::HeatApiCfn + - OS::TripleO::Services::HeatApiCloudwatch + - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient + - OS::TripleO::Services::NeutronDhcpAgent + - OS::TripleO::Services::NeutronL3Agent + - OS::TripleO::Services::NeutronMetadataAgent + - OS::TripleO::Services::NeutronServer + - OS::TripleO::Services::NeutronCorePlugin + - OS::TripleO::Services::NeutronOvsAgent + - OS::TripleO::Services::RabbitMQ + - OS::TripleO::Services::HAproxy + - OS::TripleO::Services::Keepalived + - OS::TripleO::Services::Memcached + - OS::TripleO::Services::Pacemaker + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + ControllerExtraConfig: + nova::compute::libvirt::services::libvirt_virt_type: qemu + nova::compute::libvirt::libvirt_virt_type: qemu + # Required for Centos 7.3 and Qemu 2.6.0 + nova::compute::libvirt::libvirt_cpu_mode: 'none' + #NOTE(gfidente): not great but we need this to deploy on ext4 + #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ + ceph::profile::params::osd_max_object_name_len: 256 + ceph::profile::params::osd_max_object_namespace_len: 64 + SwiftCeilometerPipelineEnabled: False + Debug: True diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml new file mode 100644 index 00000000..2251cc0c --- /dev/null +++ 
b/ci/environments/multinode_major_upgrade.yaml @@ -0,0 +1,65 @@ +resource_registry: + OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml + +parameter_defaults: + ControllerServices: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Keystone + - OS::TripleO::Services::GlanceApi + - OS::TripleO::Services::GlanceRegistry + - OS::TripleO::Services::NeutronDhcpAgent + - OS::TripleO::Services::NeutronL3Agent + - OS::TripleO::Services::NeutronMetadataAgent + - OS::TripleO::Services::NeutronServer + - OS::TripleO::Services::NeutronCorePlugin + - OS::TripleO::Services::NeutronOvsAgent + - OS::TripleO::Services::CinderApi + - OS::TripleO::Services::CinderScheduler + - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::HeatApi + - OS::TripleO::Services::HeatApiCfn + - OS::TripleO::Services::HeatApiCloudwatch + - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::SaharaApi + - OS::TripleO::Services::SaharaEngine + - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient + - OS::TripleO::Services::RabbitMQ + - OS::TripleO::Services::HAproxy + - OS::TripleO::Services::Keepalived + - OS::TripleO::Services::Memcached + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + - OS::TripleO::Services::Pacemaker + - OS::TripleO::Services::Horizon + ControllerExtraConfig: + nova::compute::libvirt::services::libvirt_virt_type: qemu + nova::compute::libvirt::libvirt_virt_type: qemu + # Required for Centos 7.3 and Qemu 2.6.0 + nova::compute::libvirt::libvirt_cpu_mode: 'none' + heat::rpc_response_timeout: 600 + SwiftCeilometerPipelineEnabled: False + Debug: True diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml index 0e9c1c7a..a6f35711 100644 --- a/ci/environments/scenario001-multinode.yaml +++ b/ci/environments/scenario001-multinode.yaml @@ -1,22 +1,36 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml - OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml - 
OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml - OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml - OS::TripleO::Services::PankoApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/panko-api.yaml + OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml + OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml + OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml + OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml + OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::Redis: ../../puppet/services/pacemaker/database/redis.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml + OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml parameter_defaults: ControllerServices: - OS::TripleO::Services::Kernel - OS::TripleO::Services::Keystone - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceRegistry - OS::TripleO::Services::HeatApi - OS::TripleO::Services::HeatApiCfn - OS::TripleO::Services::HeatApiCloudwatch - OS::TripleO::Services::HeatEngine - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -30,10 +44,12 @@ parameter_defaults: - OS::TripleO::Services::Pacemaker - OS::TripleO::Services::NovaConductor - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement - OS::TripleO::Services::NovaMetadata - OS::TripleO::Services::NovaScheduler - OS::TripleO::Services::Ntp - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::Timezone - OS::TripleO::Services::NovaCompute - OS::TripleO::Services::NovaLibvirt @@ -59,6 +75,11 @@ parameter_defaults: - OS::TripleO::Services::CinderBackup - OS::TripleO::Services::CinderScheduler - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::Collectd + - OS::TripleO::Services::Tacker + - OS::TripleO::Services::Congress + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall ControllerExtraConfig: nova::compute::libvirt::services::libvirt_virt_type: qemu nova::compute::libvirt::libvirt_virt_type: qemu @@ -81,3 +102,14 @@ parameter_defaults: GlanceBackend: rbd GnocchiBackend: rbd CinderEnableIscsiBackend: false + BannerText: | + ****************************************************************** + * This system is for the use of authorized users only. 
Usage of * + * this system may be monitored and recorded by system personnel. * + * Anyone using this system expressly consents to such monitoring * + * and is advised that if such monitoring reveals possible * + * evidence of criminal activity, system personnel may provide * + * the evidence from such monitoring to law enforcement officials.* + ****************************************************************** + CollectdExtraPlugins: + - rrdtool diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml index 7875ef4e..cbcfa9b3 100644 --- a/ci/environments/scenario002-multinode.yaml +++ b/ci/environments/scenario002-multinode.yaml @@ -3,18 +3,29 @@ resource_registry: OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml + OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml + OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml parameter_defaults: ControllerServices: - OS::TripleO::Services::Kernel - OS::TripleO::Services::Keystone - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceRegistry - OS::TripleO::Services::HeatApi - OS::TripleO::Services::HeatApiCfn - OS::TripleO::Services::HeatApiCloudwatch - OS::TripleO::Services::HeatEngine - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -28,6 +39,7 @@ parameter_defaults: - OS::TripleO::Services::Pacemaker - OS::TripleO::Services::NovaConductor - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement - OS::TripleO::Services::NovaMetadata - OS::TripleO::Services::NovaScheduler - OS::TripleO::Services::Ntp @@ -45,6 +57,9 @@ parameter_defaults: - OS::TripleO::Services::BarbicanApi - OS::TripleO::Services::MongoDb - OS::TripleO::Services::Zaqar + - OS::TripleO::Services::Ec2Api + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall ControllerExtraConfig: nova::compute::libvirt::services::libvirt_virt_type: qemu nova::compute::libvirt::libvirt_virt_type: qemu diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml index 26f94d03..6e926f74 100644 --- a/ci/environments/scenario003-multinode.yaml +++ b/ci/environments/scenario003-multinode.yaml @@ -6,18 +6,26 @@ resource_registry: OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml OS::TripleO::Services::MistralExecutor: 
../../puppet/services/mistral-executor.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml parameter_defaults: ControllerServices: - OS::TripleO::Services::Kernel - OS::TripleO::Services::Keystone - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceRegistry - OS::TripleO::Services::HeatApi - OS::TripleO::Services::HeatApiCfn - OS::TripleO::Services::HeatApiCloudwatch - OS::TripleO::Services::HeatEngine - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -31,6 +39,7 @@ parameter_defaults: - OS::TripleO::Services::Pacemaker - OS::TripleO::Services::NovaConductor - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement - OS::TripleO::Services::NovaMetadata - OS::TripleO::Services::NovaScheduler - OS::TripleO::Services::Ntp @@ -43,6 +52,8 @@ parameter_defaults: - OS::TripleO::Services::MistralApi - OS::TripleO::Services::MistralEngine - OS::TripleO::Services::MistralExecutor + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall ControllerExtraConfig: nova::compute::libvirt::services::libvirt_virt_type: qemu nova::compute::libvirt::libvirt_virt_type: qemu diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml index 0d94cea0..dc05ab4e 100644 --- a/ci/environments/scenario004-multinode.yaml +++ b/ci/environments/scenario004-multinode.yaml @@ -1,25 +1,44 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml - OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml - OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml - OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml - OS::TripleO::Services::CephRgw: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-rgw.yaml + OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml + OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephRgw: ../../puppet/services/ceph-rgw.yaml OS::TripleO::Services::SwiftProxy: OS::Heat::None OS::TripleO::Services::SwiftStorage: OS::Heat::None OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None + OS::TripleO::Services::ManilaApi: ../../puppet/services/manila-api.yaml + OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml + OS::TripleO::Services::ManilaShare: ../../puppet/services/pacemaker/manila-share.yaml + OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml + # 
These enable Pacemaker + OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml + OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml + OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml + OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml + OS::TripleO::Services::Redis: ../../puppet/services/pacemaker/database/redis.yaml + OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml + OS::TripleO::Services::Keepalived: OS::Heat::None + parameter_defaults: ControllerServices: + - OS::TripleO::Services::CephMds + - OS::TripleO::Services::CephMon + - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::CephRgw - OS::TripleO::Services::Kernel - OS::TripleO::Services::Keystone - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceRegistry - OS::TripleO::Services::HeatApi - OS::TripleO::Services::HeatApiCfn - OS::TripleO::Services::HeatApiCloudwatch - OS::TripleO::Services::HeatEngine - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -29,10 +48,15 @@ parameter_defaults: - OS::TripleO::Services::RabbitMQ - OS::TripleO::Services::HAproxy - OS::TripleO::Services::Keepalived + - OS::TripleO::Services::ManilaApi + - OS::TripleO::Services::ManilaScheduler + - OS::TripleO::Services::ManilaBackendCephFs + - OS::TripleO::Services::ManilaShare - OS::TripleO::Services::Memcached - OS::TripleO::Services::Pacemaker - OS::TripleO::Services::NovaConductor - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement - OS::TripleO::Services::NovaMetadata - OS::TripleO::Services::NovaScheduler - OS::TripleO::Services::Ntp @@ -40,10 +64,8 @@ parameter_defaults: - OS::TripleO::Services::Timezone - OS::TripleO::Services::NovaCompute - OS::TripleO::Services::NovaLibvirt - - OS::TripleO::Services::CephMon - - OS::TripleO::Services::CephOSD - - OS::TripleO::Services::CephClient - - OS::TripleO::Services::CephRgw + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall ControllerExtraConfig: nova::compute::libvirt::services::libvirt_virt_type: qemu nova::compute::libvirt::libvirt_virt_type: qemu diff --git a/ci/pingtests/scenario002-multinode.yaml b/ci/pingtests/scenario002-multinode.yaml index 7af1ba0c..da1ae60c 100644 --- a/ci/pingtests/scenario002-multinode.yaml +++ b/ci/pingtests/scenario002-multinode.yaml @@ -81,7 +81,7 @@ resources: type: OS::Cinder::EncryptedVolumeType properties: volume_type: {get_resource: luks_volume_type} - provider: nova.volume.encryptors.luks.LuksEncryptor + provider: luks cipher: aes-xts-plain64 control_location: front-end key_size: 256 diff --git a/ci/pingtests/scenario004-multinode.yaml b/ci/pingtests/scenario004-multinode.yaml index a188fd1c..ebdfea14 100644 --- a/ci/pingtests/scenario004-multinode.yaml +++ b/ci/pingtests/scenario004-multinode.yaml @@ -118,6 +118,18 @@ resources: ram: 512 vcpus: 1 + manila_share_type: + type: OS::Manila::ShareType + properties: + name: default + driver_handles_share_servers: false + + manila_share: + type: OS::Manila::Share + properties: + share_protocol: CEPHFS + size: 1 + outputs: server1_private_ip: description: 
IP address of server1 in private network diff --git a/ci/pingtests/tenantvm_floatingip.yaml b/ci/pingtests/tenantvm_floatingip.yaml new file mode 100644 index 00000000..b910d6c1 --- /dev/null +++ b/ci/pingtests/tenantvm_floatingip.yaml @@ -0,0 +1,142 @@ +heat_template_version: ocata + +description: > + This template resides in tripleo-ci for Mitaka CI jobs only. + For Newton and beyond, please look in THT. + HOT template to create a new neutron network plus a router to the public + network, and for deploying a server into the new network. The template also + assigns a floating IP address and sets security group rules. ADAPTED FROM + https://raw.githubusercontent.com/openstack/heat-templates/master/hot/servers_in_new_neutron_net.yaml +parameters: + key_name: + type: string + description: Name of keypair to assign to servers + default: 'pingtest_key' + image: + type: string + description: Name of image to use for servers + default: 'pingtest_image' + public_net_name: + type: string + default: 'nova' + description: > + ID or name of public network for which floating IP addresses will be allocated + private_net_name: + type: string + description: Name of private network to be created + default: 'default-net' + private_net_cidr: + type: string + description: Private network address (CIDR notation) + default: '192.168.2.0/24' + private_net_gateway: + type: string + description: Private network gateway address + default: '192.168.2.1' + private_net_pool_start: + type: string + description: Start of private network IP address allocation pool + default: '192.168.2.100' + private_net_pool_end: + type: string + default: '192.168.2.200' + description: End of private network IP address allocation pool + +resources: + + key_pair: + type: OS::Nova::KeyPair + properties: + save_private_key: true + name: {get_param: key_name } + + private_net: + type: OS::Neutron::Net + properties: + name: { get_param: private_net_name } + + private_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: private_net } + cidr: { get_param: private_net_cidr } + gateway_ip: { get_param: private_net_gateway } + allocation_pools: + - start: { get_param: private_net_pool_start } + end: { get_param: private_net_pool_end } + + router: + type: OS::Neutron::Router + properties: + external_gateway_info: + network: { get_param: public_net_name } + + router_interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: private_subnet } + + volume1: + type: OS::Cinder::Volume + properties: + name: Volume1 + image: { get_param: image } + size: 1 + + server1: + type: OS::Nova::Server + depends_on: volume1 + properties: + name: Server1 + block_device_mapping: + - device_name: vda + volume_id: { get_resource: volume1 } + flavor: { get_resource: test_flavor } + key_name: { get_resource: key_pair } + networks: + - port: { get_resource: server1_port } + + server1_port: + type: OS::Neutron::Port + properties: + network_id: { get_resource: private_net } + fixed_ips: + - subnet_id: { get_resource: private_subnet } + security_groups: [{ get_resource: server_security_group }] + + server1_floating_ip: + type: OS::Neutron::FloatingIP + # TODO: investigate why we need this depends_on and if we could + # replace it by router_id with get_resource: router_interface + depends_on: router_interface + properties: + floating_network: { get_param: public_net_name } + port_id: { get_resource: server1_port } + + server_security_group: + type: OS::Neutron::SecurityGroup + 
properties: + description: Add security group rules for server + name: pingtest-security-group + rules: + - remote_ip_prefix: 0.0.0.0/0 + protocol: tcp + port_range_min: 22 + port_range_max: 22 + - remote_ip_prefix: 0.0.0.0/0 + protocol: icmp + + test_flavor: + type: OS::Nova::Flavor + properties: + ram: 512 + vcpus: 1 + +outputs: + server1_private_ip: + description: IP address of server1 in private network + value: { get_attr: [ server1, first_address ] } + server1_public_ip: + description: Floating IP address of server1 in public network + value: { get_attr: [ server1_floating_ip, floating_ip_address ] } diff --git a/ci/scripts/freeipa_setup.sh b/ci/scripts/freeipa_setup.sh new file mode 100644 index 00000000..e699841f --- /dev/null +++ b/ci/scripts/freeipa_setup.sh @@ -0,0 +1,120 @@ +#!/bin/bash +# +# Used environment variables: +# +# - Hostname +# - FreeIPAIP +# - DirectoryManagerPassword +# - AdminPassword +# - UndercloudFQDN +# - HostsSecret +# - ProvisioningCIDR: If set, it adds the given CIDR to the provisioning +# interface (which is hardcoded to eth1) +# - UsingNovajoin: If unset, we pre-provision the service principals +# needed for the overcloud deploy. If set, we skip this, +# since novajoin will do it. +# +set -eux + +if [ -f "~/freeipa-setup.env" ]; then + source ~/freeipa-setup.env +elif [ -f "/tmp/freeipa-setup.env" ]; then + source /tmp/freeipa-setup.env +fi + +export Hostname=${Hostname:-""} +export FreeIPAIP=${FreeIPAIP:-""} +export DirectoryManagerPassword=${DirectoryManagerPassword:-""} +export AdminPassword=${AdminPassword:-""} +export UndercloudFQDN=${UndercloudFQDN:-""} +export HostsSecret=${HostsSecret:-""} +export ProvisioningCIDR=${ProvisioningCIDR:-""} +export UsingNovajoin=${UsingNovajoin:-""} + +if [ -n "$ProvisioningCIDR" ]; then + # Add address to provisioning network interface + ip link set dev eth1 up + ip addr add $ProvisioningCIDR dev eth1 +fi + +# Set DNS servers +echo "nameserver 8.8.8.8" >> /etc/resolv.conf +echo "nameserver 8.8.4.4" >> /etc/resolv.conf + +yum -q -y remove openstack-dashboard + +# Install the needed packages +yum -q install -y ipa-server ipa-server-dns epel-release rng-tools mod_nss git +yum -q install -y haveged + +# Prepare hostname +hostnamectl set-hostname --static $Hostname + +echo $FreeIPAIP `hostname` | tee -a /etc/hosts + +# Set iptables rules +cat << EOF > freeipa-iptables-rules.txt +# Firewall configuration written by system-config-firewall +# Manual customization of this file is not recommended. 
+*filter +:INPUT ACCEPT [0:0] +:FORWARD ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT +-A INPUT -p icmp -j ACCEPT +-A INPUT -i lo -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT +#TCP ports for FreeIPA +-A INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 389 -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 636 -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 88 -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 464 -j ACCEPT +-A INPUT -m state --state NEW -m tcp -p tcp --dport 53 -j ACCEPT +#UDP ports for FreeIPA +-A INPUT -m state --state NEW -m udp -p udp --dport 88 -j ACCEPT +-A INPUT -m state --state NEW -m udp -p udp --dport 464 -j ACCEPT +-A INPUT -m state --state NEW -m udp -p udp --dport 123 -j ACCEPT +-A INPUT -m state --state NEW -m udp -p udp --dport 53 -j ACCEPT +-A INPUT -j REJECT --reject-with icmp-host-prohibited +-A FORWARD -j REJECT --reject-with icmp-host-prohibited +COMMIT +EOF + +iptables-restore < freeipa-iptables-rules.txt + +# Entropy generation; otherwise, ipa-server-install will lag. +chkconfig haveged on +systemctl start haveged + +# Remove conflicting httpd configuration +rm -f /etc/httpd/conf.d/ssl.conf + +# Set up FreeIPA +ipa-server-install -U -r `hostname -d|tr "[a-z]" "[A-Z]"` \ + -p $DirectoryManagerPassword -a $AdminPassword \ + --hostname `hostname -f` \ + --ip-address=$FreeIPAIP \ + --setup-dns --auto-forwarders --auto-reverse + +# Authenticate +echo $AdminPassword | kinit admin + +# Verify we have TGT +klist + +if [ "$?" = '1' ]; then + exit 1 +fi + +if [ -z "$UsingNovajoin" ]; then + # Create undercloud host + ipa host-add $UndercloudFQDN --password=$HostsSecret --force + + # Create overcloud nodes and services + git clone https://github.com/JAORMX/freeipa-tripleo-incubator.git + cd freeipa-tripleo-incubator + python create_ipa_tripleo_host_setup.py -w $HostsSecret -d $(hostname -d) \ + --controller-count 1 --compute-count 1 +fi diff --git a/deployed-server/README.rst b/deployed-server/README.rst index f269b6a4..e4d8299b 100644 --- a/deployed-server/README.rst +++ b/deployed-server/README.rst @@ -119,7 +119,7 @@ from the deployment command, the script should be ready to run: [NovaCompute]: CREATE_IN_PROGRESS state changed The user running the script must be able to ssh as root to each server. Define -the the names of your custom roles (if applicable) and hostnames of the deployed +the names of your custom roles (if applicable) and hostnames of the deployed servers you intend to use for each role type. 
For each role name, a corresponding <role-name>_hosts variable should also be defined, e.g.:: diff --git a/deployed-server/ctlplane-port.yaml b/deployed-server/ctlplane-port.yaml new file mode 100644 index 00000000..7b5cdf11 --- /dev/null +++ b/deployed-server/ctlplane-port.yaml @@ -0,0 +1,28 @@ +heat_template_version: ocata + +parameters: + network: + type: string + default: ctlplane + name: + type: string + replacement_policy: + type: string + default: AUTO + +resources: + + ControlPlanePort: + type: OS::Neutron::Port + properties: + network: ctlplane + name: + list_join: + - '-' + - - {get_param: name} + - port + replacement_policy: AUTO + +outputs: + fixed_ips: + value: {get_attr: [ControlPlanePort, fixed_ips]} diff --git a/deployed-server/deployed-neutron-port.yaml b/deployed-server/deployed-neutron-port.yaml index 5ce63770..bddf8bc1 100644 --- a/deployed-server/deployed-neutron-port.yaml +++ b/deployed-server/deployed-neutron-port.yaml @@ -8,7 +8,7 @@ description: " Example: parameter_defaults: DeployedServerPortMap: - gatsby_ctlplane: + gatsby-ctlplane: fixed_ips: - ip_address: 127.0.0.1 subnets: diff --git a/deployed-server/deployed-server-bootstrap-centos.sh b/deployed-server/deployed-server-bootstrap-centos.sh new file mode 100644 index 00000000..7266ca57 --- /dev/null +++ b/deployed-server/deployed-server-bootstrap-centos.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -eux + +yum install -y \ + jq \ + python-ipaddr \ + openstack-puppet-modules \ + os-net-config \ + openvswitch \ + python-heat-agent* + +ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules + +setenforce 0 +sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config diff --git a/deployed-server/deployed-server-bootstrap-centos.yaml b/deployed-server/deployed-server-bootstrap-centos.yaml new file mode 100644 index 00000000..c1740d78 --- /dev/null +++ b/deployed-server/deployed-server-bootstrap-centos.yaml @@ -0,0 +1,22 @@ +heat_template_version: ocata + +description: 'Deployed Server Bootstrap Config' + +parameters: + + server: + type: string + +resources: + + DeployedServerBootstrapConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: deployed-server-bootstrap-centos.sh} + + DeployedServerBootstrapDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: DeployedServerBootstrapConfig} + server: {get_param: server} diff --git a/deployed-server/deployed-server-bootstrap-rhel.sh b/deployed-server/deployed-server-bootstrap-rhel.sh new file mode 100644 index 00000000..36ff0077 --- /dev/null +++ b/deployed-server/deployed-server-bootstrap-rhel.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -eux + +yum install -y \ + jq \ + python-ipaddr \ + openstack-puppet-modules \ + os-net-config \ + openvswitch \ + python-heat-agent* + +ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules diff --git a/deployed-server/deployed-server-bootstrap-rhel.yaml b/deployed-server/deployed-server-bootstrap-rhel.yaml new file mode 100644 index 00000000..2d2f5156 --- /dev/null +++ b/deployed-server/deployed-server-bootstrap-rhel.yaml @@ -0,0 +1,22 @@ +heat_template_version: ocata + +description: 'Deployed Server Bootstrap Config' + +parameters: + + server: + type: string + +resources: + + DeployedServerBootstrapConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: deployed-server-bootstrap-rhel.sh} + + DeployedServerBootstrapDeployment: + type: OS::Heat::SoftwareDeployment + properties: + config: {get_resource: 
DeployedServerBootstrapConfig} + server: {get_param: server} diff --git a/deployed-server/deployed-server-roles-data.yaml b/deployed-server/deployed-server-roles-data.yaml index 40a15189..04da5565 100644 --- a/deployed-server/deployed-server-roles-data.yaml +++ b/deployed-server/deployed-server-roles-data.yaml @@ -21,7 +21,7 @@ # on the role, defaults to an empty list. Sets the default for the # {{role.name}}Services parameter in overcloud.yaml -- name: Controller +- name: ControllerDeployedServer CountDefault: 1 disable_constraints: True ServicesDefault: @@ -33,16 +33,15 @@ - OS::TripleO::Services::CinderBackup - OS::TripleO::Services::CinderScheduler - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::Core - OS::TripleO::Services::Kernel - OS::TripleO::Services::Keystone - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceRegistry - OS::TripleO::Services::HeatApi - OS::TripleO::Services::HeatApiCfn - OS::TripleO::Services::HeatApiCloudwatch - OS::TripleO::Services::HeatEngine - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -59,6 +58,7 @@ - OS::TripleO::Services::MongoDb - OS::TripleO::Services::NovaApi - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaPlacement - OS::TripleO::Services::NovaScheduler - OS::TripleO::Services::NovaConsoleauth - OS::TripleO::Services::NovaVncProxy @@ -103,7 +103,7 @@ - OS::TripleO::Services::Zaqar - OS::TripleO::Services::OVNDBs -- name: Compute +- name: ComputeDeployedServer CountDefault: 1 HostnameFormatDefault: '%stackname%-novacompute-%index%' disable_constraints: True @@ -129,7 +129,7 @@ - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient -- name: BlockStorage +- name: BlockStorageDeployedServer disable_constraints: True ServicesDefault: - OS::TripleO::Services::CACerts @@ -143,7 +143,7 @@ - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient -- name: ObjectStorage +- name: ObjectStorageDeployedServer disable_constraints: True ServicesDefault: - OS::TripleO::Services::CACerts @@ -158,7 +158,7 @@ - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient -- name: CephStorage +- name: CephStorageDeployedServer disable_constraints: True ServicesDefault: - OS::TripleO::Services::CACerts diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml index 2929c5eb..1e8afb25 100644 --- a/deployed-server/deployed-server.yaml +++ b/deployed-server/deployed-server.yaml @@ -38,6 +38,12 @@ parameters: type: json description: Optional scheduler hints to pass to nova default: {} + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. 
+ default: '' resources: deployed-server: @@ -46,6 +52,25 @@ resources: name: {get_param: name} software_config_transport: {get_param: software_config_transport} + UpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + + UpgradeInitDeployment: + type: OS::Heat::SoftwareDeployment + properties: + name: UpgradeInitDeployment + server: {get_resource: deployed-server} + config: {get_resource: UpgradeInitConfig} + + InstanceIdConfig: type: OS::Heat::StructuredConfig properties: @@ -58,6 +83,7 @@ resources: properties: config: {get_resource: InstanceIdConfig} server: {get_resource: deployed-server} + depends_on: UpgradeInitDeployment HostsEntryConfig: type: OS::Heat::SoftwareConfig @@ -80,7 +106,12 @@ resources: config: {get_resource: HostsEntryConfig} server: {get_resource: deployed-server} - ControlPlanePortImpl: + DeployedServerBootstrapConfig: + type: OS::TripleO::DeployedServer::Bootstrap + properties: + server: {get_resource: deployed-server} + + ControlPlanePort: type: OS::TripleO::DeployedServer::ControlPlanePort properties: network: ctlplane @@ -97,6 +128,6 @@ outputs: networks: value: ctlplane: - - {get_attr: [ControlPlanePortImpl, fixed_ips, 0, ip_address]} + - {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]} name: value: {get_attr: [HostsEntryDeployment, hostname]} diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh index 404244b1..6c196f97 100755 --- a/deployed-server/scripts/get-occ-config.sh +++ b/deployed-server/scripts/get-occ-config.sh @@ -79,7 +79,14 @@ for role in $OVERCLOUD_ROLES; do server_stack=$(openstack stack resource show $stack $server_resource_name -c physical_resource_id -f value) done - deployed_server_metadata_url=$(openstack stack resource metadata $server_stack deployed-server | jq -r '.["os-collect-config"].request.metadata_url') + while true; do + deployed_server_metadata_url=$(openstack stack resource metadata $server_stack deployed-server | jq -r '.["os-collect-config"].request.metadata_url') + if [ "$deployed_server_metadata_url" = "null" ]; then + continue + else + break + fi + done echo "======================" echo "$role$i os-collect-config.conf configuration:" diff --git a/docker/copy-etc.sh b/docker/copy-etc.sh deleted file mode 100644 index 1a6cd520..00000000 --- a/docker/copy-etc.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -echo "Copying agent container /etc to /var/lib/etc-data" -cp -a /etc/* /var/lib/etc-data/ diff --git a/docker/copy-json.py b/docker/copy-json.py deleted file mode 100644 index e85ff11e..00000000 --- a/docker/copy-json.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/python -import json -import os - -data = {} -file_perms = '0600' -libvirt_perms = '0644' - -libvirt_config = os.getenv('libvirt_config').split(',') -nova_config = os.getenv('nova_config').split(',') -neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',') - -# Command, Config_files, Owner, Perms -services = { - 'nova-libvirt': [ - '/usr/sbin/libvirtd', - libvirt_config, - 'root', - libvirt_perms], - 'nova-compute': [ - '/usr/bin/nova-compute', - nova_config, - 'nova', - file_perms], - 'neutron-openvswitch-agent': [ - '/usr/bin/neutron-openvswitch-agent', - neutron_openvswitch_agent_config, - 'neutron', - file_perms], - 'ovs-vswitchd': [ - '/usr/sbin/ovs-vswitchd 
unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log'], - 'ovsdb-server': [ - '/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --remote=ptcp:6640:127.0.0.1 --log-file=/var/log/kolla/openvswitch/ovsdb-server.log'] -} - - -def build_config_files(config, owner, perms): - config_source = '/var/lib/kolla/config_files/' - config_files_dict = {} - source = os.path.basename(config) - dest = config - config_files_dict.update({'source': config_source + source, - 'dest': dest, - 'owner': owner, - 'perm': perms}) - return config_files_dict - - -for service in services: - if service != 'ovs-vswitchd' and service != 'ovsdb-server': - command = services.get(service)[0] - config_files = services.get(service)[1] - owner = services.get(service)[2] - perms = services.get(service)[3] - config_files_list = [] - for config_file in config_files: - if service == 'nova-libvirt': - command = command + ' --config ' + config_file - else: - command = command + ' --config-file ' + config_file - data['command'] = command - config_files_dict = build_config_files(config_file, owner, perms) - config_files_list.append(config_files_dict) - data['config_files'] = config_files_list - else: - data['command'] = services.get(service)[0] - data['config_files'] = [] - - json_config_dir = '/var/lib/etc-data/json-config/' - with open(json_config_dir + service + '.json', 'w') as json_file: - json.dump(data, json_file, sort_keys=True, indent=4, - separators=(',', ': ')) diff --git a/docker/create-config-dir.sh b/docker/create-config-dir.sh new file mode 100644 index 00000000..1be1a56f --- /dev/null +++ b/docker/create-config-dir.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# This is where we stack puppet configuration (for now)... +mkdir -p /var/lib/config-data + +# This is the docker-puppet configs end in +mkdir -p /var/lib/docker-puppet diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py new file mode 100755 index 00000000..157bf63f --- /dev/null +++ b/docker/docker-puppet.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Shell script tool to run puppet inside of the given docker container image. +# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON +# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings +# that can be used to generate config files or run ad-hoc puppet modules +# inside of a container. 
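+# As an illustration only (values borrowed from the mongodb service template
+# added elsewhere in this change, not an exact rendering of any node's file),
+# one entry in that JSON array could look like:
+#
+#   [["mongodb_init_tasks",
+#     "mongodb_database,mongodb_user,mongodb_replset",
+#     "include ::tripleo::profile::base::database::mongodb",
+#     "tripleoupstream/centos-binary-mongodb:latest",
+#     ["mongodb:/var/lib/mongodb/", "logs:/var/log/kolla:ro"]]]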
+ +import json +import os +import subprocess +import sys +import tempfile +import multiprocessing + + +# this is to match what we do in deployed-server +def short_hostname(): + subproc = subprocess.Popen(['hostname', '-s'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + cmd_stdout, cmd_stderr = subproc.communicate() + return cmd_stdout.rstrip() + + +def pull_image(name): + print('Pulling image: %s' % name) + subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + cmd_stdout, cmd_stderr = subproc.communicate() + print(cmd_stdout) + print(cmd_stderr) + + +def rm_container(name): + if os.environ.get('SHOW_DIFF', None): + print('Diffing container: %s' % name) + subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + cmd_stdout, cmd_stderr = subproc.communicate() + print(cmd_stdout) + print(cmd_stderr) + + print('Removing container: %s' % name) + subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + cmd_stdout, cmd_stderr = subproc.communicate() + print(cmd_stdout) + print(cmd_stderr) + +process_count = int(os.environ.get('PROCESS_COUNT', + multiprocessing.cpu_count())) + +config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json') +print('docker-puppet') +print('CONFIG: %s' % config_file) +with open(config_file) as f: + json_data = json.load(f) + +# To save time we support configuring 'shared' services at the same +# time. For example configuring all of the heat services +# in a single container pass makes sense and will save some time. +# To support this we merge shared settings together here. +# +# We key off of config_volume as this should be the same for a +# given group of services. We are also now specifying the container +# in which the services should be configured. This should match +# in all instances where the volume name is also the same. + +configs = {} + +for service in (json_data or []): + if service is None: + continue + if isinstance(service, dict): + service = [ + service.get('config_volume'), + service.get('puppet_tags'), + service.get('step_config'), + service.get('config_image'), + service.get('volumes', []), + ] + + config_volume = service[0] or '' + puppet_tags = service[1] or '' + manifest = service[2] or '' + config_image = service[3] or '' + volumes = service[4] if len(service) > 4 else [] + + if not manifest or not config_image: + continue + + print('---------') + print('config_volume %s' % config_volume) + print('puppet_tags %s' % puppet_tags) + print('manifest %s' % manifest) + print('config_image %s' % config_image) + print('volumes %s' % volumes) + # We key off of config volume for all configs. + if config_volume in configs: + # Append puppet tags and manifest. 
+ print("Existing service, appending puppet tags and manifest\n") + if puppet_tags: + configs[config_volume][1] = '%s,%s' % (configs[config_volume][1], + puppet_tags) + if manifest: + configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2], + manifest) + if configs[config_volume][3] != config_image: + print("WARNING: Config containers do not match even though" + " shared volumes are the same!\n") + else: + print("Adding new service\n") + configs[config_volume] = service + +print('Service compilation completed.\n') + +def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)): + + print('---------') + print('config_volume %s' % config_volume) + print('puppet_tags %s' % puppet_tags) + print('manifest %s' % manifest) + print('config_image %s' % config_image) + print('volumes %s' % volumes) + hostname = short_hostname() + sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume + + with open(sh_script, 'w') as script_file: + os.chmod(script_file.name, 0755) + script_file.write("""#!/bin/bash + set -ex + mkdir -p /etc/puppet + cp -a /tmp/puppet-etc/* /etc/puppet + rm -Rf /etc/puppet/ssl # not in use and causes permission errors + echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json + TAGS="" + if [ -n "%(puppet_tags)s" ]; then + TAGS='--tags "%(puppet_tags)s"' + fi + FACTER_hostname=%(hostname)s FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp + + # Disables archiving + if [ -z "%(no_archive)s" ]; then + rm -Rf /var/lib/config-data/%(name)s + + # copying etc should be enough for most services + mkdir -p /var/lib/config-data/%(name)s/etc + cp -a /etc/* /var/lib/config-data/%(name)s/etc/ + + if [ -d /root/ ]; then + cp -a /root/ /var/lib/config-data/%(name)s/root/ + fi + if [ -d /var/lib/ironic/tftpboot/ ]; then + mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/ + cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/tftpboot/ + fi + if [ -d /var/lib/ironic/httpboot/ ]; then + mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/ + cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/httpboot/ + fi + + # apache services may files placed in /var/www/ + if [ -d /var/www/ ]; then + mkdir -p /var/lib/config-data/%(name)s/var/www + cp -a /var/www/* /var/lib/config-data/%(name)s/var/www/ + fi + fi + """ % {'puppet_tags': puppet_tags, 'name': config_volume, + 'hostname': hostname, + 'no_archive': os.environ.get('NO_ARCHIVE', ''), + 'step': os.environ.get('STEP', '6')}) + + with tempfile.NamedTemporaryFile() as tmp_man: + with open(tmp_man.name, 'w') as man_file: + man_file.write('include ::tripleo::packages\n') + man_file.write(manifest) + + rm_container('docker-puppet-%s' % config_volume) + pull_image(config_image) + + dcmd = ['/usr/bin/docker', 'run', + '--user', 'root', + '--name', 'docker-puppet-%s' % config_volume, + '--volume', '%s:/etc/config.pp:ro' % tmp_man.name, + '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro', + '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', + '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw', + '--volume', 'tripleo_logs:/var/log/tripleo/', + '--volume', '%s:%s:rw' % (sh_script, sh_script) ] + + for volume in volumes: + dcmd.extend(['--volume', volume]) + + dcmd.extend(['--entrypoint', sh_script]) + + env = {} + if os.environ.get('NET_HOST', 'false') == 'true': + print('NET_HOST enabled') + dcmd.extend(['--net', 'host', '--volume', + '/etc/hosts:/etc/hosts:ro']) + 
dcmd.append(config_image) + + subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, env=env) + cmd_stdout, cmd_stderr = subproc.communicate() + print(cmd_stdout) + print(cmd_stderr) + if subproc.returncode != 0: + print('Failed running docker-puppet.py for %s' % config_volume) + rm_container('docker-puppet-%s' % config_volume) + return subproc.returncode + +# Holds all the information for each process to consume. +# Instead of starting them all linearly we run them using a process +# pool. This creates a list of arguments for the above function +# to consume. +process_map = [] + +for config_volume in configs: + + service = configs[config_volume] + puppet_tags = service[1] or '' + manifest = service[2] or '' + config_image = service[3] or '' + volumes = service[4] if len(service) > 4 else [] + + if puppet_tags: + puppet_tags = "file,file_line,concat,%s" % puppet_tags + else: + puppet_tags = "file,file_line,concat" + + process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) + +for p in process_map: + print '--\n%s' % p + +# Fire off processes to perform each configuration. Defaults +# to the number of CPUs on the system. +p = multiprocessing.Pool(process_count) +p.map(mp_puppet_config, process_map) diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2 new file mode 100644 index 00000000..76232d14 --- /dev/null +++ b/docker/docker-steps.j2 @@ -0,0 +1,325 @@ +# certain initialization steps (run in a container) will occur +# on the first role listed in the roles file +{% set primary_role_name = roles[0].name -%} + +heat_template_version: ocata + +description: > + Post-deploy configuration steps via puppet for all roles, + as defined in ../roles_data.yaml + +parameters: + servers: + type: json + description: Mapping of Role name e.g Controller to a list of servers + role_data: + type: json + description: Mapping of Role name e.g Controller to the per-role data + DeployIdentifier: + default: '' + type: string + description: > + Setting this to a unique value will re-run any deployment tasks which + perform configuration on a Heat stack-update. + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +resources: + + # These utility tasks use docker-puppet.py to execute tasks via puppet + # We only execute these on the first node in the primary role + {{primary_role_name}}DockerPuppetTasks: + type: OS::Heat::Value + properties: + type: json + value: + yaql: + expression: + dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1])) + data: + docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]} + +# BEGIN primary_role_name docker-puppet-tasks (run only on a single node) +{% for step in range(1, 6) %} + + {{primary_role_name}}DockerPuppetJsonConfig{{step}}: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json: + {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']} + + {{primary_role_name}}DockerPuppetJsonDeployment{{step}}: + type: OS::Heat::SoftwareDeployment + properties: + server: {get_param: [servers, {{primary_role_name}}, '0']} + config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}} + + {{primary_role_name}}DockerPuppetTasksConfig{{step}}: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: docker-puppet.py} + inputs: + - name: CONFIG + - name: NET_HOST + - name: NO_ARCHIVE + - name: STEP + + {{primary_role_name}}DockerPuppetTasksDeployment{{step}}: + type: OS::Heat::SoftwareDeployment + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step}} + - {{dep.name}}ContainersDeployment_Step{{step}} + {% endfor %} + - {{primary_role_name}}DockerPuppetJsonDeployment{{step}} + properties: + name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}} + server: {get_param: [servers, {{primary_role_name}}, '0']} + config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}} + input_values: + CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json + NET_HOST: 'true' + NO_ARCHIVE: 'true' + STEP: {{step}} + +{% endfor %} +# END primary_role_name docker-puppet-tasks + +{% for role in roles %} + # Post deployment steps for all roles + # A single config is re-applied with an incrementing step number + # {{role.name}} Role steps + {{role.name}}ArtifactsConfig: + type: ../puppet/deploy-artifacts.yaml + + {{role.name}}ArtifactsDeploy: + type: OS::Heat::StructuredDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ArtifactsConfig} + + {{role.name}}PreConfig: + type: OS::TripleO::Tasks::{{role.name}}PreConfig + properties: + servers: {get_param: [servers, {{role.name}}]} + input_values: + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}CreateConfigDir: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: create-config-dir.sh} + + {{role.name}}CreateConfigDirDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}CreateConfigDir} + + # this creates a JSON config file for our docker-puppet.py script + {{role.name}}GenPuppetConfig: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-puppet/docker-puppet.json: + {get_param: [role_data, {{role.name}}, puppet_config]} + + {{role.name}}GenPuppetDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}GenPuppetConfig} + + 
{{role.name}}GenerateConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: {get_file: docker-puppet.py} + + {{role.name}}GenerateConfigDeployment: + type: OS::Heat::SoftwareDeploymentGroup + depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment] + properties: + name: {{role.name}}GenerateConfigDeployment + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}GenerateConfig} + + {{role.name}}PuppetStepConfig: + type: OS::Heat::Value + properties: + type: string + value: + yaql: + expression: + # select 'step_config' only from services that do not have a docker_image + $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n") + data: + service_names: {get_param: [role_data, {{role.name}}, service_names]} + step_config: {get_param: [role_data, {{role.name}}, step_config]} + docker_image: {get_param: [role_data, {{role.name}}, docker_image]} + + {{role.name}}DockerConfig: + type: OS::Heat::Value + properties: + type: json + value: + yaql: + expression: + # select 'docker_config' only from services that have a docker_image + $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {}) + data: + service_names: {get_param: [role_data, {{role.name}}, service_names]} + docker_config: {get_param: [role_data, {{role.name}}, docker_config]} + docker_image: {get_param: [role_data, {{role.name}}, docker_image]} + + # Here we are dumping all the docker container startup configuration data + # so that we can have access to how they are started outside of heat + # and docker-cmd. This lets us create command line tools to start and + # test these containers. 
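+  # As a rough sketch (illustrative values only), the dumped file is keyed by
+  # step and then by container name, mirroring the docker_config output of
+  # the service templates, e.g.:
+  #   {"step_4": {"heat_api": {"image": "tripleoupstream/centos-binary-heat-api:latest",
+  #                            "net": "host", "privileged": false, "restart": "always",
+  #                            "volumes": ["/run:/run", "/dev:/dev"],
+  #                            "environment": ["KOLLA_CONFIG_STRATEGY=COPY_ALWAYS"]}}}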
+ {{role.name}}DockerConfigJsonStartupData: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + /var/lib/docker-container-startup-configs.json: + {get_attr: [{{role.name}}DockerConfig, value]} + + {{role.name}}DockerConfigJsonStartupDataDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + config: {get_resource: {{role.name}}DockerConfigJsonStartupData} + servers: {get_param: [servers, {{role.name}}]} + + {{role.name}}KollaJsonConfig: + type: OS::Heat::StructuredConfig + properties: + group: json-file + config: + {get_param: [role_data, {{role.name}}, kolla_config]} + + {{role.name}}KollaJsonDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + name: {{role.name}}KollaJsonDeployment + config: {get_resource: {{role.name}}KollaJsonConfig} + servers: {get_param: [servers, {{role.name}}]} + + # BEGIN BAREMETAL CONFIG STEPS + + {% if role.name == 'Controller' %} + ControllerPrePuppet: + type: OS::TripleO::Tasks::ControllerPrePuppet + properties: + servers: {get_param: [servers, Controller]} + input_values: + update_identifier: {get_param: DeployIdentifier} + {% endif %} + + {{role.name}}Config: + type: OS::TripleO::{{role.name}}Config + properties: + StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]} + + {% for step in range(1, 6) %} + + {{role.name}}Deployment_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] + {% else %} + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step -1}} + - {{dep.name}}ContainersDeployment_Step{{step -1}} + {% endfor %} + - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} + {% endif %} + properties: + name: {{role.name}}Deployment_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: {{step}} + update_identifier: {get_param: DeployIdentifier} + + {% endfor %} + # END BAREMETAL CONFIG STEPS + + # BEGIN CONTAINER CONFIG STEPS + {% for step in range(1, 6) %} + + {{role.name}}ContainersConfig_Step{{step}}: + type: OS::Heat::StructuredConfig + properties: + group: docker-cmd + config: + {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]} + + {{role.name}}ContainersDeployment_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: + - {{role.name}}PreConfig + - {{role.name}}KollaJsonDeployment + - {{role.name}}GenPuppetDeployment + - {{role.name}}GenerateConfigDeployment + {% else %} + depends_on: + {% for dep in roles %} + - {{dep.name}}ContainersDeployment_Step{{step -1}} + - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first + - {{dep.name}}Deployment_Step{{step -1}} + {% endfor %} + - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}} + {% endif %} + properties: + name: {{role.name}}ContainersDeployment_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}} + + {% endfor %} + # END CONTAINER CONFIG STEPS + + {{role.name}}PostConfig: + type: OS::TripleO::Tasks::{{role.name}}PostConfig + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step5 + - {{primary_role_name}}DockerPuppetTasksDeployment5 + {% endfor %} + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: DeployIdentifier} + + # Note, this should come last, so use depends_on to ensure + # this is created after any other 
resources. + {{role.name}}ExtraConfigPost: + depends_on: + {% for dep in roles %} + - {{dep.name}}PostConfig + {% endfor %} + type: OS::TripleO::NodeExtraConfigPost + properties: + servers: {get_param: [servers, {{role.name}}]} + + {% if role.name == 'Controller' %} + ControllerPostPuppet: + depends_on: + - ControllerExtraConfigPost + type: OS::TripleO::Tasks::ControllerPostPuppet + properties: + servers: {get_param: [servers, Controller]} + input_values: + update_identifier: {get_param: DeployIdentifier} + {% endif %} + +{% endfor %} diff --git a/docker/docker-toool b/docker/docker-toool new file mode 100755 index 00000000..36aba4a7 --- /dev/null +++ b/docker/docker-toool @@ -0,0 +1,189 @@ +#!/usr/bin/env python +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import argparse +import os +import shutil +import sys +import json + +docker_cmd = '/bin/docker' + +# Tool to start docker containers as configured via +# tripleo-heat-templates. +# +# This tool reads data from a json file generated from heat when the +# TripleO stack is run. All the configuration data used to start the +# containerized services is in this file. +# +# By default this tool lists all the containers that are started and +# their start order. +# +# If you wish to see the command line used to start a given container, +# specify it by name using the --container argument. --run can then be +# used with this to actually execute docker to run the container.\n +# +# Other options listed allow you to modify this command line for +# debugging purposes. For example: +# +# docker-toool -c swift-proxy -r -e /bin/bash -u root -i -n test +# +# will run the swift proxy container as user root, executing /bin/bash, +# +# named 'test', and will run interactively (eg -ti). 
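+# As a hypothetical example (using the mysql service added in this change;
+# the actual output format may differ slightly), listing the containers could
+# print something like:
+#
+#   step_2
+#       container: mysql_bootstrap
+#               start_order: 0
+#       container: mysql
+#               start_order: 1
+#
+# and 'docker-toool -c mysql -r' would then exec the equivalent 'docker run'
+# command assembled from that container's image, volumes and environment.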
+ + +def parse_opts(argv): + parser = argparse.ArgumentParser("Tool to start docker containers via " + "TripleO configurations") + parser.add_argument('-f', '--config', + help="""File to use as docker startup configuration data.""", + default='/var/lib/docker-container-startup-configs.json') + parser.add_argument('-r', '--run', + action='store_true', + help="""Run the container as specified with --container.""", + default=False) + parser.add_argument('-e', '--command', + help="""Override the command used to run the container.""", + default='') + parser.add_argument('-c', '--container', + help="""Specify a container to run or show the command for.""", + default='') + parser.add_argument('-u', '--user', + help="""User to run container as.""", + default='') + parser.add_argument('-n', '--name', + help="""Name of container.""", + default='') + parser.add_argument('-i', '--interactive', + action='store_true', + help="""Start docker container interactively (-ti).""", + default=False) + opts = parser.parse_args(argv[1:]) + + return opts + +def docker_arg_map(key, value): + value = str(value).encode('ascii', 'ignore') + return { + 'environment': "--env=%s" % value, + # 'image': value, + 'net': "--net=%s" % value, + 'pid': "--pid=%s" % value, + 'privileged': "--privileged=%s" % value.lower(), + #'restart': "--restart=%s" % "false", + 'user': "--user=%s" % value, + 'volumes': "--volume=%s" % value, + 'volumes_from': "--volumes-from=%s" % value, + }.get(key, None) + +def run_docker_container(opts, container_name): + container_found = False + + with open(opts.config) as f: + json_data = json.load(f) + + for step in (json_data or []): + if step is None: + continue + for container in (json_data[step] or []): + if container == container_name: + print('container found: %s' % container) + container_found = True + # A few positional arguments: + command = '' + image = '' + + cmd = [ + docker_cmd, + 'run', + '--name', + opts.name or container + ] + for container_data in (json_data[step][container] or []): + if container_data == "environment": + for env in (json_data[step][container][container_data] or []): + arg = docker_arg_map("environment", env) + if arg: + cmd.append(arg) + elif container_data == "volumes": + for volume in (json_data[step][container][container_data] or []): + arg = docker_arg_map("volumes", volume) + if arg: + cmd.append(arg) + elif container_data == "volumes_from": + for volume in (json_data[step][container][container_data] or []): + arg = docker_arg_map("volumes_from", volume) + if arg: + cmd.append(arg) + elif container_data == 'command': + command = json_data[step][container][container_data] + elif container_data == 'image': + image = json_data[step][container][container_data] + else: + # Only add a restart if we're not interactive + if container_data == 'restart': + if opts.interactive: + continue + if container_data == 'user': + if opts.user: + continue + arg = docker_arg_map(container_data, + json_data[step][container][container_data]) + if arg: + cmd.append(arg) + + if opts.user: + cmd.append('--user') + cmd.append(opts.user) + if opts.interactive: + cmd.append('-ti') + # May as well remove it when we're done too + cmd.append('--rm') + cmd.append(image) + if opts.command: + cmd.append(opts.command) + elif command: + cmd.extend(command) + + print ' '.join(cmd) + + if opts.run: + os.execl(docker_cmd, *cmd) + + if not container_found: + print("Container '%s' not found!" 
% container_name) + +def list_docker_containers(opts): + print opts + with open(opts.config) as f: + json_data = json.load(f) + + for step in (json_data or []): + if step is None: + continue + print step + for container in (json_data[step] or []): + print('\tcontainer: %s' % container) + for container_data in (json_data[step][container] or []): + #print('\t\tcontainer_data: %s' % container_data) + if container_data == "start_order": + print('\t\tstart_order: %s' % json_data[step][container][container_data]) + +opts = parse_opts(sys.argv) + +if opts.container: + run_docker_container(opts, opts.container) +else: + list_docker_containers(opts) + diff --git a/docker/firstboot/setup_docker_host.sh b/docker/firstboot/setup_docker_host.sh new file mode 100755 index 00000000..b2287e91 --- /dev/null +++ b/docker/firstboot/setup_docker_host.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -eux +# TODO This would be better in puppet + +# TODO remove this when built image includes docker +if [ ! -f "/usr/bin/docker" ]; then + yum -y install docker +fi + +# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is +# a place holder for text replacement done via heat +if [ "$docker_namespace_is_registry" = "True" ]; then + /usr/bin/systemctl stop docker.service + # if namespace is used with local registry, trim all namespacing + trim_var=$docker_registry + registry_host="${trim_var%%/*}" + /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker +fi + +# enable and start docker +/usr/bin/systemctl enable docker.service +/usr/bin/systemctl start docker.service + +# Disable libvirtd +/usr/bin/systemctl disable libvirtd.service +/usr/bin/systemctl stop libvirtd.service diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/setup_docker_host.yaml index 41a87406..2f258987 100644 --- a/docker/firstboot/install_docker_agents.yaml +++ b/docker/firstboot/setup_docker_host.yaml @@ -1,9 +1,6 @@ heat_template_version: ocata parameters: - DockerAgentImage: - type: string - default: heat-docker-agents DockerNamespace: type: string default: tripleoupstream @@ -17,22 +14,18 @@ resources: type: OS::Heat::MultipartMime properties: parts: - - config: {get_resource: install_docker_agents} + - config: {get_resource: setup_docker_host} - install_docker_agents: + setup_docker_host: type: OS::Heat::SoftwareConfig properties: group: script config: str_replace: params: - $agent_image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerAgentImage} ] $docker_registry: {get_param: DockerNamespace} $docker_namespace_is_registry: {get_param: DockerNamespaceIsRegistry} - template: {get_file: ./start_docker_agents.sh} + template: {get_file: ./setup_docker_host.sh} outputs: OS::stack_id: diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh deleted file mode 100755 index 1c5cc18d..00000000 --- a/docker/firstboot/start_docker_agents.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash -set -eux - -# TODO remove this when built image includes docker -if [ ! 
-f "/usr/bin/docker" ]; then - yum -y install docker -fi - -# Local docker registry 1.8 -# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is -# a place holder for text replacement done via heat -if [ "$docker_namespace_is_registry" = "True" ]; then - /usr/bin/systemctl stop docker.service - # if namespace is used with local registry, trim all namespacing - trim_var=$docker_registry - registry_host="${trim_var%%/*}" - /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker -fi - -mkdir -p /var/lib/etc-data/json-config #FIXME: this should be a docker data container - -# NOTE(flaper87): Heat Agent required mounts -AGENT_COMMAND_MOUNTS="\ --v /var/lib/etc-data:/var/lib/etc-data \ --v /run:/run \ --v /etc/hosts:/etc/hosts \ --v /etc:/host/etc \ --v /var/lib/dhclient:/var/lib/dhclient \ --v /var/lib/cloud:/var/lib/cloud \ --v /var/lib/heat-cfntools:/var/lib/heat-cfntools \ --v /var/lib/os-collect-config:/var/lib/os-collect-config \ --v /var/lib/os-apply-config-deployments:/var/lib/os-apply-config-deployments \ --v /var/lib/heat-config:/var/lib/heat-config \ --v /etc/sysconfig/docker:/etc/sysconfig/docker \ --v /etc/sysconfig/network-scripts:/etc/sysconfig/network-scripts \ --v /usr/lib64/libseccomp.so.2:/usr/lib64/libseccomp.so.2 \ --v /usr/bin/docker:/usr/bin/docker \ --v /usr/bin/docker-current:/usr/bin/docker-current \ --v /var/lib/os-collect-config:/var/lib/os-collect-config" - -# heat-docker-agents service -cat <<EOF > /etc/systemd/system/heat-docker-agents.service -[Unit] -Description=Heat Docker Agent Container -After=docker.service -Requires=docker.service -Before=os-collect-config.service -Conflicts=os-collect-config.service - -[Service] -User=root -Restart=always -ExecStartPre=-/usr/bin/docker rm -f heat-agents -ExecStart=/usr/bin/docker run --name heat-agents --privileged --net=host \ - $AGENT_COMMAND_MOUNTS \ - --entrypoint=/usr/bin/os-collect-config $agent_image -ExecStop=/usr/bin/docker stop heat-agents - -[Install] -WantedBy=multi-user.target -EOF - -# enable and start heat-docker-agents -/usr/bin/systemctl enable heat-docker-agents.service -/usr/bin/systemctl start --no-block heat-docker-agents.service - -# Disable libvirtd -/usr/bin/systemctl disable libvirtd.service -/usr/bin/systemctl stop libvirtd.service diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml new file mode 100644 index 00000000..4477f868 --- /dev/null +++ b/docker/post-upgrade.j2.yaml @@ -0,0 +1,4 @@ +# Note the include here is the same as post.j2.yaml but the data used at +# # the time of rendering is different if any roles disable upgrades +{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%} +{% include 'docker-steps.j2' %} diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml index 41d33895..fd956215 100644 --- a/docker/post.j2.yaml +++ b/docker/post.j2.yaml @@ -1,231 +1 @@ -heat_template_version: ocata - -description: > - Post-deploy configuration steps via puppet for all roles, - as defined in ../roles_data.yaml - -parameters: - servers: - type: json - description: Mapping of Role name e.g Controller to a list of servers - - role_data: - type: json - description: Mapping of Role name e.g Controller to the per-role data - - DeployIdentifier: - default: '' - type: string - description: > - Setting this to a unique value will re-run any deployment tasks which - perform configuration on a Heat stack-update. 
- - DockerNamespace: - description: namespace - default: 'tripleoupstream' - type: string - - LibvirtConfig: - type: string - default: "/etc/libvirt/libvirtd.conf" - - NovaConfig: - type: string - default: "/etc/nova/nova.conf,/etc/nova/rootwrap.conf" - - NeutronOpenvswitchAgentConfig: - type: string - default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini" - -resources: - -{% for role in roles %} - # Post deployment steps for all roles - # A single config is re-applied with an incrementing step number - # {{role.name}} Role steps - {{role.name}}ArtifactsConfig: - type: ../puppet/deploy-artifacts.yaml - - {{role.name}}ArtifactsDeploy: - type: OS::Heat::StructuredDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ArtifactsConfig} - - {{role.name}}PreConfig: - type: OS::TripleO::Tasks::{{role.name}}PreConfig - properties: - servers: {get_param: [servers, {{role.name}}]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}Config: - type: OS::TripleO::{{role.name}}Config - properties: - StepConfig: {get_param: [role_data, {{role.name}}, step_config]} - {% if role.name.lower() == 'compute' %} - PuppetTags: {get_param: [role_data, {{role.name}}, puppet_tags]} - {% endif %} - - # Step through a series of configuration steps - {{role.name}}Deployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] - properties: - name: {{role.name}}Deployment_Step1 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 1 - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}Deployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step1 - {% endfor %} - properties: - name: {{role.name}}Deployment_Step2 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 2 - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}Deployment_Step3: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step2 - {% endfor %} - properties: - name: {{role.name}}Deployment_Step3 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 3 - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}Deployment_Step4: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step3 - {% endfor %} - properties: - name: {{role.name}}Deployment_Step4 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 4 - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}Deployment_Step5: - type: OS::Heat::StructuredDeploymentGroup - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step4 - {% endfor %} - properties: - name: {{role.name}}Deployment_Step5 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: 5 - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}PostConfig: - type: OS::TripleO::Tasks::{{role.name}}PostConfig - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step5 - {% endfor %} - properties: - servers: {get_param: servers} - input_values: - update_identifier: 
{get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. - {{role.name}}ExtraConfigPost: - depends_on: - {% for dep in roles %} - - {{dep.name}}PostConfig - {% endfor %} - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, {{role.name}}]} - - {% if role.name.lower() == 'compute' %} - CopyEtcConfig: - type: OS::Heat::SoftwareConfig - depends_on: {{role.name}}PostConfig - properties: - group: script - outputs: - - name: result - config: {get_file: ../docker/copy-etc.sh} - - CopyEtcDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - name: CopyEtcDeployment - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: CopyEtcConfig} - - CopyJsonConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - inputs: - - name: libvirt_config - - name: nova_config - - name: neutron_openvswitch_agent_config - config: {get_file: ../docker/copy-json.py} - - CopyJsonDeployment: - type: OS::Heat::SoftwareDeploymentGroup - depends_on: CopyEtcDeployment - properties: - name: CopyJsonDeployment - config: {get_resource: CopyJsonConfig} - servers: {get_param: [servers, {{role.name}}]} - input_values: - libvirt_config: {get_param: LibvirtConfig} - nova_config: {get_param: NovaConfig} - neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig} - - {{role.name}}ContainersConfig_Step1: - type: OS::Heat::StructuredConfig - depends_on: CopyJsonDeployment - properties: - group: docker-cmd - config: - {get_param: [role_data, {{role.name}}, docker_config, step_1]} - - {{role.name}}ContainersConfig_Step2: - type: OS::Heat::StructuredConfig - depends_on: CopyJsonDeployment - properties: - group: docker-cmd - config: - {get_param: [role_data, {{role.name}}, docker_config, step_2]} - - {{role.name}}ContainersDeployment_Step1: - type: OS::Heat::StructuredDeploymentGroup - depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] - properties: - name: {{role.name}}ContainersDeployment_Step1 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ContainersConfig_Step1} - - {{role.name}}ContainersDeployment_Step2: - type: OS::Heat::StructuredDeploymentGroup - depends_on: {{role.name}}ContainersDeployment_Step1 - properties: - name: {{role.name}}ContainersDeployment_Step2 - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ContainersConfig_Step2} - {% endif %} -{% endfor %} +{% include 'docker-steps.j2' %} diff --git a/docker/services/README.rst b/docker/services/README.rst index 8d1f9e86..881a2a37 100644 --- a/docker/services/README.rst +++ b/docker/services/README.rst @@ -1,60 +1,115 @@ -======== -services -======== +=============== +Docker Services +=============== -A TripleO nested stack Heat template that encapsulates generic configuration -data to configure a specific service. This generally includes everything -needed to configure the service excluding the local bind ports which -are still managed in the per-node role templates directly (controller.yaml, -compute.yaml, etc.). All other (global) service settings go into -the puppet/service templates. +TripleO docker services are currently built on top of the puppet services. +To do this each of the docker services includes the output of the +t-h-t puppet/service templates where appropriate. 
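+As a sketch only (mirroring the heat-api template added in this change), a
+docker service template typically wraps its puppet counterpart like this and
+then extends the resulting role_data with docker specific settings:
+
+.. code-block::
+
+  resources:
+    HeatBase:
+      type: ../../puppet/services/heat-api.yaml
+      properties:
+        EndpointMap: {get_param: EndpointMap}
+        ServiceNetMap: {get_param: ServiceNetMap}
+        DefaultPasswords: {get_param: DefaultPasswords}
+
+..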
-Input Parameters ----------------- +In general global docker specific service settings should reside in these +templates (templates in the docker/services directory.) The required and +optional items are specified in the docker settings section below. -Each service may define its own input parameters and defaults. -Operators will use the parameter_defaults section of any Heat -environment to set per service parameters. +If you are adding a config setting that applies to both docker and +baremetal that setting should (so long as we use puppet) go into the +puppet/services templates themselves. -Config Settings ---------------- - -Each service may define a config_settings output variable which returns -Hiera settings to be configured. - -Steps ------ - -Each service may define an output variable which returns a puppet manifest -snippet that will run at each of the following steps. Earlier manifests -are re-asserted when applying latter ones. - - * config_settings: Custom hiera settings for this service. These are - used to generate configs. +Building Kolla Images +--------------------- - * step_config: A puppet manifest that is used to step through the deployment - sequence. Each sequence is given a "step" (via hiera('step') that provides - information for when puppet classes should activate themselves. +TripleO currently relies on Kolla docker containers. Kolla supports container +customization and we are making use of this feature within TripleO to inject +puppet (our configuration tool of choice) into the Kolla base images. The +undercloud nova-scheduler also requires openstack-tripleo-common to +provide custom filters. - * docker_compose: +To build Kolla images for TripleO adjust your kolla config to build your +centos base image with puppet using the example below: - * container_name: +.. code-block:: - * volumes: +$ cat template-overrides.j2 +{% extends parent_template %} +{% set base_centos_binary_packages_append = ['puppet'] %} +{% set nova_scheduler_packages_append = ['openstack-tripleo-common'] %} -Steps correlate to the following: - - 1) Service configuration generation with puppet. +kolla-build --base centos --template-override template-overrides.j2 - 2) Early Openstack Service setup (database init?) +.. - 3) Early containerized networking services startup (OVS) - 4) Network configuration - - 5) General OpenStack Services +Docker settings +--------------- +Each service may define an output variable which returns a puppet manifest +snippet that will run at each of the following steps. Earlier manifests +are re-asserted when applying latter ones. - 6) Service activation (Pacemaker) + * config_settings: This setting is generally inherited from the + puppet/services templates and only need to be appended + to on accasion if docker specific config settings are required. + + * step_config: This setting controls the manifest that is used to + create docker config files via puppet. The puppet tags below are + used along with this manifest to generate a config directory for + this container. + + * kolla_config: Contains YAML that represents how to map config files + into the kolla container. This config file is typically mapped into + the container itself at the /var/lib/kolla/config_files/config.json + location and drives how kolla's external config mechanisms work. + + * docker_image: The full name of the docker image that will be used. + + * docker_config: Data that is passed to the docker-cmd hook to configure + a container, or step of containers at each step. 
See the available steps + below and the related docker-cmd hook documentation in the heat-agents + project. + + * puppet_tags: Puppet resource tag names that are used to generate config + files with puppet. Only the named config resources are used to generate + a config file. Any service that specifies tags will have the default + tags of 'file,concat,file_line' appended to the setting. + Example: keystone_config + + * config_volume: The name of the volume (directory) where config files + will be generated for this service. Use this as the location to + bind mount into the running Kolla container for configuration. + + * config_image: The name of the docker image that will be used for + generating configuration files. This is often the same value as + 'docker_image' above but some containers share a common set of + config files which are generated in a common base container. + + * docker_puppet_tasks: This section provides data to drive the + docker-puppet.py tool directly. The task is executed only once + within the cluster (not on each node) and is useful for several + puppet snippets we require for initialization of things like + keystone endpoints, database users, etc. See docker-puppet.py + for formatting. + +Docker steps +------------ +Similar to baremetal docker containers are brought up in a stepwise manner. +The current architecture supports bringing up baremetal services alongside +of containers. For each step the baremetal puppet manifests are executed +first and then any docker containers are brought up afterwards. - 7) Fencing (Pacemaker) +Steps correlate to the following: + Pre) Containers config files generated per hiera settings. + 1) Load Balancer configuration baremetal + a) step 1 baremetal + b) step 1 containers + 2) Core Services (Database/Rabbit/NTP/etc.) + a) step 2 baremetal + b) step 2 containers + 3) Early Openstack Service setup (Ringbuilder, etc.) + a) step 3 baremetal + b) step 3 containers + 4) General OpenStack Services + a) step 4 baremetal + b) step 4 containers + c) Keystone containers post initialization (tenant,service,endpoint creation) + 5) Service activation (Pacemaker) + a) step 5 baremetal + b) step 5 containers diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml new file mode 100644 index 00000000..e83f4f19 --- /dev/null +++ b/docker/services/database/mongodb.yaml @@ -0,0 +1,105 @@ +heat_template_version: ocata + +description: > + MongoDB service deployment using puppet and docker + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMongodbImage: + description: image + default: 'centos-binary-mongodb:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + MongodbPuppetBase: + type: ../../../puppet/services/database/mongodb.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Containerized service Mongodb using composable services. 
+ value: + service_name: {get_attr: [MongodbPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MongodbPuppetBase, role_data, config_settings] + - mongodb::server::fork: false + step_config: &step_config + list_join: + - "\n" + - - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }" + - {get_attr: [MongodbPuppetBase, role_data, step_config]} + # BEGIN DOCKER SETTINGS # + docker_image: &mongodb_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ] + puppet_config: + config_volume: mongodb + puppet_tags: file # set this even though file is the default + step_config: *step_config + config_image: *mongodb_image + kolla_config: + /var/lib/kolla/config_files/mongodb.json: + command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run + config_files: + - dest: /etc/mongod.conf + source: /var/lib/kolla/config_files/src/etc/mongod.conf + owner: mongodb + perm: '0600' + - dest: /etc/mongos.conf + source: /var/lib/kolla/config_files/src/etc/mongos.conf + owner: mongodb + perm: '0600' + docker_config: + step_2: + mongodb: + image: *mongodb_image + net: host + privileged: false + volumes: &mongodb_volumes + - /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json + - /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - logs:/var/log/kolla + - mongodb:/var/lib/mongodb/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + docker_puppet_tasks: + # MySQL database initialization occurs only on single node + step_2: + config_volume: 'mongodb_init_tasks' + puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset' + step_config: 'include ::tripleo::profile::base::database::mongodb' + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ] + volumes: + - "mongodb:/var/lib/mongodb/" + - "logs:/var/log/kolla:ro" + upgrade_tasks: + - name: Stop and disable mongodb service + tags: step2 + service: name=mongod state=stopped enabled=no diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml new file mode 100644 index 00000000..c34ebe93 --- /dev/null +++ b/docker/services/database/mysql.yaml @@ -0,0 +1,137 @@ +heat_template_version: ocata + +description: > + MySQL service deployment using puppet + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMysqlImage: + description: image + default: 'centos-binary-mariadb:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + MysqlRootPassword: + type: string + hidden: true + default: '' + +resources: + + MysqlPuppetBase: + type: ../../../puppet/services/database/mysql.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Containerized service MySQL using composable services. 
+ value: + service_name: {get_attr: [MysqlPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - {get_attr: [MysqlPuppetBase, role_data, config_settings]} + # Set PID file to what kolla mariadb bootstrap script expects + - tripleo::profile::base::database::mysql::mysql_server_options: + mysqld: + pid-file: /var/lib/mysql/mariadb.pid + mysqld_safe: + pid-file: /var/lib/mysql/mariadb.pid + step_config: &step_config + list_join: + - "\n" + - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }" + - {get_attr: [MysqlPuppetBase, role_data, step_config]} + # BEGIN DOCKER SETTINGS # + docker_image: &mysql_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ] + puppet_config: + config_volume: mysql + puppet_tags: file # set this even though file is the default + step_config: *step_config + config_image: *mysql_image + kolla_config: + /var/lib/kolla/config_files/mysql.json: + command: /usr/bin/mysqld_safe + config_files: + - dest: /etc/mysql/my.cnf + source: /var/lib/kolla/config_files/src/etc/my.cnf + owner: mysql + perm: '0644' + - dest: /etc/my.cnf.d/galera.cnf + source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf + owner: mysql + perm: '0644' + docker_config: + step_2: + mysql_bootstrap: + start_order: 0 + detach: false + image: *mysql_image + net: host + volumes: &mysql_volumes + - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json + - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /etc/hosts:/etc/hosts:ro + - mariadb:/var/lib/mysql/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + - KOLLA_BOOTSTRAP=True + # NOTE(mandre) skip wsrep cluster status check + - KOLLA_KUBERNETES=True + - + list_join: + - '=' + - - 'DB_ROOT_PASSWORD' + - + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: MysqlRootPassword} + - {get_param: [DefaultPasswords, mysql_root_password]} + mysql: + start_order: 1 + image: *mysql_image + restart: always + net: host + volumes: *mysql_volumes + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + docker_puppet_tasks: + # MySQL database initialization occurs only on single node + step_2: + config_volume: 'mysql_init_tasks' + puppet_tags: 'mysql_database,mysql_grant,mysql_user' + step_config: 'include ::tripleo::profile::base::database::mysql' + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ] + volumes: + - "mariadb:/var/lib/mysql/:ro" + - "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf + upgrade_tasks: + - name: Stop and disable mysql service + tags: step2 + service: name=mariadb state=stopped enabled=no diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml new file mode 100644 index 00000000..73d76ad5 --- /dev/null +++ b/docker/services/glance-api.yaml @@ -0,0 +1,103 @@ +heat_template_version: ocata + +description: > + OpenStack Glance service configured with Puppet + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerGlanceApiImage: + description: image + default: 'centos-binary-glance-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + GlanceApiPuppetBase: + type: ../../puppet/services/glance-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Glance API role. + value: + service_name: {get_attr: [GlanceApiPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [GlanceApiPuppetBase, role_data, config_settings] + - glance::api::sync_db: false + step_config: &step_config + get_attr: [GlanceApiPuppetBase, role_data, step_config] + service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS # + docker_image: &glance_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ] + puppet_config: + config_volume: glance_api + puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config + step_config: *step_config + config_image: *glance_image + kolla_config: + /var/lib/kolla/config_files/glance-api.json: + command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf + config_files: + - dest: /etc/glance/glance-api.conf + owner: glance + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/glance/glance-api.conf + - dest: /etc/glance/glance-swift.conf + owner: glance + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/glance/glance-swift.conf + docker_config: + step_3: + glance_api_db_sync: + image: *glance_image + net: host + privileged: false + detach: false + volumes: &glance_volumes + - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json + - /etc/localtime:/etc/localtime:ro + - /lib/modules:/lib/modules:ro + - /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro + - /run:/run + - /dev:/dev + - /etc/hosts:/etc/hosts:ro + environment: + - KOLLA_BOOTSTRAP=True + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + step_4: + glance_api: + image: *glance_image + net: host + privileged: false + restart: always + volumes: *glance_volumes + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable glance_api service + tags: step2 + service: name=openstack-glance-api state=stopped enabled=no diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml new file mode 100644 index 00000000..2f54c0f1 --- /dev/null +++ b/docker/services/heat-api-cfn.yaml @@ -0,0 +1,97 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Heat API CFN service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerHeatApiCfnImage: + description: image + default: 'centos-binary-heat-api-cfn:latest' + type: string + # we configure all heat services in the same heat engine container + DockerHeatEngineImage: + description: image + default: 'centos-binary-heat-engine:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + HeatBase: + type: ../../puppet/services/heat-api-cfn.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Heat API CFN role. + value: + service_name: {get_attr: [HeatBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] + service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &heat_api_cfn_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + kolla_config: + /var/lib/kolla/config_files/heat_api_cfn.json: + command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf + config_files: + - dest: /etc/heat/heat.conf + owner: heat + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/heat/heat.conf + docker_config: + step_4: + heat_api_cfn: + image: *heat_api_cfn_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /dev:/dev + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_api_cfn service + tags: step2 + service: name=openstack-heat-api-cfn state=stopped enabled=no diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml new file mode 100644 index 00000000..a212d254 --- /dev/null +++ b/docker/services/heat-api.yaml @@ -0,0 +1,97 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Heat API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerHeatApiImage: + description: image + default: 'centos-binary-heat-api:latest' + type: string + # we configure all heat services in the same heat engine container + DockerHeatEngineImage: + description: image + default: 'centos-binary-heat-engine:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
+ type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + HeatBase: + type: ../../puppet/services/heat-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Heat API role. + value: + service_name: {get_attr: [HeatBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] + service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &heat_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + kolla_config: + /var/lib/kolla/config_files/heat_api.json: + command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf + config_files: + - dest: /etc/heat/heat.conf + owner: heat + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/heat/heat.conf + docker_config: + step_4: + heat_api: + image: *heat_api_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /dev:/dev + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_api service + tags: step2 + service: name=openstack-heat-api state=stopped enabled=no diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml new file mode 100644 index 00000000..c60a3840 --- /dev/null +++ b/docker/services/heat-engine.yaml @@ -0,0 +1,99 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Heat Engine service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerHeatEngineImage: + description: image + default: 'centos-binary-heat-engine:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + HeatBase: + type: ../../puppet/services/heat-engine.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Heat Engine role. 
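+    # Illustrative note: with the parameter defaults above, the list_join below
+    # resolves to
+    #   docker_image: tripleoupstream/centos-binary-heat-engine:latest
+    # i.e. "<DockerNamespace>/<DockerHeatEngineImage>". The heat-api and
+    # heat-api-cfn templates compose their runtime images the same way, but
+    # point their config_image at this engine image so that all three heat
+    # services render their configuration from the single "heat" config volume.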
+ value: + service_name: {get_attr: [HeatBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [HeatBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [HeatBase, role_data, step_config] + service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &heat_engine_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ] + puppet_config: + config_volume: heat + puppet_tags: heat_config,file,concat,file_line + step_config: *step_config + config_image: *heat_engine_image + kolla_config: + /var/lib/kolla/config_files/heat_engine.json: + command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf + config_files: + - dest: /etc/heat/heat.conf + owner: heat + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/heat/heat.conf + docker_config: + step_3: + heat_engine_db_sync: + image: *heat_engine_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/heat/etc/heat:/etc/heat:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['heat-manage', 'db_sync'] + step_4: + heat_engine: + image: *heat_engine_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable heat_engine service + tags: step2 + service: name=openstack-heat-engine state=stopped enabled=no diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml new file mode 100644 index 00000000..ca42c9ec --- /dev/null +++ b/docker/services/ironic-api.yaml @@ -0,0 +1,106 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Ironic API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerIronicApiImage: + description: image + default: 'centos-binary-ironic-api:latest' + type: string + DockerIronicConfigImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + IronicApiBase: + type: ../../puppet/services/ironic-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Ironic API role. 
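+    # The kolla_config maps in these templates are written out as
+    # /var/lib/kolla/config_files/config.json for each container. In outline,
+    # Kolla's start script (with KOLLA_CONFIG_STRATEGY=COPY_ALWAYS) copies each
+    # config_files entry from source to dest with the given owner/perm on every
+    # start and then execs command. The ironic_api.json definition below, for
+    # example, renders to roughly:
+    #   {
+    #     "command": "/usr/bin/ironic-api",
+    #     "config_files": [
+    #       {"source": "/var/lib/kolla/config_files/src/etc/ironic/ironic.conf",
+    #        "dest": "/etc/ironic/ironic.conf", "owner": "ironic", "perm": "0640"}
+    #     ]
+    #   }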
+ value: + service_name: {get_attr: [IronicApiBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [IronicApiBase, role_data, config_settings] + step_config: &step_config + get_attr: [IronicApiBase, role_data, step_config] + service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/ironic_api.json: + command: /usr/bin/ironic-api + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + docker_config: + step_3: + ironic_db_sync: + image: *ironic_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/ironic/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf'] + step_4: + ironic_api: + start_order: 10 + image: *ironic_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable ironic_api service + tags: step2 + service: name=openstack-ironic-api state=stopped enabled=no diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml new file mode 100644 index 00000000..ff470008 --- /dev/null +++ b/docker/services/ironic-conductor.yaml @@ -0,0 +1,118 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Ironic Conductor service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerIronicConductorImage: + description: image + default: 'centos-binary-ironic-conductor:latest' + type: string + DockerIronicConfigImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + IronicConductorBase: + type: ../../puppet/services/ironic-conductor.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Ironic Conductor role. 
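+    # The one-shot database migration containers in these templates (detach:
+    # false, run at step_3) are plain foreground docker runs. As a hand-written
+    # approximation (not something generated by the templates), the
+    # ironic_db_sync container above in the ironic-api template corresponds to:
+    #   docker run --net host \
+    #     -v /var/lib/config-data/ironic/etc/:/etc/:ro \
+    #     -v /etc/hosts:/etc/hosts:ro -v /etc/localtime:/etc/localtime:ro \
+    #     -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS \
+    #     tripleoupstream/centos-binary-ironic-api:latest \
+    #     ironic-dbsync --config-file /etc/ironic/ironic.conf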
+ value: + service_name: {get_attr: [IronicConductorBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [IronicConductorBase, role_data, config_settings] + # to avoid hard linking errors we store these on the same + # volume/device as the ironic master_path + - ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot + - ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images + - ironic::pxe::tftp_root: /var/lib/ironic/tftpboot + - ironic::pxe::http_root: /var/lib/ironic/httpboot + - ironic::conductor::http_root: /var/lib/ironic/httpboot + step_config: &step_config + get_attr: [IronicConductorBase, role_data, step_config] + service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/ironic_conductor.json: + command: /usr/bin/ironic-conductor + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + permissions: + - path: /var/lib/ironic/httpboot + owner: ironic:ironic + recurse: true + - path: /var/lib/ironic/tftpboot + owner: ironic:ironic + recurse: true + docker_config: + step_4: + ironic-init-dirs: + image: *ironic_image + user: root + command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot'] + volumes: + - ironic:/var/lib/ironic + ironic_conductor: + start_order: 80 + image: *ironic_image + net: host + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /lib/modules:/lib/modules:ro + - /sys:/sys + - /dev:/dev + - /run:/run #shared? + - ironic:/var/lib/ironic + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable ironic_conductor service + tags: step2 + service: name=openstack-ironic-conductor state=stopped enabled=no diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml new file mode 100644 index 00000000..25505192 --- /dev/null +++ b/docker/services/ironic-pxe.yaml @@ -0,0 +1,133 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Ironic PXE service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerIronicPxeImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + DockerIronicConfigImage: + description: image + default: 'centos-binary-ironic-pxe:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +outputs: + role_data: + description: Role data for the Ironic PXE role. 
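+    # Shared state note: the conductor above and the PXE containers below all
+    # mount the named volume "ironic" at /var/lib/ironic, which is why the
+    # tftp/http roots are pointed under /var/lib/ironic (same device as
+    # master_path, per the comment above) and why ironic-init-dirs pre-creates
+    # them. The "permissions" list sits in config.json next to
+    # command/config_files and is expected to be applied as ownership fixes by
+    # the Kolla start script, roughly:
+    #   "permissions": [
+    #     {"path": "/var/lib/ironic/httpboot", "owner": "ironic:ironic", "recurse": true},
+    #     {"path": "/var/lib/ironic/tftpboot", "owner": "ironic:ironic", "recurse": true}
+    #   ]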
+ value: + service_name: ironic_pxe + config_settings: {} + step_config: &step_config '' + service_config_settings: {} + # BEGIN DOCKER SETTINGS + docker_image: &ironic_pxe_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ] + puppet_config: + config_volume: ironic + puppet_tags: ironic_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/ironic_pxe_http.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + - dest: /etc/httpd/conf.d/10-ipxe_vhost.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-ipxe_vhost.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + /var/lib/kolla/config_files/ironic_pxe_tftp.json: + command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot + config_files: + - dest: /etc/ironic/ironic.conf + owner: ironic + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf + - dest: /var/lib/ironic/tftpboot/chain.c32 + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/chain.c32 + - dest: /var/lib/ironic/tftpboot/pxelinux.0 + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/pxelinux.0 + - dest: /var/lib/ironic/tftpboot/ipxe.efi + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/ipxe.efi + - dest: /var/lib/ironic/tftpboot/undionly.kpxe + owner: ironic + perm: '0744' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/undionly.kpxe + - dest: /var/lib/ironic/tftpboot/map-file + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/map-file + docker_config: + step_4: + ironic_pxe_tftp: + start_order: 90 + image: *ironic_pxe_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /dev/log:/dev/log + - ironic:/var/lib/ironic/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + ironic_pxe_http: + start_order: 91 + image: *ironic_pxe_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - ironic:/var/lib/ironic/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml new file mode 100644 index 00000000..358277a5 --- /dev/null +++ b/docker/services/keystone.yaml @@ -0,0 +1,160 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Keystone service + 
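+# As with the mysql template earlier in this series, the step_config below is
+# prefixed with a noop_resource() loop: the keystone_* puppet resources (users,
+# roles, endpoints, ...) are skipped while per-node configuration is generated,
+# and are applied once instead by the docker_puppet_tasks entry at step_3
+# further down ("Keystone endpoint creation occurs only on single node").
+# Sketch of the resulting first line of step_config:
+#   ['Keystone_user', 'Keystone_endpoint', ...].each |String $val| { noop_resource($val) }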
+parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerKeystoneImage: + description: image + default: 'centos-binary-keystone:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + AdminPassword: + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + type: string + hidden: true + +resources: + + KeystoneBase: + type: ../../puppet/services/keystone.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Keystone API role. + value: + service_name: {get_attr: [KeystoneBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [KeystoneBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + list_join: + - "\n" + - - "['Keystone_user', 'Keystone_endpoint', 'Keystone_domain', 'Keystone_tenant', 'Keystone_user_role', 'Keystone_role', 'Keystone_service'].each |String $val| { noop_resource($val) }" + - {get_attr: [KeystoneBase, role_data, step_config]} + service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &keystone_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ] + puppet_config: + config_volume: keystone + puppet_tags: keystone_config + step_config: *step_config + config_image: *keystone_image + kolla_config: + /var/lib/kolla/config_files/keystone.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/keystone/keystone.conf + owner: keystone + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/keystone/keystone.conf + - dest: /etc/keystone/credential-keys/0 + owner: keystone + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/0 + - dest: /etc/keystone/credential-keys/1 + owner: keystone + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1 + - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_admin.conf + - dest: /etc/httpd/conf.d/10-keystone_wsgi_main.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_main.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + - dest: /var/www/cgi-bin/keystone/keystone-admin + owner: keystone + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-admin + - dest: /var/www/cgi-bin/keystone/keystone-public + owner: keystone + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-public + docker_config: + step_3: + keystone-init-log: + start_order: 0 + image: *keystone_image + user: root + 
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/keystone && chown keystone:keystone /var/log/keystone'] + volumes: + - logs:/var/log + keystone_db_sync: + start_order: 1 + image: *keystone_image + net: host + privileged: false + detach: false + volumes: &keystone_volumes + - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/keystone/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/keystone/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - logs:/var/log + environment: + - KOLLA_BOOTSTRAP=True + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + keystone: + start_order: 1 + image: *keystone_image + net: host + privileged: false + restart: always + volumes: *keystone_volumes + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + keystone_bootstrap: + start_order: 2 + action: exec + command: + [ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ] + docker_puppet_tasks: + # Keystone endpoint creation occurs only on single node + step_3: + config_volume: 'keystone_init_tasks' + puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain' + step_config: 'include ::tripleo::profile::base::keystone' + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ] + upgrade_tasks: + - name: Stop and disable keystone service (running under httpd) + tags: step2 + service: name=httpd state=stopped enabled=no diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml new file mode 100644 index 00000000..9467567f --- /dev/null +++ b/docker/services/memcached.yaml @@ -0,0 +1,76 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Memcached services + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMemcachedImage: + description: image + default: 'centos-binary-memcached:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + MemcachedBase: + type: ../../puppet/services/memcached.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Memcached API role. 
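+    # Step ordering, as used consistently across these docker service
+    # templates: step_1 starts infrastructure with no OpenStack dependencies
+    # (memcached below), step_2 bootstraps the database (mariadb), step_3 runs
+    # one-shot db syncs and data initialization, and step_4 starts the
+    # long-running services. Within a step, start_order sequences containers,
+    # e.g. in the keystone template above:
+    #   step_3: keystone-init-log (0) -> keystone_db_sync (1) -> keystone (1)
+    #           -> keystone_bootstrap (2)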
+ value: + service_name: {get_attr: [MemcachedBase, role_data, service_name]} + config_settings: {get_attr: [MemcachedBase, role_data, config_settings]} + step_config: &step_config + get_attr: [MemcachedBase, role_data, step_config] + service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &memcached_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ] + puppet_config: + config_volume: 'memcached' + puppet_tags: 'file' + step_config: *step_config + config_image: *memcached_image + kolla_config: {} + docker_config: + step_1: + memcached: + image: *memcached_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS'] + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable memcached service + tags: step2 + service: name=memcached state=stopped enabled=no diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml new file mode 100644 index 00000000..7680bc62 --- /dev/null +++ b/docker/services/mistral-api.yaml @@ -0,0 +1,122 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Mistral API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMistralApiImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + DockerMistralConfigImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + MistralApiBase: + type: ../../puppet/services/mistral-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Mistral API role. 
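+    # The mistral database is initialized by two one-shot containers at step_3
+    # below, equivalent in effect to running
+    #   mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head
+    #   mistral-db-manage --config-file /etc/mistral/mistral.conf populate
+    # in sequence; per the NOTE below, the populate step only yields the
+    # tripleo* actions if openstack-tripleo-common is installed in the
+    # mistral-api image.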
+ value: + service_name: {get_attr: [MistralApiBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MistralApiBase, role_data, config_settings] + step_config: &step_config + get_attr: [MistralApiBase, role_data, step_config] + service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &mistral_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/mistral_api.json: + command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api + config_files: + - dest: /etc/mistral/mistral.conf + owner: mistral + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf + docker_config: + step_3: + mistral_db_sync: + start_order: 1 + image: *mistral_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/mistral/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head'] + mistral_db_populate: + start_order: 2 + image: *mistral_image + net: host + privileged: false + detach: false + volumes: + - /var/lib/config-data/mistral/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + # NOTE: dprince this requires that we install openstack-tripleo-common into + # the Mistral API image so that we get tripleo* actions + command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate'] + step_4: + mistral_api: + start_order: 15 + image: *mistral_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_api service + tags: step2 + service: name=openstack-mistral-api state=stopped enabled=no diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml new file mode 100644 index 00000000..d61ab1c2 --- /dev/null +++ b/docker/services/mistral-engine.yaml @@ -0,0 +1,95 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Mistral Engine service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMistralEngineImage: + description: image + default: 'centos-binary-mistral-engine:latest' + type: string + DockerMistralConfigImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
+ type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + MistralBase: + type: ../../puppet/services/mistral-engine.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Mistral Engine role. + value: + service_name: {get_attr: [MistralBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MistralBase, role_data, config_settings] + step_config: &step_config + get_attr: [MistralBase, role_data, step_config] + service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &mistral_engine_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/mistral_engine.json: + command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine + config_files: + - dest: /etc/mistral/mistral.conf + owner: mistral + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf + docker_config: + step_4: + mistral_engine: + image: *mistral_engine_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_engine service + tags: step2 + service: name=openstack-mistral-engine state=stopped enabled=no + diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml new file mode 100644 index 00000000..42286426 --- /dev/null +++ b/docker/services/mistral-executor.yaml @@ -0,0 +1,98 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Mistral Executor service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMistralExecutorImage: + description: image + default: 'centos-binary-mistral-executor:latest' + type: string + DockerMistralConfigImage: + description: image + default: 'centos-binary-mistral-api:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + MistralBase: + type: ../../puppet/services/mistral-executor.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Mistral Executor role. 
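+    # All three mistral roles run the same /usr/bin/mistral-server binary from
+    # the same "mistral" config volume; only the server role and log file
+    # differ:
+    #   /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf \
+    #     --log-file=/var/log/mistral/<api|engine|executor>.log \
+    #     --server=<api|engine|executor>
+    # which is why the engine and executor templates reuse
+    # DockerMistralConfigImage for their config generation.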
+ value: + service_name: {get_attr: [MistralBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [MistralBase, role_data, config_settings] + step_config: &step_config + get_attr: [MistralBase, role_data, step_config] + service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &mistral_executor_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ] + puppet_config: + config_volume: mistral + puppet_tags: mistral_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/mistral_executor.json: + command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor + config_files: + - dest: /etc/mistral/mistral.conf + owner: mistral + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf + docker_config: + step_4: + mistral_executor: + image: *mistral_executor_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + # FIXME: this is required in order for Nova cells + # initialization workflows on the Undercloud. Need to + # exclude this on the overcloud for security reasons. + - /var/lib/config-data/nova/etc/nova:/etc/nova:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable mistral_executor service + tags: step2 + service: name=openstack-mistral-executor state=stopped enabled=no diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml new file mode 100644 index 00000000..71389046 --- /dev/null +++ b/docker/services/neutron-api.yaml @@ -0,0 +1,112 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronApiImage: + description: image + default: 'centos-binary-neutron-server:latest' + type: string + # we configure all neutron services in the same neutron + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NeutronBase: + type: ../../puppet/services/neutron-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Neutron API role. 
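+    # The neutron templates that follow (server, DHCP, L3, OVS agent, ML2
+    # plugin) all declare config_volume: neutron and generate their files from
+    # the neutron-openvswitch-agent image, so a single puppet pass populates
+    # /var/lib/config-data/neutron on the host and every neutron container
+    # bind-mounts it read-only at /var/lib/kolla/config_files/src. For the
+    # server below, /etc/neutron/plugin.ini inside the container is simply a
+    # copy of the generated plugins/ml2/ml2_conf.ini made through that mapping.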
+ value: + service_name: {get_attr: [NeutronBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] + service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &neutron_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_api_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/neutron_api.json: + command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini + config_files: + - dest: /etc/neutron/neutron.conf + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf + - dest: /etc/neutron/plugin.ini + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini + docker_config: + step_3: + neutron_db_sync: + image: *neutron_api_image + net: host + privileged: false + detach: false + # FIXME: we should make config file permissions right + # and run as neutron user + user: root + volumes: + - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro + - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['neutron-db-manage', 'upgrade', 'heads'] + step_4: + neutron_api: + image: *neutron_api_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_api service + tags: step2 + service: name=neutron-server state=stopped enabled=no diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml new file mode 100644 index 00000000..ccde63f2 --- /dev/null +++ b/docker/services/neutron-dhcp.yaml @@ -0,0 +1,100 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron DHCP service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronApiImage: + description: image + default: 'centos-binary-neutron-dhcp-agent:latest' + type: string + # we configure all neutron services in the same neutron + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
+ type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NeutronBase: + type: ../../puppet/services/neutron-dhcp.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Neutron DHCP role. + value: + service_name: {get_attr: [NeutronBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] + service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &neutron_dhcp_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_dhcp_agent_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/neutron_dhcp.json: + command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log + config_files: + - dest: /etc/neutron/neutron.conf + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf + - dest: /etc/neutron/dhcp_agent.ini + owner: neutron + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/neutron/dhcp_agent.ini + docker_config: + step_4: + neutron_dhcp: + image: *neutron_dhcp_image + net: host + pid: host + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /etc/hosts:/etc/hosts:ro + - /lib/modules:/lib/modules:ro + - /run/:/run + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable neutron_dhcp service + tags: step2 + service: name=neutron-dhcp-agent state=stopped enabled=no diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml new file mode 100644 index 00000000..d9a78288 --- /dev/null +++ b/docker/services/neutron-l3.yaml @@ -0,0 +1,92 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron L3 agent + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronL3AgentImage: + description: image + default: 'centos-binary-neutron-l3-agent:latest' + type: string + # we configure all neutron services in the same neutron + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + +resources: + + NeutronL3Base: + type: ../../puppet/services/neutron-l3.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for Neutron L3 agent + value: + service_name: {get_attr: [NeutronL3Base, role_data, service_name]} + config_settings: {get_attr: [NeutronL3Base, role_data, config_settings]} + step_config: &step_config + get_attr: [NeutronL3Base, role_data, step_config] + docker_image: &neutron_l3_agent_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ] + + puppet_config: + puppet_tags: neutron_config,neutron_l3_agent_config + config_volume: neutron + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + kolla_config: + /var/lib/kolla/config_files/neutron-l3-agent.json: + command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini + config_files: + - dest: /etc/neutron/neutron.conf + owner: neutron + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf + - dest: /etc/neutron/l3_agent.ini + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/neutron/l3_agent.ini + docker_config: + step_4: + neutronl3agent: + image: *neutron_l3_agent_image + net: host + pid: host + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /lib/modules:/lib/modules:ro + - /run:/run + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml index 56bd073e..6dcf91d9 100644 --- a/docker/services/neutron-ovs-agent.yaml +++ b/docker/services/neutron-ovs-agent.yaml @@ -10,14 +10,8 @@ parameters: type: string DockerOpenvswitchImage: description: image - default: 'centos-binary-neutron-openvswitch-agent' + default: 'centos-binary-neutron-openvswitch-agent:latest' type: string - NeutronOpenvswitchAgentPluginVolume: - type: string - default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro" - NeutronOpenvswitchAgentOvsVolume: - type: string - default: " " ServiceNetMap: default: {} description: Mapping of service_name -> network name. 
Typically set @@ -38,38 +32,60 @@ resources: NeutronOvsAgentBase: type: ../../puppet/services/neutron-ovs-agent.yaml properties: + EndpointMap: {get_param: EndpointMap} ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} outputs: role_data: description: Role data for Neutron openvswitch service value: + service_name: {get_attr: [NeutronOvsAgentBase, role_data, service_name]} config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]} - step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]} - puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 + step_config: &step_config + get_attr: [NeutronOvsAgentBase, role_data, step_config] + docker_image: &neutron_ovs_agent_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ] + puppet_config: + config_volume: neutron + puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 + step_config: *step_config + config_image: *neutron_ovs_agent_image + kolla_config: + /var/lib/kolla/config_files/neutron-openvswitch-agent.json: + command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini + config_files: + - dest: /etc/neutron/neutron.conf + owner: neutron + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf + - dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini + owner: neutron + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/openvswitch_agent.ini + - dest: /etc/neutron/plugins/ml2/ml2_conf.ini + owner: neutron + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini docker_config: - step_1: + step_4: neutronovsagent: - image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ] + image: *neutron_ovs_agent_image net: host pid: host privileged: true restart: always volumes: - - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json - - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro - - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro - - {get_param: NeutronOpenvswitchAgentPluginVolume} - - {get_param: NeutronOpenvswitchAgentOvsVolume} + - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro - /etc/localtime:/etc/localtime:ro - /lib/modules:/lib/modules:ro - /run:/run - - logs:/var/log/kolla/ environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS - step_2: {} + upgrade_tasks: + - name: Stop and disable neutron_ovs_agent service + tags: step2 + service: name=neutron-openvswitch-agent state=stopped enabled=no diff --git a/docker/services/neutron-plugin-ml2.yaml b/docker/services/neutron-plugin-ml2.yaml new file mode 100644 index 00000000..5d1a348a --- /dev/null +++ b/docker/services/neutron-plugin-ml2.yaml @@ -0,0 +1,61 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Neutron ML2 Plugin configured with Puppet + +parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNeutronConfigImage: + description: image + default: 'centos-binary-neutron-openvswitch-agent:latest' + type: string + DefaultPasswords: + default: {} + type: json + +resources: + + NeutronBase: + type: ../../puppet/services/neutron-plugin-ml2.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Neutron ML2 Plugin role. + value: + service_name: {get_attr: [NeutronBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NeutronBase, role_data, config_settings] + step_config: &step_config + get_attr: [NeutronBase, role_data, step_config] + service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &docker_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ] + puppet_config: + config_volume: 'neutron' + puppet_tags: '' + step_config: *step_config + config_image: *docker_image + kolla_config: {} + docker_config: {} diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml new file mode 100644 index 00000000..8a892325 --- /dev/null +++ b/docker/services/nova-api.yaml @@ -0,0 +1,151 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaApiImage: + description: image + default: 'centos-binary-nova-api:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NovaApiBase: + type: ../../puppet/services/nova-api.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova API role. 
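+    # Nova's cell_v2 bootstrap is expressed below as a chain of one-shot
+    # nova-manage containers at step_3, in effect:
+    #   nova-manage api_db sync
+    #   nova-manage cell_v2 map_cell0
+    #   nova-manage cell_v2 create_cell --name="default"   # exit code 2 tolerated:
+    #                                                      # cell already exists
+    #   nova-manage db sync
+    # followed at step_4 by the nova_api service itself and a
+    # "nova-manage cell_v2 discover_hosts" pass.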
+ value: + service_name: {get_attr: [NovaApiBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NovaApiBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [NovaApiBase, role_data, step_config] + service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_api_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_api.json: + command: /usr/bin/nova-api + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + docker_config: + step_3: + nova_api_db_sync: + start_order: 1 + image: *nova_api_image + net: host + detach: false + volumes: &nova_api_volumes + - /var/lib/config-data/nova/etc/:/etc/:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + command: ['/usr/bin/nova-manage', 'api_db', 'sync'] + # FIXME: we probably want to wait on the 'cell_v2 update' in order for this + # to be capable of upgrading a baremetal setup. This is to ensure the name + # of the cell is 'default' + nova_api_map_cell0: + start_order: 2 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + command: + - '/usr/bin/nova-manage' + - 'cell_v2' + - 'map_cell0' + nova_api_create_default_cell: + start_order: 3 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + # NOTE: allowing the exit code 2 is a dirty way of making + # this idempotent (if the resource already exists a conflict + # is raised) + exit_codes: [0,2] + command: + - '/usr/bin/nova-manage' + - 'cell_v2' + - 'create_cell' + - '--name="default"' + nova_db_sync: + start_order: 4 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + command: ['/usr/bin/nova-manage', 'db', 'sync'] + step_4: + nova_api: + start_order: 2 + image: *nova_api_image + net: host + user: nova + privileged: true + restart: always + volumes: + - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + nova_api_discover_hosts: + start_order: 3 + image: *nova_api_image + net: host + detach: false + volumes: *nova_api_volumes + command: + - '/usr/bin/nova-manage' + - 'cell_v2' + - 'discover_hosts' + upgrade_tasks: + - name: Stop and disable nova_api service + tags: step2 + service: name=openstack-nova-api state=stopped enabled=no diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml index c695c94d..9f4e353a 100644 --- a/docker/services/nova-compute.yaml +++ b/docker/services/nova-compute.yaml @@ -10,7 +10,7 @@ parameters: type: string DockerNovaComputeImage: description: image - default: 'centos-binary-nova-compute' + default: 'centos-binary-nova-compute:latest' type: string ServiceNetMap: default: {} @@ -29,41 +29,61 @@ parameters: resources: + NovaComputeBase: type: ../../puppet/services/nova-compute.yaml properties: EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: 
{get_param: DefaultPasswords} outputs: role_data: description: Role data for the Nova Compute service. value: + service_name: {get_attr: [NovaComputeBase, role_data, service_name]} config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]} - step_config: {get_attr: [NovaComputeBase, role_data, step_config]} - puppet_tags: nova_config,nova_paste_api_ini + step_config: &step_config + get_attr: [NovaComputeBase, role_data, step_config] + docker_image: &nova_compute_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] + puppet_config: + config_volume: nova_libvirt + puppet_tags: nova_config,nova_paste_api_ini + step_config: *step_config + config_image: *nova_compute_image + kolla_config: + /var/lib/kolla/config_files/nova-compute.json: + command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + - dest: /etc/nova/rootwrap.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf docker_config: - step_1: + # FIXME: run discover hosts here + step_4: novacompute: - image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] + image: *nova_compute_image net: host privileged: true user: root restart: always volumes: - - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json - - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro - - /var/lib/etc-data/nova/rootwrap.conf:/var/lib/kolla/config_files/rootwrap.conf:ro + - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro + - /dev:/dev + - /etc/iscsi:/etc/iscsi - /etc/localtime:/etc/localtime:ro - /lib/modules:/lib/modules:ro - /run:/run - - /dev:/dev - - logs:/var/log/kolla/ - - /etc/iscsi:/etc/iscsi + - /var/lib/nova:/var/lib/nova - libvirtd:/var/lib/libvirt - - nova_compute:/var/lib/nova/ environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS - step_2: {} diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml new file mode 100644 index 00000000..8bc81e32 --- /dev/null +++ b/docker/services/nova-conductor.yaml @@ -0,0 +1,92 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Conductor service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaConductorImage: + description: image + default: 'centos-binary-nova-conductor:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
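The nova-compute template above also shows the per-service configuration contract: the puppet_config block tells the node-side config-generation step (a helper that is not part of this diff) where to run puppet and where to place the result, and that result is what the running container later bind-mounts read-only from /var/lib/config-data. An annotated sketch of the same keys, with placeholder values where the real template uses YAML aliases:

```yaml
puppet_config:
  config_volume: nova_libvirt                   # output lands under /var/lib/config-data/nova_libvirt
  puppet_tags: nova_config,nova_paste_api_ini   # restrict puppet to these resource types
  step_config: "<step_config inherited from puppet/services/nova-compute.yaml>"  # placeholder
  config_image: tripleoupstream/centos-binary-nova-compute:latest  # container used to generate the config
```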
+ type: json + DefaultPasswords: + default: {} + type: json + + +resources: + + NovaConductorBase: + type: ../../puppet/services/nova-conductor.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova Conductor service. + value: + service_name: {get_attr: [NovaConductorBase, role_data, service_name]} + config_settings: {get_attr: [NovaConductorBase, role_data, config_settings]} + step_config: &step_config + get_attr: [NovaConductorBase, role_data, step_config] + service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_conductor_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_conductor.json: + command: /usr/bin/nova-conductor + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + docker_config: + step_4: + nova_conductor: + image: *nova_conductor_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_conductor service + tags: step2 + service: name=openstack-nova-conductor state=stopped enabled=no diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml new file mode 100644 index 00000000..5b46010f --- /dev/null +++ b/docker/services/nova-ironic.yaml @@ -0,0 +1,91 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Ironic Compute service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaComputeImage: + description: image + default: 'centos-binary-nova-compute-ironic:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +resources: + + + NovaIronicBase: + type: ../../puppet/services/nova-ironic.yaml + properties: + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Nova Compute service. 
+ value: + service_name: {get_attr: [NovaIronicBase, role_data, service_name]} + config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]} + step_config: &step_config + get_attr: [NovaIronicBase, role_data, step_config] + docker_image: &nova_ironic_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config,nova_paste_api_ini + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_ironic.json: + command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + - dest: /etc/nova/rootwrap.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf + docker_config: + step_5: + novacompute: + image: *nova_ironic_image + net: host + privileged: true + user: root + restart: always + volumes: + - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova:/var/lib/kolla/config_files/src:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - /dev:/dev + - /etc/iscsi:/etc/iscsi + - nova_compute:/var/lib/nova/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml index 2dfeed59..ed54f3d9 100644 --- a/docker/services/nova-libvirt.yaml +++ b/docker/services/nova-libvirt.yaml @@ -10,7 +10,13 @@ parameters: type: string DockerLibvirtImage: description: image - default: 'centos-binary-libvirt' + default: 'centos-binary-nova-libvirt:latest' + type: string + # we configure libvirt via the nova-compute container due to coupling + # in the puppet modules + DockerNovaComputeImage: + description: image + default: 'centos-binary-nova-compute:latest' type: string ServiceNetMap: default: {} @@ -33,37 +39,57 @@ resources: type: ../../puppet/services/nova-libvirt.yaml properties: EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} outputs: role_data: description: Role data for the Libvirt service. 
value: + service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]} config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]} - step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]} - puppet_tags: nova_config + step_config: &step_config + get_attr: [NovaLibvirtBase, role_data, step_config] + docker_image: &libvirt_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ] + puppet_config: + config_volume: nova_libvirt + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ] + kolla_config: + /var/lib/kolla/config_files/nova-libvirt.json: + command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf + config_files: + - dest: /etc/libvirt/libvirtd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/libvirt/libvirtd.conf docker_config: - step_1: + step_3: nova_libvirt: - image: - list_join: - - '/' - - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ] + image: *libvirt_image net: host pid: host privileged: true restart: always volumes: - - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json - - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf + - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro + - /dev:/dev - /etc/localtime:/etc/localtime:ro - /lib/modules:/lib/modules:ro - /run:/run - - /dev:/dev - /sys/fs/cgroup:/sys/fs/cgroup - - logs:/var/log/kolla/ + - /var/lib/nova:/var/lib/nova + # Needed to use host's virtlogd + - /var/run/libvirt:/var/run/libvirt - libvirtd:/var/lib/libvirt - - nova_compute:/var/lib/nova/ - nova_libvirt_qemu:/etc/libvirt/qemu environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS - step_2: {} diff --git a/puppet/services/pacemaker/nova-vnc-proxy.yaml b/docker/services/nova-metadata.yaml index 22eeb261..90c4c1c9 100644 --- a/puppet/services/pacemaker/nova-vnc-proxy.yaml +++ b/docker/services/nova-metadata.yaml @@ -1,9 +1,14 @@ heat_template_version: ocata description: > - OpenStack Nova Vncproxy service with Pacemaker configured with Puppet. + OpenStack containerized Nova Metadata service parameters: + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json ServiceNetMap: default: {} description: Mapping of service_name -> network name. Typically set @@ -13,33 +18,34 @@ parameters: DefaultPasswords: default: {} type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json + resources: - NovaVncproxyBase: - type: ../nova-vnc-proxy.yaml + NovaMetadataBase: + type: ../../puppet/services/nova-metadata.yaml properties: + EndpointMap: {get_param: EndpointMap} ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} outputs: role_data: - description: Role data for the Nova Vncproxy role. + description: Role data for the Nova Metadata service. 
value: - service_name: nova_vnc_proxy - monitoring_subscription: {get_attr: [NovaVncproxyBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NovaVncproxyBase, role_data, logging_source]} - logging_groups: {get_attr: [NovaVncproxyBase, role_data, logging_groups]} + service_name: {get_attr: [NovaMetadataBase, role_data, service_name]} config_settings: map_merge: - - get_attr: [NovaVncproxyBase, role_data, config_settings] - - nova::vncproxy::manage_service: false - nova::vncproxy::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::nova::vncproxy + - get_attr: [NovaMetadataBase, role_data, config_settings] + step_config: &step_config + get_attr: [NovaMetadataBase, role_data, step_config] + service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: '' + puppet_config: + config_volume: '' + puppet_tags: '' + step_config: *step_config + config_image: '' + kolla_config: {} + docker_config: {} diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml new file mode 100644 index 00000000..8da48d37 --- /dev/null +++ b/docker/services/nova-placement.yaml @@ -0,0 +1,114 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Placement API service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaPlacementImage: + description: image + default: 'centos-binary-nova-placement-api' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NovaPlacementBase: + type: ../../puppet/services/nova-placement.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova Placement API role. 
+ value: + service_name: {get_attr: [NovaPlacementBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [NovaPlacementBase, role_data, config_settings] + - apache::default_vhost: false + step_config: &step_config + get_attr: [NovaPlacementBase, role_data, step_config] + service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_placement_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ] + puppet_config: + config_volume: nova_placement + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_placement.json: + command: /usr/sbin/httpd -DFOREGROUND + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + - dest: /etc/httpd/conf.d/10-placement_wsgi.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf + # puppet generates a stubbed out version of the stock one so we + # copy it in to overwrite the existing one + - dest: /etc/httpd/conf.d/00-nova-placement-api.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/00-nova-placement-api.conf + - dest: /etc/httpd/conf/httpd.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf + - dest: /etc/httpd/conf/ports.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf + - dest: /var/www/cgi-bin/nova/nova-placement-api + owner: nova + perm: '0644' + source: /var/lib/kolla/config_files/src/var/www/cgi-bin/nova/nova-placement-api + docker_config: + # start this early so it is up before computes start reporting + step_3: + nova_placement: + start_order: 1 + image: *nova_placement_image + net: host + user: root + restart: always + volumes: + - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova_placement/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_placement service (running under httpd) + tags: step2 + service: name=httpd state=stopped enabled=no diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml new file mode 100644 index 00000000..c24d5b26 --- /dev/null +++ b/docker/services/nova-scheduler.yaml @@ -0,0 +1,91 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Nova Scheduler service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerNovaSchedulerImage: + description: image + default: 'centos-binary-nova-scheduler:latest' + type: string + DockerNovaBaseImage: + description: image + default: 'centos-binary-nova-base:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. 
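One detail worth noting in the placement template above: unlike the other image parameters in this change, DockerNovaPlacementImage defaults to an untagged image name. Operators who want a pinned image can override it; the tag below is illustrative:

```yaml
parameter_defaults:
  DockerNovaPlacementImage: centos-binary-nova-placement-api:latest  # example override
```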
Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + NovaSchedulerBase: + type: ../../puppet/services/nova-scheduler.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Nova Scheduler service. + value: + service_name: {get_attr: [NovaSchedulerBase, role_data, service_name]} + config_settings: {get_attr: [NovaSchedulerBase, role_data, config_settings]} + step_config: &step_config + get_attr: [NovaSchedulerBase, role_data, step_config] + service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &nova_scheduler_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ] + puppet_config: + config_volume: nova + puppet_tags: nova_config + step_config: *step_config + config_image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ] + kolla_config: + /var/lib/kolla/config_files/nova_scheduler.json: + command: /usr/bin/nova-scheduler + config_files: + - dest: /etc/nova/nova.conf + owner: nova + perm: '0600' + source: /var/lib/kolla/config_files/src/etc/nova/nova.conf + docker_config: + step_4: + nova_scheduler: + image: *nova_scheduler_image + net: host + privileged: false + restart: always + volumes: + - /run:/run + - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable nova_scheduler service + tags: step2 + service: name=openstack-nova-scheduler state=stopped enabled=no diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml new file mode 100644 index 00000000..ed440718 --- /dev/null +++ b/docker/services/rabbitmq.yaml @@ -0,0 +1,126 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Rabbitmq service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerRabbitmqImage: + description: image + default: 'centos-binary-rabbitmq:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + RabbitCookie: + type: string + default: '' + hidden: true + +resources: + + RabbitmqBase: + type: ../../puppet/services/rabbitmq.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Rabbitmq API role. 
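Taken together, the docker_config sections in this change stage containers across numbered deployment steps. A rough editorial summary (not a template fragment), based on the step keys that appear above and below:

```yaml
step_1: [rabbitmq_bootstrap, rabbitmq]
step_3: [nova_api_db_sync, nova_api_map_cell0, nova_api_create_default_cell,
         nova_db_sync, nova_placement, nova_libvirt, swift_setup_srv]
step_4: [nova_api, nova_api_discover_hosts, nova_conductor, nova_scheduler,
         novacompute, swift_proxy,
         "swift account/container/object services", zaqar, zaqar_websocket]
step_5: ["novacompute (ironic variant)"]
```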
+ value: + service_name: {get_attr: [RabbitmqBase, role_data, service_name]} + config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]} + step_config: &step_config + get_attr: [RabbitmqBase, role_data, step_config] + service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &rabbitmq_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ] + puppet_config: + config_volume: rabbitmq + puppet_tags: file + step_config: *step_config + config_image: *rabbitmq_image + kolla_config: + /var/lib/kolla/config_files/rabbitmq.json: + command: /usr/lib/rabbitmq/bin/rabbitmq-server + config_files: + - dest: /etc/rabbitmq/rabbitmq.config + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq.config + - dest: /etc/rabbitmq/enabled_plugins + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/enabled_plugins + - dest: /etc/rabbitmq/rabbitmq-env.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq-env.conf + - dest: /etc/rabbitmq/rabbitmqadmin.conf + owner: root + perm: '0644' + source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmqadmin.conf + docker_config: + step_1: + rabbitmq_bootstrap: + start_order: 0 + image: *rabbitmq_image + net: host + privileged: false + volumes: + - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - rabbitmq:/var/lib/rabbitmq/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + - KOLLA_BOOTSTRAP=True + - + list_join: + - '=' + - - 'RABBITMQ_CLUSTER_COOKIE' + - + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: RabbitCookie} + - {get_param: [DefaultPasswords, rabbit_cookie]} + rabbitmq: + start_order: 1 + image: *rabbitmq_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - rabbitmq:/var/lib/rabbitmq/ + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable rabbitmq service + tags: step2 + service: name=rabbitmq-server state=stopped enabled=no diff --git a/docker/services/services.yaml b/docker/services/services.yaml index 3d51eb19..3f094ff8 100644 --- a/docker/services/services.yaml +++ b/docker/services/services.yaml @@ -66,8 +66,22 @@ outputs: global_config_settings: {get_attr: [PuppetServices, role_data, global_config_settings]} step_config: - {get_attr: [PuppetServices, role_data, step_config]} - puppet_tags: {list_join: [",", {get_attr: [ServiceChain, role_data, puppet_tags]}]} + {get_attr: [ServiceChain, role_data, step_config]} + docker_image: {get_attr: [ServiceChain, role_data, docker_image]} + puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]} + kolla_config: + map_merge: {get_attr: [ServiceChain, role_data, kolla_config]} docker_config: - step_1: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_1]}} - step_2: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_2]}} + {get_attr: [ServiceChain, role_data, docker_config]} + docker_puppet_tasks: + {get_attr: [ServiceChain, 
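The yaql expression in the rabbitmq_bootstrap environment above selects the first non-empty value from the list, so an explicitly set RabbitCookie wins over the generated DefaultPasswords entry. Operator override sketch (the parameter is hidden; the value is only an example):

```yaml
parameter_defaults:
  RabbitCookie: e53b52a7b1f0c91a  # illustrative value
```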
role_data, docker_puppet_tasks]} + upgrade_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g yum update for all services + expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} + upgrade_batch_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g yum update for all services + expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml new file mode 100644 index 00000000..66118412 --- /dev/null +++ b/docker/services/swift-proxy.yaml @@ -0,0 +1,83 @@ +heat_template_version: ocata + +description: > + OpenStack containerized swift proxy service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerSwiftProxyImage: + description: image + default: 'centos-binary-swift-proxy-server:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + SwiftProxyBase: + type: ../../puppet/services/swift-proxy.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the swift proxy. + value: + service_name: {get_attr: [SwiftProxyBase, role_data, service_name]} + config_settings: {get_attr: [SwiftProxyBase, role_data, config_settings]} + step_config: &step_config + get_attr: [SwiftProxyBase, role_data, step_config] + service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &swift_proxy_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + puppet_config: + config_volume: swift + puppet_tags: swift_proxy_config + step_config: *step_config + config_image: *swift_proxy_image + kolla_config: + /var/lib/kolla/config_files/swift_proxy.json: + command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf + docker_config: + step_4: + swift_proxy: + image: *swift_proxy_image + net: host + user: swift + restart: always + # I'm mounting /etc/swift as rw. Are the rings written to at all during runtime? 
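The aggregation in services.yaml above relies on yaql distinct() so that identical tasks emitted by several services collapse to a single entry in the role data. A hypothetical illustration of such an input (the task content is an example, not from this change; after distinct() only one copy remains):

```yaml
upgrade_batch_tasks:
  - name: Update all packages     # emitted by service A
    tags: step3
    yum: name=* state=latest
  - name: Update all packages     # emitted by service B, dropped as a duplicate
    tags: step3
    yum: name=* state=latest
```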
+ volumes: + - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable swift_proxy service + tags: step2 + service: name=openstack-swift-proxy state=stopped enabled=no diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml new file mode 100644 index 00000000..027a6956 --- /dev/null +++ b/docker/services/swift-ringbuilder.yaml @@ -0,0 +1,83 @@ +heat_template_version: ocata + +description: > + OpenStack Swift Ringbuilder + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerSwiftProxyImage: + description: image + default: 'centos-binary-swift-proxy-server:latest' + type: string + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + SwiftMinPartHours: + type: number + default: 1 + description: The minimum time (in hours) before a partition in a ring can be moved following a rebalance. + SwiftPartPower: + default: 10 + description: Partition Power to use when building Swift rings + type: number + SwiftRingBuild: + default: true + description: Whether to manage Swift rings or not + type: boolean + SwiftReplicas: + type: number + default: 3 + description: How many replicas to use in the swift rings. + SwiftRawDisks: + default: {} + description: 'A hash of additional raw devices to use as Swift backend (eg. {sdb: {}})' + type: json + SwiftUseLocalDir: + default: true + description: 'Use a local directory for Swift storage services when building rings' + type: boolean + +resources: + + SwiftRingbuilderBase: + type: ../../puppet/services/swift-ringbuilder.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for Swift Ringbuilder configuration in containers. 
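The ring-related parameters introduced in swift-ringbuilder.yaml above can be tuned per deployment. A small parameter_defaults sketch using the template defaults and the SwiftRawDisks example given in the parameter description:

```yaml
parameter_defaults:
  SwiftReplicas: 3
  SwiftPartPower: 10
  SwiftMinPartHours: 1
  SwiftRawDisks:
    sdb: {}   # additional raw device, per the parameter's own example
```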
+ value: + service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]} + config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]} + step_config: &step_config + get_attr: [SwiftRingbuilderBase, role_data, step_config] + service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &docker_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + puppet_config: + config_volume: 'swift' + puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance + step_config: *step_config + config_image: *docker_image + kolla_config: {} + docker_config: {} diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml new file mode 100644 index 00000000..2eb55632 --- /dev/null +++ b/docker/services/swift-storage.yaml @@ -0,0 +1,363 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Swift Storage services. + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerSwiftProxyImage: + description: image + default: 'centos-binary-swift-proxy-server:latest' + type: string + DockerSwiftAccountImage: + description: image + default: 'centos-binary-swift-account:latest' + type: string + DockerSwiftContainerImage: + description: image + default: 'centos-binary-swift-container:latest' + type: string + DockerSwiftObjectImage: + description: image + default: 'centos-binary-swift-object:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + DefaultPasswords: + default: {} + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + +resources: + + SwiftStorageBase: + type: ../../puppet/services/swift-storage.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the swift storage services. 
+ value: + service_name: {get_attr: [SwiftStorageBase, role_data, service_name]} + config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]} + step_config: &step_config + get_attr: [SwiftStorageBase, role_data, step_config] + service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &swift_proxy_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + puppet_config: + config_volume: swift + puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config + step_config: *step_config + config_image: *swift_proxy_image + kolla_config: + /var/lib/kolla/config_files/swift_account_auditor.json: + command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_account_reaper.json: + command: /usr/bin/swift-account-reaper /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_account_replicator.json: + command: /usr/bin/swift-account-replicator /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_account_server.json: + command: /usr/bin/swift-account-server /etc/swift/account-server.conf + /var/lib/kolla/config_files/swift_container_auditor.json: + command: /usr/bin/swift-container-auditor /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_container_replicator.json: + command: /usr/bin/swift-container-replicator /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_container_updater.json: + command: /usr/bin/swift-container-updater /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_container_server.json: + command: /usr/bin/swift-container-server /etc/swift/container-server.conf + /var/lib/kolla/config_files/swift_object_auditor.json: + command: /usr/bin/swift-object-auditor /etc/swift/object-server.conf + /var/lib/kolla/config_files/swift_object_expirer.json: + command: /usr/bin/swift-object-expirer /etc/swift/object-expirer.conf + /var/lib/kolla/config_files/swift_object_replicator.json: + command: /usr/bin/swift-object-replicator /etc/swift/object-server.conf + /var/lib/kolla/config_files/swift_object_updater.json: + command: /usr/bin/swift-object-updater /etc/swift/object-server.conf + /var/lib/kolla/config_files/swift_object_server.json: + command: /usr/bin/swift-object-server /etc/swift/object-server.conf + docker_config: + step_3: + # The puppet config sets this up but we don't have a way to mount the named + # volume during the configuration stage. We just need to create this + # directory and make sure it's owned by swift. 
+ swift_setup_srv: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + user: root + command: ['/bin/bash', '-c', 'mkdir /srv/node && chown swift:swift /srv/node'] + volumes: + - swift-srv:/srv + step_4: + swift_account_auditor: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: &kolla_env + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + swift_account_reaper: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_account_replicator: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_account_server: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_auditor: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_replicator: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - 
swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_updater: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_container_server: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_auditor: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_expirer: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_replicator: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_updater: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + swift_object_server: + image: + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ] + net: host + user: swift + restart: always + volumes: + - 
/var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro + - /var/lib/config-data/swift/etc/swift:/etc/swift:rw + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /run:/run + - swift-srv:/srv + - /dev:/dev + environment: *kolla_env + upgrade_tasks: + - name: Stop and disable swift storage services + tags: step2 + service: name={{ item }} state=stopped enabled=no + with_items: + - openstack-swift-account-auditor + - openstack-swift-account-reaper + - openstack-swift-account-replicator + - openstack-swift-account + - openstack-swift-container-auditor + - openstack-swift-container-replicator + - openstack-swift-container-updater + - openstack-swift-container + - openstack-swift-object-auditor + - openstack-swift-object-replicator + - openstack-swift-object-updater + - openstack-swift-object diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml new file mode 100644 index 00000000..30905ffe --- /dev/null +++ b/docker/services/zaqar.yaml @@ -0,0 +1,107 @@ +heat_template_version: ocata + +description: > + OpenStack containerized Zaqar services + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerZaqarImage: + description: image + default: 'centos-binary-zaqar:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +resources: + + ZaqarBase: + type: ../../puppet/services/zaqar.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + +outputs: + role_data: + description: Role data for the Zaqar API role. 
+ value: + service_name: {get_attr: [ZaqarBase, role_data, service_name]} + config_settings: {get_attr: [ZaqarBase, role_data, config_settings]} + step_config: &step_config + get_attr: [ZaqarBase, role_data, step_config] + service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + docker_image: &zaqar_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ] + puppet_config: + config_volume: zaqar + puppet_tags: zaqar_config + step_config: *step_config + config_image: *zaqar_image + kolla_config: + /var/lib/kolla/config_files/zaqar.json: + command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf + config_files: + - dest: /etc/zaqar/zaqar.conf + owner: zaqar + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf + /var/lib/kolla/config_files/zaqar_websocket.json: + command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf + config_files: + - dest: /etc/zaqar/zaqar.conf + owner: zaqar + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf + - dest: /etc/zaqar/1.conf + owner: zaqar + perm: '0640' + source: /var/lib/kolla/config_files/src/etc/zaqar/1.conf + docker_config: + step_4: + zaqar: + image: *zaqar_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + zaqar_websocket: + image: *zaqar_image + net: host + privileged: false + restart: always + volumes: + - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + upgrade_tasks: + - name: Stop and disable zaqar service + tags: step2 + service: name=openstack-zaqar.service state=stopped enabled=no + diff --git a/environments/auditd.yaml b/environments/auditd.yaml new file mode 100644 index 00000000..b358c98a --- /dev/null +++ b/environments/auditd.yaml @@ -0,0 +1,119 @@ +resource_registry: + OS::TripleO::Services::AuditD: ../puppet/services/auditd.yaml + +parameter_defaults: + AuditdRules: + 'Record attempts to alter time through adjtimex': + content: '-a always,exit -F arch=b64 -S adjtimex -k audit_time_rules' + order : 1 + 'Record attempts to alter time through settimeofday': + content: '-a always,exit -F arch=b64 -S settimeofday -k audit_time_rules' + order : 2 + 'Record Attempts to Alter Time Through stime': + content: '-a always,exit -F arch=b64 -S stime -k audit_time_rules' + order : 3 + 'Record Attempts to Alter Time Through clock_settime': + content: '-a always,exit -F arch=b64 -S clock_settime -k audit_time_rules' + order : 4 + 'Record Attempts to Alter the localtime File': + content: '-w /etc/localtime -p wa -k audit_time_rules' + order : 5 + 'Record Events that Modify the Systems Discretionary Access Controls - chmod': + content: '-a always,exit -F arch=b64 -S chmod -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 5 + 'Record Events that Modify the Systems Discretionary Access Controls - chown': + content: '-a always,exit -F arch=b64 -S chown -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 6 + 'Record Events that Modify the 
Systems Discretionary Access Controls - fchmod': + content: '-a always,exit -F arch=b64 -S fchmod -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 7 + 'Record Events that Modify the Systems Discretionary Access Controls - fchmodat': + content: '-a always,exit -F arch=b64 -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 8 + 'Record Events that Modify the Systems Discretionary Access Controls - fchown': + content: '-a always,exit -F arch=b64 -S fchown -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 9 + 'Record Events that Modify the Systems Discretionary Access Controls - fchownat': + content: '-a always,exit -F arch=b64 -S fchownat -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 10 + 'Record Events that Modify the Systems Discretionary Access Controls - fremovexattr': + content: '-a always,exit -F arch=b64 -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 11 + 'Record Events that Modify the Systems Discretionary Access Controls - fsetxattr': + content: '-a always,exit -F arch=b64 -S fsetxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 12 + 'Record Events that Modify the Systems Discretionary Access Controls - lchown': + content: '-a always,exit -F arch=b64 -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 13 + 'Record Events that Modify the Systems Discretionary Access Controls - lremovexattr': + content: '-a always,exit -F arch=b64 -S lremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 14 + 'Record Events that Modify the Systems Discretionary Access Controls - lsetxattr': + content: '-a always,exit -F arch=b64 -S lsetxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 15 + 'Record Events that Modify the Systems Discretionary Access Controls - removexattr': + content: '-a always,exit -F arch=b64 -S removexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 16 + 'Record Events that Modify the Systems Discretionary Access Controls - setxattr': + content: '-a always,exit -F arch=b64 -S setxattr -F auid>=1000 -F auid!=4294967295 -k perm_mod' + order : 17 + 'Record Events that Modify User/Group Information - /etc/group': + content: '-w /etc/group -p wa -k audit_rules_usergroup_modification' + order : 18 + 'Record Events that Modify User/Group Information - /etc/passwd': + content: '-w /etc/passwd -p wa -k audit_rules_usergroup_modification' + order : 19 + 'Record Events that Modify User/Group Information - /etc/gshadow': + content: '-w /etc/gshadow -p wa -k audit_rules_usergroup_modification' + order : 20 + 'Record Events that Modify User/Group Information - /etc/shadow': + content: '-w /etc/shadow -p wa -k audit_rules_usergroup_modification' + order : 21 + 'Record Events that Modify User/Group Information - /etc/opasswd': + content: '-w /etc/opasswd -p wa -k audit_rules_usergroup_modification' + order : 22 + 'Record Events that Modify the Systems Network Environment - sethostname / setdomainname': + content: '-a always,exit -F arch=b64 -S sethostname -S setdomainname -k audit_rules_networkconfig_modification' + order : 23 + 'Record Events that Modify the Systems Network Environment - /etc/issue': + content: '-w /etc/issue -p wa -k audit_rules_networkconfig_modification' + order : 24 + 'Record Events that Modify the Systems Network Environment - /etc/issue.net': + content: '-w /etc/issue.net -p wa -k audit_rules_networkconfig_modification' + order : 25 + 'Record Events that Modify the Systems Network Environment - /etc/hosts': + content: '-w /etc/hosts -p wa 
-k audit_rules_networkconfig_modification' + order : 26 + 'Record Events that Modify the Systems Network Environment - /etc/sysconfig/network': + content: '-w /etc/sysconfig/network -p wa -k audit_rules_networkconfig_modification' + order : 27 + 'Record Events that Modify the Systems Mandatory Access Controls': + content: '-w /etc/selinux/ -p wa -k MAC-policy' + order : 28 + 'Ensure auditd Collects Unauthorized Access Attempts to Files (unsuccessful / EACCES)': + content: '-a always,exit -F arch=b64 -S creat -S open -S openat -S open_by_handle_at -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access' + order : 29 + 'Ensure auditd Collects Unauthorized Access Attempts to Files (unsuccessful / EPERM)': + content: '-a always,exit -F arch=b64 -S creat -S open -S openat -S open_by_handle_at -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access' + order : 30 + 'Ensure auditd Collects Information on the Use of Privileged Commands': + content: '-a always,exit -F path=SETUID_PROG_PATH -F perm=x -F auid>=1000 -F auid!=4294967295 -k privileged' + order : 31 + 'Ensure auditd Collects Information on Exporting to Media (successful)': + content: '-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k export' + order : 32 + 'Ensure auditd Collects File Deletion Events by User': + content: '-a always,exit -F arch=b64 -S rmdir -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete' + order : 33 + 'Ensure auditd Collects System Administrator Actions': + content: '-w /etc/sudoers -p wa -k actions' + order : 34 + 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (insmod)': + content: '-w /usr/sbin/insmod -p x -k modules' + order : 35 + 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (rmmod)': + content: '-w /usr/sbin/rmmod -p x -k modules' + order : 36 + 'Ensure auditd Collects Information on Kernel Module Loading and Unloading (modprobe)': + content: '-w /usr/sbin/modprobe -p x -k modules' + order : 37 diff --git a/environments/cadf.yaml b/environments/cadf.yaml new file mode 100644 index 00000000..af5c7fdf --- /dev/null +++ b/environments/cadf.yaml @@ -0,0 +1,2 @@ +parameter_defaults: + KeystoneNotificationFormat: cadf diff --git a/environments/cinder-dellps-config.yaml b/environments/cinder-dellps-config.yaml new file mode 100644 index 00000000..eefd0fd6 --- /dev/null +++ b/environments/cinder-dellps-config.yaml @@ -0,0 +1,31 @@ +# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
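Each entry in the AuditdRules hash above follows the same shape: the key is a human-readable description, content is a raw auditd rule, and order controls its position in the generated rules file. Adding a site-specific rule is a matter of appending another entry; the rule below is a hypothetical example, not part of this change:

```yaml
parameter_defaults:
  AuditdRules:
    'Record changes to the sudoers.d directory':
      content: '-w /etc/sudoers.d/ -p wa -k actions'
      order: 38
```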
+# +# A Heat environment file which can be used to enable a +# a Cinder Dell EMC PS Series backend, configured via puppet +resource_registry: + OS::TripleO::Services::CinderBackendDellPs: ../puppet/services/cinder-backend-dellps.yaml + +parameter_defaults: + CinderEnableDellPsBackend: true + CinderDellPsBackendName: 'tripleo_dellps' + CinderDellPsSanIp: '' + CinderDellPsSanLogin: '' + CinderDellPsSanPassword: '' + CinderDellPsSanThinProvision: true + CinderDellPsGroupname: 'group-0' + CinderDellPsPool: 'default' + CinderDellPsChapLogin: '' + CinderDellPsChapPassword: '' + CinderDellPsUseChap: false diff --git a/environments/cinder-dellsc-config.yaml b/environments/cinder-dellsc-config.yaml index 92e257d4..617d640c 100644 --- a/environments/cinder-dellsc-config.yaml +++ b/environments/cinder-dellsc-config.yaml @@ -1,7 +1,7 @@ # A Heat environment file which can be used to enable a -# a Cinder Dell Storage Center ISCSI backend, configured via puppet +# Cinder Dell EMC Storage Center ISCSI backend, configured via puppet resource_registry: - OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml + OS::TripleO::Services::CinderBackendDellSc: ../puppet/services/cinder-backend-dellsc.yaml parameter_defaults: CinderEnableDellScBackend: true diff --git a/environments/cinder-eqlx-config.yaml b/environments/cinder-eqlx-config.yaml deleted file mode 100644 index ca2c5e5a..00000000 --- a/environments/cinder-eqlx-config.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# A Heat environment file which can be used to enable a -# a Cinder eqlx backen, configured via puppet -resource_registry: - OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml - -parameter_defaults: - CinderEnableEqlxBackend: true - CinderEqlxBackendName: 'tripleo_eqlx' - CinderEqlxSanIp: '' - CinderEqlxSanLogin: '' - CinderEqlxSanPassword: '' - CinderEqlxSanThinProvision: true - CinderEqlxGroupname: 'group-0' - CinderEqlxPool: 'default' - CinderEqlxChapLogin: '' - CinderEqlxChapPassword: '' - CinderEqlxUseChap: false diff --git a/environments/cinder-hpelefthand-config.yaml b/environments/cinder-hpelefthand-config.yaml new file mode 100644 index 00000000..90d0261e --- /dev/null +++ b/environments/cinder-hpelefthand-config.yaml @@ -0,0 +1,13 @@ +# A Heat environment file which can be used to enable a +# a Cinder HPELeftHandISCSI backend, configured via puppet +resource_registry: + OS::TripleO::Services::CinderHPELeftHandISCSI: ../puppet/services/cinder-hpelefthand-iscsi.yaml + +parameter_defaults: + CinderHPELeftHandISCSIApiUrl: '' + CinderHPELeftHandISCSIUserName: '' + CinderHPELeftHandISCSIPassword: '' + CinderHPELeftHandISCSIBackendName: 'tripleo_hpelefthand' + CinderHPELeftHandISCSIChapEnabled: false + CinderHPELeftHandClusterName: '' + CinderHPELeftHandDebug: false diff --git a/environments/cinder-iser.yaml b/environments/cinder-iser.yaml new file mode 100644 index 00000000..5eae7c04 --- /dev/null +++ b/environments/cinder-iser.yaml @@ -0,0 +1,19 @@ +parameter_defaults: + + ## Whether to enable iscsi backend for Cinder. + CinderEnableIscsiBackend: true + CinderISCSIProtocol: 'iser' + CinderISCSIHelper: 'lioadm' + + ## Whether to enable rbd (Ceph) backend for Cinder. + CinderEnableRbdBackend: false + + ## Whether to enable NFS backend for Cinder. + CinderEnableNfsBackend: false + + ## Whether to enable rbd (Ceph) backend for Nova ephemeral storage. + NovaEnableRbdBackend: false + + ## Glance backend can be either 'rbd' (Ceph), 'swift' or 'file'. 
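The cinder-dellps-config.yaml environment above ships with empty credential strings; deployments supply them via a further parameter_defaults layer. A sketch with placeholder values (all values illustrative):

```yaml
parameter_defaults:
  CinderDellPsSanIp: 192.0.2.50
  CinderDellPsSanLogin: grpadmin
  CinderDellPsSanPassword: replace-me
  CinderDellPsGroupname: group-0
  CinderDellPsPool: default
```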
+ ## GlanceBackend: swift + diff --git a/environments/cinder-scaleio-config.yaml b/environments/cinder-scaleio-config.yaml new file mode 100644 index 00000000..cebd619c --- /dev/null +++ b/environments/cinder-scaleio-config.yaml @@ -0,0 +1,35 @@ +# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# A Heat environment file which can be used to enable a +# Cinder Dell EMC ScaleIO backend, configured via puppet +resource_registry: + OS::TripleO::Services::CinderBackendScaleIO: ../puppet/services/cinder-backend-scaleio.yaml + +parameter_defaults: + CinderEnableScaleIOBackend: true + CinderScaleIOBackendName: 'tripleo_scaleio' + CinderScaleIOSanIp: '' + CinderScaleIOSanLogin: '' + CinderScaleIOSanPassword: '' + CinderScaleIORestServerPort: '443' + CinderScaleIOVerifyServerCertificate: false + CinderScaleIOServerCertificatePath: '' + CinderScaleIOProtectionDomainName: 'domain1' + CinderScaleIOStoragePoolName: 'pool1' + CinderScaleIOStoragePools: 'domain1:pool1' + CinderScaleIORoundVolumeCapacity: true + CinderScaleIOUnmapVolumeBeforeDeletion: false + CinderScaleIOMaxOverSubscriptionRatio: '' + CinderScaleIOSanThinProvision: true diff --git a/environments/collectd-environment.yaml b/environments/collectd-environment.yaml new file mode 100644 index 00000000..7780530c --- /dev/null +++ b/environments/collectd-environment.yaml @@ -0,0 +1,23 @@ +resource_registry: + OS::TripleO::Services::Collectd: ../puppet/services/metrics/collectd.yaml + +# parameter_defaults: +# +## You can specify additional plugins to load using the +## CollectdExtraPlugins key: +# +# CollectdExtraPlugins: +# - disk +# - df +# +## You can use ExtraConfig (or one of the related *ExtraConfig keys) +## to configure collectd. See the documentation for puppet-collectd at +## https://github.com/voxpupuli/puppet-collectd for details.
+# +# ExtraConfig: +# collectd::plugin::disk::disks: +# - "/^[vhs]d[a-f][0-9]?$/" +# collectd::plugin::df::mountpoints: +# - "/" +# collectd::plugin::df::ignoreselected: false +# collectd::plugin::cpu::valuespercentage: true diff --git a/environments/contrail/contrail-net.yaml b/environments/contrail/contrail-net.yaml new file mode 100644 index 00000000..1e64f91d --- /dev/null +++ b/environments/contrail/contrail-net.yaml @@ -0,0 +1,26 @@ +resource_registry: + OS::TripleO::Compute::Net::SoftwareConfig: contrail-nic-config-compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: contrail-nic-config.yaml + OS::TripleO::ContrailController::Net::SoftwareConfig: contrail-nic-config.yaml + OS::TripleO::ContrailAnalytics::Net::SoftwareConfig: contrail-nic-config.yaml + OS::TripleO::ContrailAnalyticsDatabase::Net::SoftwareConfig: contrail-nic-config.yaml + OS::TripleO::ContrailTsn::Net::SoftwareConfig: contrail-nic-config-compute.yaml + +parameter_defaults: + ControlPlaneSubnetCidr: '24' + ControlPlaneDefaultRoute: 192.0.2.254 + InternalApiNetCidr: 10.0.0.0/24 + InternalApiAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.200'}] + InternalApiDefaultRoute: 10.0.0.1 + ManagementNetCidr: 10.1.0.0/24 + ManagementAllocationPools: [{'start': '10.1.0.10', 'end': '10.1.0.200'}] + ManagementInterfaceDefaultRoute: 10.1.0.1 + ExternalNetCidr: 10.2.0.0/24 + ExternalAllocationPools: [{'start': '10.2.0.10', 'end': '10.2.0.200'}] + EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud + DnsServers: ["8.8.8.8","8.8.4.4"] + VrouterPhysicalInterface: eth1 + VrouterGateway: 10.0.0.1 + VrouterNetmask: 255.255.255.0 + ControlVirtualInterface: eth0 + PublicVirtualInterface: vlan10 diff --git a/environments/contrail/contrail-nic-config-compute.yaml b/environments/contrail/contrail-nic-config-compute.yaml new file mode 100644 index 00000000..3007638a --- /dev/null +++ b/environments/contrail/contrail-nic-config-compute.yaml @@ -0,0 +1,167 @@ +heat_template_version: ocata + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the compute role. This is an example for a Nova compute node using + Contrail vrouter and the vhost0 interface. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + InternalApiDefaultRoute: # Not used by default in this template + default: '10.0.0.1' + description: The default route of the internal api network. + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ManagementIpSubnet: # Only populated when including environments/network-management.yaml + default: '' + description: IP address/subnet on the management network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. 
+ type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ManagementNetworkVlanID: + default: 60 + description: Vlan ID for the management network traffic. + type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The default route of the control plane network. + type: string + ExternalInterfaceDefaultRoute: # Not used by default in this template + default: '10.0.0.1' + description: The default route of the external network. + type: string + ManagementInterfaceDefaultRoute: # Commented out by default in this template + default: unset + description: The default route of the management network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. + type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + str_replace: + template: + get_file: ../../scripts/run-os-net-config.sh + params: + $network_config: + network_config: + - type: interface + name: nic1 + use_dhcp: false + dns_servers: + get_param: DnsServers + addresses: + - ip_netmask: + list_join: + - '/' + - - get_param: ControlPlaneIp + - get_param: ControlPlaneSubnetCidr + routes: + - ip_netmask: 169.254.169.254/32 + next_hop: + get_param: EC2MetadataIp + - type: interface + name: nic2 + use_dhcp: false + - type: interface + name: vhost0 + use_dhcp: false + addresses: + - ip_netmask: + get_param: InternalApiIpSubnet + routes: + - default: true + next_hop: + get_param: InternalApiDefaultRoute + - type: linux_bridge + name: br0 + use_dhcp: false + members: + - type: interface + name: nic3 + - type: vlan + vlan_id: + get_param: ManagementNetworkVlanID + device: br0 + addresses: + - ip_netmask: + get_param: ManagementIpSubnet + - type: vlan + vlan_id: + get_param: ExternalNetworkVlanID + device: br0 + addresses: + - ip_netmask: + get_param: ExternalIpSubnet + - type: vlan + vlan_id: + get_param: StorageNetworkVlanID + device: br0 + addresses: + - ip_netmask: + get_param: StorageIpSubnet + - type: vlan + vlan_id: + get_param: StorageMgmtNetworkVlanID + device: br0 + addresses: + - ip_netmask: + get_param: StorageMgmtIpSubnet + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. 
+ value: + get_resource: OsNetConfigImpl diff --git a/environments/contrail/contrail-services.yaml b/environments/contrail/contrail-services.yaml new file mode 100644 index 00000000..80ef9d3a --- /dev/null +++ b/environments/contrail/contrail-services.yaml @@ -0,0 +1,45 @@ +# A Heat environment file which can be used to enable OpenContrail +# extensions, configured via puppet +resource_registry: + OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None + OS::TripleO::Services::NeutronL3Agent: OS::Heat::None + OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None + OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginContrail + OS::TripleO::Services::ComputeNeutronCorePlugin: OS::TripleO::Services::ComputeNeutronCorePluginContrail + OS::TripleO::NodeUserData: ../../firstboot/install_vrouter_kmod.yaml + OS::TripleO::Services::ContrailHeat: ../../puppet/services/network/contrail-heat.yaml + OS::TripleO::Services::ContrailAnalytics: ../../puppet/services/network/contrail-analytics.yaml + OS::TripleO::Services::ContrailAnalyticsDatabase: ../../puppet/services/network/contrail-analytics-database.yaml + OS::TripleO::Services::ContrailConfig: ../../puppet/services/network/contrail-config.yaml + OS::TripleO::Services::ContrailControl: ../../puppet/services/network/contrail-control.yaml + OS::TripleO::Services::ContrailDatabase: ../../puppet/services/network/contrail-database.yaml + OS::TripleO::Services::ContrailWebUI: ../../puppet/services/network/contrail-webui.yaml + OS::TripleO::Services::ContrailTsn: ../../puppet/services/network/contrail-tsn.yaml + OS::TripleO::Services::ComputeNeutronCorePluginContrail: ../../puppet/services/network/contrail-vrouter.yaml + OS::TripleO::Services::NeutronCorePluginContrail: ../../puppet/services/network/contrail-neutron-plugin.yaml +parameter_defaults: + ContrailRepo: http://192.168.24.1/contrail-3.2.0.0-19 + EnablePackageInstall: true +# ContrailConfigIfmapUserName: api-server +# ContrailConfigIfmapUserPassword: api-server + OvercloudControlFlavor: control + OvercloudContrailControllerFlavor: contrail-controller + OvercloudContrailAnalyticsFlavor: contrail-analytics + OvercloudContrailAnalyticsDatabaseFlavor: contrail-analytics-database + OvercloudContrailTsnFlavor: contrail-tsn + OvercloudComputeFlavor: compute + ControllerCount: 3 + ContrailControllerCount: 3 + ContrailAnalyticsCount: 3 + ContrailAnalyticsDatabaseCount: 3 + ContrailTsnCount: 1 + ComputeCount: 3 + DnsServers: ["8.8.8.8","8.8.4.4"] + NtpServer: 10.0.0.1 + NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2 + NeutronServicePlugins: '' + NeutronTunnelTypes: '' +# NeutronMetadataProxySharedSecret: +# ContrailControlRNDCSecret: # sha1/256 hmac key, e.g. echo -n "values" | openssl dgst -sha256 -hmac key -binary | base64 diff --git a/environments/contrail/roles_data_contrail.yaml b/environments/contrail/roles_data_contrail.yaml new file mode 100644 index 00000000..5f6c4691 --- /dev/null +++ b/environments/contrail/roles_data_contrail.yaml @@ -0,0 +1,237 @@ +# Specifies which roles (groups of nodes) will be deployed +# Note this is used as an input to the various *.j2.yaml +# jinja2 templates, so that they are converted into *.yaml +# during the plan creation (via a mistral action/workflow).
+# +# The format is a list, with the following format: +# +# * name: (string) mandatory, name of the role, must be unique +# +# CountDefault: (number) optional, default number of nodes, defaults to 0 +# sets the default for the {{role.name}}Count parameter in overcloud.yaml +# +# HostnameFormatDefault: (string) optional default format string for hostname +# defaults to '%stackname%-{{role.name.lower()}}-%index%' +# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml +# +# disable_constraints: (boolean) optional, whether to disable Nova and Glance +# constraints for each role specified in the templates. +# +# upgrade_batch_size: (number): batch size for upgrades where tasks are +# specified by services to run in batches vs all nodes at once. +# This defaults to 1, but larger batches may be specified here. +# +# ServicesDefault: (list) optional default list of services to be deployed +# on the role, defaults to an empty list. Sets the default for the +# {{role.name}}Services parameter in overcloud.yaml + +- name: Controller # the 'primary' role goes first + CountDefault: 1 + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::CephMds + - OS::TripleO::Services::CephMon + - OS::TripleO::Services::CephExternal + - OS::TripleO::Services::CephRbdMirror + - OS::TripleO::Services::CephRgw + - OS::TripleO::Services::CinderApi + - OS::TripleO::Services::CinderBackup + - OS::TripleO::Services::CinderScheduler + - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::ContrailHeat + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Keystone + - OS::TripleO::Services::GlanceApi + - OS::TripleO::Services::HeatApi + - OS::TripleO::Services::HeatApiCfn + - OS::TripleO::Services::HeatApiCloudwatch + - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::MySQL + - OS::TripleO::Services::NeutronApi + - OS::TripleO::Services::NeutronCorePlugin + - OS::TripleO::Services::RabbitMQ + - OS::TripleO::Services::HAproxy + - OS::TripleO::Services::Keepalived + - OS::TripleO::Services::Memcached + - OS::TripleO::Services::Pacemaker + - OS::TripleO::Services::Redis + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::MongoDb + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::NovaConsoleauth + - OS::TripleO::Services::NovaVncProxy + - OS::TripleO::Services::Ec2Api + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::CeilometerApi + - OS::TripleO::Services::CeilometerCollector + - OS::TripleO::Services::CeilometerExpirer + - OS::TripleO::Services::CeilometerAgentCentral + - OS::TripleO::Services::CeilometerAgentNotification + - OS::TripleO::Services::Horizon + - OS::TripleO::Services::GnocchiApi + - OS::TripleO::Services::GnocchiMetricd + - OS::TripleO::Services::GnocchiStatsd + - OS::TripleO::Services::ManilaApi + - OS::TripleO::Services::ManilaScheduler + - OS::TripleO::Services::ManilaBackendGeneric + - OS::TripleO::Services::ManilaBackendNetapp + - OS::TripleO::Services::ManilaBackendCephFs + - OS::TripleO::Services::ManilaShare + - OS::TripleO::Services::AodhApi + - OS::TripleO::Services::AodhEvaluator + - OS::TripleO::Services::AodhNotifier + - 
OS::TripleO::Services::AodhListener + - OS::TripleO::Services::SaharaApi + - OS::TripleO::Services::SaharaEngine + - OS::TripleO::Services::IronicApi + - OS::TripleO::Services::IronicConductor + - OS::TripleO::Services::NovaIronic + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::OpenDaylightApi + - OS::TripleO::Services::OpenDaylightOvs + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::BarbicanApi + - OS::TripleO::Services::PankoApi + - OS::TripleO::Services::Zaqar + - OS::TripleO::Services::OVNDBs + - OS::TripleO::Services::CinderHPELeftHandISCSI + - OS::TripleO::Services::Etcd + - OS::TripleO::Services::AuditD + +- name: Compute + CountDefault: 1 + HostnameFormatDefault: '%stackname%-novacompute-%index%' + disable_upgrade_deployment: True + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::CephClient + - OS::TripleO::Services::CephExternal + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd + - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::ComputeNeutronCorePlugin + - OS::TripleO::Services::ComputeNeutronOvsAgent + - OS::TripleO::Services::ComputeCeilometerAgent + - OS::TripleO::Services::ComputeNeutronL3Agent + - OS::TripleO::Services::ComputeNeutronMetadataAgent + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::NeutronSriovAgent + - OS::TripleO::Services::OpenDaylightOvs + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + +- name: BlockStorage + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::BlockStorageCinderVolume + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + +- name: ObjectStorage + disable_upgrade_deployment: True + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + +- name: CephStorage + disable_upgrade_deployment: True + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::TripleoFirewall + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + +- name: ContrailController + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::ContrailConfig + - OS::TripleO::Services::ContrailControl + - 
OS::TripleO::Services::ContrailDatabase + - OS::TripleO::Services::ContrailWebUI + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + +- name: ContrailAnalytics + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::ContrailAnalytics + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + +- name: ContrailAnalyticsDatabase + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::ContrailAnalyticsDatabase + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient + +- name: ContrailTsn + ServicesDefault: + - OS::TripleO::Services::CACerts + - OS::TripleO::Services::ContrailTsn + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::SensuClient + - OS::TripleO::Services::FluentdClient diff --git a/environments/deployed-server-bootstrap-environment-centos.yaml b/environments/deployed-server-bootstrap-environment-centos.yaml new file mode 100644 index 00000000..ebcdfc2b --- /dev/null +++ b/environments/deployed-server-bootstrap-environment-centos.yaml @@ -0,0 +1,7 @@ +# An environment that can be used with the deployed-server.yaml template to do +# initial bootstrapping of the deployed servers. +resource_registry: + OS::TripleO::DeployedServer::Bootstrap: ../deployed-server/deployed-server-bootstrap-centos.yaml + +parameter_defaults: + EnablePackageInstall: True diff --git a/environments/deployed-server-bootstrap-environment-rhel.yaml b/environments/deployed-server-bootstrap-environment-rhel.yaml new file mode 100644 index 00000000..f614a91a --- /dev/null +++ b/environments/deployed-server-bootstrap-environment-rhel.yaml @@ -0,0 +1,7 @@ +# An environment that can be used with the deployed-server.yaml template to do +# initial bootstrapping of the deployed servers. 
+resource_registry: + OS::TripleO::DeployedServer::Bootstrap: ../deployed-server/deployed-server-bootstrap-rhel.yaml + +parameter_defaults: + EnablePackageInstall: True diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml index 7a6639f9..7bc1bd9b 100644 --- a/environments/deployed-server-environment.yaml +++ b/environments/deployed-server-environment.yaml @@ -1,3 +1,4 @@ resource_registry: OS::TripleO::Server: ../deployed-server/deployed-server.yaml OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port + OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None diff --git a/environments/deployed-server-noop-ctlplane.yaml b/environments/deployed-server-noop-ctlplane.yaml index 54f5e41d..8835d5b1 100644 --- a/environments/deployed-server-noop-ctlplane.yaml +++ b/environments/deployed-server-noop-ctlplane.yaml @@ -1,3 +1,4 @@ resource_registry: + OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None OS::TripleO::Server: ../deployed-server/deployed-server.yaml OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/deployed-neutron-port.yaml diff --git a/environments/deployed-server-pacemaker-environment.yaml b/environments/deployed-server-pacemaker-environment.yaml new file mode 100644 index 00000000..85fa7d2f --- /dev/null +++ b/environments/deployed-server-pacemaker-environment.yaml @@ -0,0 +1,4 @@ +resource_registry: + OS::TripleO::Tasks::ControllerDeployedServerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerDeployedServerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml + OS::TripleO::Tasks::ControllerDeployedServerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml diff --git a/environments/docker-network-isolation.yaml b/environments/docker-network-isolation.yaml deleted file mode 100644 index 87c81d0b..00000000 --- a/environments/docker-network-isolation.yaml +++ /dev/null @@ -1,4 +0,0 @@ -parameter_defaults: - NeutronOpenvswitchAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini" - NeutronOpenvswitchAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro" - NeutronOpenvswitchAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro" diff --git a/environments/docker.yaml b/environments/docker.yaml index 4f5b36b4..3696f908 100644 --- a/environments/docker.yaml +++ b/environments/docker.yaml @@ -1,28 +1,55 @@ resource_registry: - # Docker container with heat agents for containerized compute node. 
- OS::TripleO::Compute::NodeUserData: ../docker/firstboot/install_docker_agents.yaml + # This can be used when you don't want to run puppet on the host, + # e.g atomic, but it has been replaced with OS::TripleO::Services::Docker + # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml + OS::TripleO::Services::Docker: ../puppet/services/docker.yaml + + #NOTE (dprince) add roles to be docker enabled as we support them OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml OS::TripleO::Services::NovaCompute: ../docker/services/nova-compute.yaml - # NOTE (dprince) here we set new roles to be docker enabled as we add support - #OS::TripleO::ComputePostDeploySteps: ../docker/post.yaml - # NOTE (mandre) Defining per role post deploy steps doesn't work yet - # Set a global PostDeploySteps that works for both containerized and - # non-containerized roles + OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml + OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml + OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml + OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml + OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml + OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml + OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml + OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml + OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml + # FIXME: these need to go into a environments/services-docker dir? + OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml + OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml + OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml + OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml + OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml + OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml + OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml + OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml + OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml + OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml + OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml + OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml + OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml + OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml + OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml + OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml + OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml + OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml + OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml + OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml + OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml + OS::TripleO::PostDeploySteps: ../docker/post.yaml + OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml + OS::TripleO::Services: ../docker/services/services.yaml parameter_defaults: # Defaults to 'tripleoupstream'. 
Specify a local docker registry - # Example: 192.0.2.1:8787/tripleoupstream + # Example: 192.168.24.1:8787/tripleoupstream DockerNamespace: tripleoupstream - # Enable local Docker registry DockerNamespaceIsRegistry: false - DockerAgentImage: heat-docker-agents:newton - # Docker containers - DockerNovaComputeImage: centos-binary-nova-compute:newton - DockerLibvirtImage: centos-binary-nova-libvirt:newton - DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:newton ComputeServices: - OS::TripleO::Services::NovaCompute diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml index 6e912faa..ff4ecfbe 100644 --- a/environments/enable-internal-tls.yaml +++ b/environments/enable-internal-tls.yaml @@ -2,7 +2,18 @@ # a TLS for in the internal network via certmonger parameter_defaults: EnableInternalTLS: true + + # Required for novajoin to enroll the overcloud nodes + ServerMetadata: + ipa_enroll: True + resource_registry: OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml + # We use apache as a TLS proxy + OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml + + # Creates nova metadata that will create the extra service principals per + # node. + OS::TripleO::ServiceServerMetadataHook: ../extraconfig/nova_metadata/krb-service-principals.yaml diff --git a/environments/enable-swap-partition.yaml b/environments/enable-swap-partition.yaml new file mode 100644 index 00000000..71b70ec9 --- /dev/null +++ b/environments/enable-swap-partition.yaml @@ -0,0 +1,3 @@ +# Use this environment to create a swap partition in all Overcloud nodes +resource_registry: + OS::TripleO::AllNodesExtraConfig: ../extraconfig/all_nodes/swap-partition.yaml diff --git a/environments/enable-swap.yaml b/environments/enable-swap.yaml new file mode 100644 index 00000000..9ba08642 --- /dev/null +++ b/environments/enable-swap.yaml @@ -0,0 +1,3 @@ +# Use this environment to create a swap file in all Overcloud nodes +resource_registry: + OS::TripleO::AllNodesExtraConfig: ../extraconfig/all_nodes/swap.yaml diff --git a/environments/enable_congress.yaml b/environments/enable_congress.yaml new file mode 100644 index 00000000..1eea7f5e --- /dev/null +++ b/environments/enable_congress.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Services::Congress: ../puppet/services/congress.yaml diff --git a/environments/enable_tacker.yaml b/environments/enable_tacker.yaml new file mode 100644 index 00000000..1f9eca01 --- /dev/null +++ b/environments/enable_tacker.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Services::Tacker: ../puppet/services/tacker.yaml diff --git a/environments/horizon_password_validation.yaml b/environments/horizon_password_validation.yaml new file mode 100644 index 00000000..1a0f92cc --- /dev/null +++ b/environments/horizon_password_validation.yaml @@ -0,0 +1,5 @@ +# Use this environment to pass in validation regex for Horizon's password +# validation checks +parameter_defaults: + HorizonPasswordValidator: '.*' + HorizonPasswordValidatorHelp: 'Your password does not meet the requirements.'
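The '.*' pattern above matches any password, so the new horizon_password_validation.yaml file is effectively a template to be overridden per deployment. A minimal sketch of such an override, assuming Horizon accepts a Python-style regex here (the pattern and help text below are illustrative, not part of this change):

parameter_defaults:
  # Illustrative policy: at least 8 characters, including one upper-case letter and one digit.
  HorizonPasswordValidator: '^(?=.*[A-Z])(?=.*[0-9]).{8,}$'
  HorizonPasswordValidatorHelp: 'Password must be at least 8 characters long and contain an upper-case letter and a digit.'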
diff --git a/environments/host-config-pre-network.j2.yaml b/environments/host-config-pre-network.j2.yaml new file mode 100644 index 00000000..c79e28b4 --- /dev/null +++ b/environments/host-config-pre-network.j2.yaml @@ -0,0 +1,16 @@ +resource_registry: +# Create the registry only for roles whose name contains "Compute", e.g. ComputeOvsDpdk, ComputeSriov, etc. {%- for role in roles -%} {% if "Compute" in role.name %} + OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/{{role.name.lower()}}-host_config_and_reboot.yaml {%- endif -%} {% endfor %} + +#parameter_defaults: + # Sample parameters for Compute and ComputeOvsDpdk roles + #ComputeKernelArgs: "" + #ComputeTunedProfileName: "" + #ComputeHostCpuList: "" + #ComputeOvsDpdkKernelArgs: "" + #ComputeOvsDpdkTunedProfileName: "" + #ComputeOvsDpdkHostCpuList: "" diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml index 77fa5a49..f59b0414 100644 --- a/environments/hyperconverged-ceph.yaml +++ b/environments/hyperconverged-ceph.yaml @@ -11,6 +11,7 @@ parameter_defaults: - OS::TripleO::Services::Timezone - OS::TripleO::Services::Ntp - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::NovaCompute - OS::TripleO::Services::NovaLibvirt - OS::TripleO::Services::Kernel @@ -25,4 +26,8 @@ parameter_defaults: - OS::TripleO::Services::OpenDaylightOvs - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + - OS::TripleO::Services::Collectd - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::Vpp + - OS::TripleO::Services::MySQLClient diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml index 47b2003d..3a606336 100644 --- a/environments/low-memory-usage.yaml +++ b/environments/low-memory-usage.yaml @@ -11,8 +11,8 @@ parameter_defaults: SwiftWorkers: 1 GnocchiMetricdWorkers: 1 - ApacheMaxRequestWorkers: 32 - ApacheServerLimit: 32 + ApacheMaxRequestWorkers: 100 + ApacheServerLimit: 100 ControllerExtraConfig: 'nova::network::neutron::neutron_url_timeout': '60' diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml index 69d72edd..4283b212 100644 --- a/environments/major-upgrade-all-in-one.yaml +++ b/environments/major-upgrade-all-in-one.yaml @@ -1,8 +1,2 @@ -# We run the upgrade steps without disabling the OS::TripleO::PostDeploySteps -# this means you can do a major upgrade in one pass, which may be useful -# e.g for all-in-one deployments where we can upgrade the compute services -# at the same time as the controlplane -# Note that it will be necessary to pass a mapping of OS::Heat::None again for -# any subsequent updates, or the upgrade steps will run again.
resource_registry: - OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml + OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml diff --git a/environments/major-upgrade-composable-steps-docker.yaml b/environments/major-upgrade-composable-steps-docker.yaml new file mode 100644 index 00000000..5fa2f2d8 --- /dev/null +++ b/environments/major-upgrade-composable-steps-docker.yaml @@ -0,0 +1,10 @@ +resource_registry: + # FIXME(shardy) do we need to break major_upgrade_steps.yaml apart to + # enable docker specific logic, or is just overridding PostUpgradeSteps + # enough (as we want to share the ansible tasks steps etc) + OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml +parameter_defaults: + UpgradeLevelNovaCompute: auto + UpgradeInitCommonCommand: | + #!/bin/bash + # Ocata to Pike, put any needed host-level workarounds here diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml index 7e10014b..9ecc2251 100644 --- a/environments/major-upgrade-composable-steps.yaml +++ b/environments/major-upgrade-composable-steps.yaml @@ -1,3 +1,15 @@ resource_registry: - OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml - OS::TripleO::PostDeploySteps: OS::Heat::None + OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml +parameter_defaults: + UpgradeLevelNovaCompute: auto + UpgradeInitCommonCommand: | + #!/bin/bash + # Newton to Ocata, we need to remove old hiera hook data and + # install ansible heat agents and ansible-pacemaker + set -eu + yum install -y openstack-heat-agents + yum install -y python-heat-agent-* + yum install -y ansible-pacemaker + rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml + rm -f /usr/libexec/os-refresh-config/configure.d/40-hiera-datafiles + rm -f /etc/puppet/hieradata/*.yaml diff --git a/environments/major-upgrade-converge-docker.yaml b/environments/major-upgrade-converge-docker.yaml new file mode 100644 index 00000000..463206f1 --- /dev/null +++ b/environments/major-upgrade-converge-docker.yaml @@ -0,0 +1,7 @@ +# Use this to reset any mappings only used for upgrades after the +# update of all nodes is completed +resource_registry: + OS::TripleO::PostDeploySteps: ../docker/post.yaml +parameter_defaults: + UpgradeLevelNovaCompute: '' + UpgradeInitCommonCommand: '' diff --git a/environments/major-upgrade-converge.yaml b/environments/major-upgrade-converge.yaml new file mode 100644 index 00000000..f09fb20e --- /dev/null +++ b/environments/major-upgrade-converge.yaml @@ -0,0 +1,7 @@ +# Use this to reset any mappings only used for upgrades after the +# update of all nodes is completed +resource_registry: + OS::TripleO::PostDeploySteps: ../puppet/post.yaml +parameter_defaults: + UpgradeLevelNovaCompute: '' + UpgradeInitCommonCommand: '' diff --git a/environments/net-bond-with-vlans-no-external.yaml b/environments/net-bond-with-vlans-no-external.yaml index 75959a0b..cc27d4f0 100644 --- a/environments/net-bond-with-vlans-no-external.yaml +++ b/environments/net-bond-with-vlans-no-external.yaml @@ -20,7 +20,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-no-external.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml - -# NOTE: with no external interface we should be able to use the -# default Neutron l3_agent.ini setting for the external 
bridge (br-ex) -# i.e. No need to set: NeutronExternalNetworkBridge: "''" diff --git a/environments/net-bond-with-vlans-v6.yaml b/environments/net-bond-with-vlans-v6.yaml index 73dda3d9..dc6fdfe3 100644 --- a/environments/net-bond-with-vlans-v6.yaml +++ b/environments/net-bond-with-vlans-v6.yaml @@ -12,9 +12,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller-v6.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-bond-with-vlans.yaml b/environments/net-bond-with-vlans.yaml index de8f8f74..38c31cac 100644 --- a/environments/net-bond-with-vlans.yaml +++ b/environments/net-bond-with-vlans.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/bond-with-vlans/controller.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/bond-with-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-linux-bridge-with-vlans.yaml b/environments/net-single-nic-linux-bridge-with-vlans.yaml index fd80bb9b..f34cfb92 100644 --- a/environments/net-single-nic-linux-bridge-with-vlans.yaml +++ b/environments/net-single-nic-linux-bridge-with-vlans.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/controller.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-with-vlans-no-external.yaml b/environments/net-single-nic-with-vlans-no-external.yaml index c7594b32..65d38137 100644 --- a/environments/net-single-nic-with-vlans-no-external.yaml +++ b/environments/net-single-nic-with-vlans-no-external.yaml @@ -19,7 +19,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-no-external.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml - -# NOTE: with no external interface we should be able to use the -# default Neutron l3_agent.ini setting for the external bridge (br-ex) -# i.e. 
No need to set: NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-with-vlans-v6.yaml b/environments/net-single-nic-with-vlans-v6.yaml index 8210bad3..966e5fe9 100644 --- a/environments/net-single-nic-with-vlans-v6.yaml +++ b/environments/net-single-nic-with-vlans-v6.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller-v6.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/net-single-nic-with-vlans.yaml b/environments/net-single-nic-with-vlans.yaml index a61bc6e1..b087b3e4 100644 --- a/environments/net-single-nic-with-vlans.yaml +++ b/environments/net-single-nic-with-vlans.yaml @@ -11,9 +11,3 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/single-nic-vlans/controller.yaml OS::TripleO::ObjectStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/swift-storage.yaml OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/single-nic-vlans/ceph-storage.yaml - -parameter_defaults: - # This sets 'external_network_bridge' in l3_agent.ini to an empty string - # so that external networks act like provider bridge networks (they - # will plug into br-int instead of br-ex) - NeutronExternalNetworkBridge: "''" diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml index b02fc198..210b6b03 100644 --- a/environments/network-environment.yaml +++ b/environments/network-environment.yaml @@ -43,13 +43,11 @@ parameter_defaults: ExternalInterfaceDefaultRoute: 10.0.0.1 # Uncomment if using the Management Network (see network-management.yaml) # ManagementNetCidr: 10.0.1.0/24 - # ManagementAllocationPools: [{'start': '10.0.1.10', 'end', '10.0.1.50'}] + # ManagementAllocationPools: [{'start': '10.0.1.10', 'end': '10.0.1.50'}] # Use either this parameter or ControlPlaneDefaultRoute in the NIC templates # ManagementInterfaceDefaultRoute: 10.0.1.1 # Define the DNS servers (maximum 2) for the overcloud nodes DnsServers: ["8.8.8.8","8.8.4.4"] - # Set to empty string to enable multiple external networks or VLANs - NeutronExternalNetworkBridge: "''" # List of Neutron network types for tenant networks (will be used in order) NeutronNetworkType: 'vxlan,vlan' # The tunnel type for the tenant network (vxlan or gre). Set to '' to disable tunneling. 
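The network-environment.yaml hunk above fixes the commented ManagementAllocationPools example, which previously used 'end', where 'end': was intended, and drops the NeutronExternalNetworkBridge default. For reference, each pool is a map with 'start' and 'end' keys and several pools may be listed; a short sketch with illustrative addresses:

parameter_defaults:
  # One or more contiguous ranges; the addresses below are placeholders.
  ManagementAllocationPools: [{'start': '10.0.1.10', 'end': '10.0.1.50'}]
  # A split range can be expressed as additional entries, e.g.:
  # ManagementAllocationPools: [{'start': '10.0.1.10', 'end': '10.0.1.50'}, {'start': '10.0.1.100', 'end': '10.0.1.150'}]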
diff --git a/environments/neutron-ml2-fujitsu-cfab.yaml b/environments/neutron-ml2-fujitsu-cfab.yaml new file mode 100644 index 00000000..f14f7ee2 --- /dev/null +++ b/environments/neutron-ml2-fujitsu-cfab.yaml @@ -0,0 +1,21 @@ +# A Heat environment file which can be used to enable Fujitsu C-Fabric +# plugin, configured via puppet +resource_registry: + OS::TripleO::Services::NeutronML2FujitsuCfab: ../puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml + +parameter_defaults: + # Fixed + NeutronMechanismDrivers: ['openvswitch','fujitsu_cfab'] + NeutronTypeDrivers: 'vlan' + NeutronNetworkType: 'vlan' + + # Required + NeutronFujitsuCfabAddress: '192.168.0.1' + NeutronFujitsuCfabUserName: 'admin' + NeutronFujitsuCfabPassword: + + # Optional + #NeutronFujitsuCfabPhysicalNetworks: + #NeutronFujitsuCfabSharePprofile: + #NeutronFujitsuCfabPprofilePrefix: + #NeutronFujitsuCfabSaveConfig: diff --git a/environments/neutron-ml2-fujitsu-fossw.yaml b/environments/neutron-ml2-fujitsu-fossw.yaml new file mode 100644 index 00000000..8db8da75 --- /dev/null +++ b/environments/neutron-ml2-fujitsu-fossw.yaml @@ -0,0 +1,22 @@ +# A Heat environment file which can be used to enable Fujitsu fossw +# plugin, configured via puppet +resource_registry: + OS::TripleO::Services::NeutronML2FujitsuFossw: ../puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml + +parameter_defaults: + # Fixed + NeutronMechanismDrivers: ['openvswitch','fujitsu_fossw'] + NeutronTypeDrivers: ['vlan','vxlan'] + NeutronNetworkType: ['vlan','vxlan'] + + # Required + NeutronFujitsuFosswIps: '192.168.0.1,192.168.0.2' + NeutronFujitsuFosswUserName: + NeutronFujitsuFosswPassword: + + # Optional + #NeutronFujitsuFosswPort: + #NeutronFujitsuFosswTimeout: + #NeutronFujitsuFosswUdpDestPort: + #NeutronFujitsuFosswOvsdbVlanidRangeMin: + #NeutronFujitsuFosswOvsdbPort: diff --git a/environments/neutron-ml2-ovn.yaml b/environments/neutron-ml2-ovn.yaml index 3da560c8..7483bdbb 100644 --- a/environments/neutron-ml2-ovn.yaml +++ b/environments/neutron-ml2-ovn.yaml @@ -3,6 +3,7 @@ resource_registry: OS::TripleO::Services::NeutronL3Agent: OS::Heat::None OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml # Disabling Neutron services that overlap with OVN @@ -12,11 +13,12 @@ resource_registry: parameter_defaults: NeutronMechanismDrivers: ovn - OVNSouthboundServerPort: 6642 - OVNNorthboundServerPort: 6641 - OVNDbConnectionTimeout: 60 OVNVifType: ovs OVNNeutronSyncMode: log OVNQosDriver: ovn-qos OVNTunnelEncapType: geneve NeutronEnableDHCPAgent: false + NeutronTypeDrivers: 'geneve,vxlan,vlan,flat' + NeutronNetworkType: 'geneve' + NeutronServicePlugins: 'qos,ovn-router' + NeutronVniRanges: ['1:65536', ] diff --git a/environments/neutron-opencontrail.yaml b/environments/neutron-opencontrail.yaml deleted file mode 100644 index 51575b86..00000000 --- a/environments/neutron-opencontrail.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# A Heat environment file which can be used to enable OpenContrail -# extensions, configured via puppet -resource_registry: - OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml - OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None - OS::TripleO::Services::NeutronL3Agent: OS::Heat::None - OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None - 
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None - # Override the NeutronCorePlugin to use Nuage - OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginOpencontrail - OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-opencontrail.yaml - -parameter_defaults: - NeutronCorePlugin: neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2 - NeutronServicePlugins: neutron_plugin_contrail.plugins.opencontrail.loadbalancer.v2.plugin.LoadBalancerPluginV2 - NeutronTunnelTypes: '' - - # required params: - #ContrailApiServerIp: - #ContrailExtensions: '' - - # optional params - # ContrailApiServerPort: 8082 - # ContrailMultiTenancy: false diff --git a/environments/neutron-opendaylight-l3.yaml b/environments/neutron-opendaylight-l3.yaml deleted file mode 100644 index 00be3048..00000000 --- a/environments/neutron-opendaylight-l3.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# A Heat environment that can be used to deploy OpenDaylight with L3 DVR -resource_registry: - OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None - OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None - OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml - OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml - OS::TripleO::Services::NeutronL3Agent: OS::Heat::None - -parameter_defaults: - NeutronEnableForceMetadata: true - NeutronMechanismDrivers: 'opendaylight' - NeutronServicePlugins: "networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin" - OpenDaylightEnableL3: "'yes'" diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml index 35c90aab..ed7292b7 100644 --- a/environments/neutron-opendaylight.yaml +++ b/environments/neutron-opendaylight.yaml @@ -1,11 +1,13 @@ -# A Heat environment that can be used to deploy OpenDaylight +# A Heat environment that can be used to deploy OpenDaylight with L3 DVR resource_registry: OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml + OS::TripleO::Services::NeutronL3Agent: OS::Heat::None parameter_defaults: NeutronEnableForceMetadata: true - NeutronMechanismDrivers: 'opendaylight' + NeutronMechanismDrivers: 'opendaylight_v2' + NeutronServicePlugins: 'odl-router_v2,trunk' diff --git a/environments/neutron-sriov.yaml b/environments/neutron-sriov.yaml index 9b7e51f9..5e9e15e3 100755 --- a/environments/neutron-sriov.yaml +++ b/environments/neutron-sriov.yaml @@ -9,9 +9,6 @@ parameter_defaults: #NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter'] #NovaSchedulerAvailableFilters: ["nova.scheduler.filters.all_filters","nova.scheduler.filters.pci_passthrough_filter.PciPassthroughFilter"] - # Provide the vendorid:productid of the VFs - #NeutronSupportedPCIVendorDevs: ['8086:154c','8086:10ca','8086:1520'] - #NeutronPhysicalDevMappings: "datacentre:ens20f2" # Number of VFs that needs to be configured for a physical 
interface diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml index 06e4f7aa..5f8b02ad 100644 --- a/environments/puppet-ceph-external.yaml +++ b/environments/puppet-ceph-external.yaml @@ -30,5 +30,8 @@ parameter_defaults: # finally we disable the Cinder LVM backend CinderEnableIscsiBackend: false + # Uncomment if connecting to a pre-Jewel or RHCS1.3 Ceph Cluster + # RbdDefaultFeatures: 1 + # Backward compatibility setting, will be removed in the future CephAdminKey: '' diff --git a/environments/puppet-ceph.yaml b/environments/puppet-ceph.yaml new file mode 100644 index 00000000..57af540a --- /dev/null +++ b/environments/puppet-ceph.yaml @@ -0,0 +1,12 @@ +resource_registry: + OS::TripleO::Services::CephMon: ../puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: ../puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephClient: ../puppet/services/ceph-client.yaml + +parameter_defaults: + CinderEnableIscsiBackend: false + CinderEnableRbdBackend: true + CinderBackupBackend: ceph + NovaEnableRbdBackend: true + GlanceBackend: rbd + GnocchiBackend: rbd diff --git a/environments/puppet-pacemaker.yaml b/environments/puppet-pacemaker.yaml index b8e93f20..da607a72 100644 --- a/environments/puppet-pacemaker.yaml +++ b/environments/puppet-pacemaker.yaml @@ -1,7 +1,6 @@ # An environment which enables configuration of an # Overcloud controller with Pacemaker. resource_registry: - OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml @@ -13,6 +12,7 @@ resource_registry: OS::TripleO::Services::RabbitMQ: ../puppet/services/pacemaker/rabbitmq.yaml OS::TripleO::Services::HAproxy: ../puppet/services/pacemaker/haproxy.yaml OS::TripleO::Services::Pacemaker: ../puppet/services/pacemaker.yaml + OS::TripleO::Services::PacemakerRemote: ../puppet/services/pacemaker_remote.yaml OS::TripleO::Services::Redis: ../puppet/services/pacemaker/database/redis.yaml OS::TripleO::Services::MySQL: ../puppet/services/pacemaker/database/mysql.yaml # Services that are disabled by default (use relevant environment files): diff --git a/environments/services/ceph-mds.yaml b/environments/services/ceph-mds.yaml new file mode 100644 index 00000000..2b51374c --- /dev/null +++ b/environments/services/ceph-mds.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
\ No newline at end of file diff --git a/environments/services/ceph-rbdmirror.yaml b/environments/services/ceph-rbdmirror.yaml new file mode 100644 index 00000000..b350e4c5 --- /dev/null +++ b/environments/services/ceph-rbdmirror.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Services::CephRbdMirror: ../../puppet/services/pacemaker/ceph-rbdmirror.yaml diff --git a/environments/services/disable-ceilometer-api.yaml b/environments/services/disable-ceilometer-api.yaml new file mode 100644 index 00000000..94cd8d5d --- /dev/null +++ b/environments/services/disable-ceilometer-api.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Services::CeilometerApi: OS::Heat::None diff --git a/environments/services/ec2-api.yaml b/environments/services/ec2-api.yaml new file mode 100644 index 00000000..d751ba23 --- /dev/null +++ b/environments/services/ec2-api.yaml @@ -0,0 +1,3 @@ +# A Heat environment file which can be used to enable EC2-API service. +resource_registry: + OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml diff --git a/environments/services/etcd.yaml b/environments/services/etcd.yaml new file mode 100644 index 00000000..08d54d58 --- /dev/null +++ b/environments/services/etcd.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::Services::Etcd: ../../puppet/services/etcd.yaml diff --git a/environments/services/octavia.yaml b/environments/services/octavia.yaml new file mode 100644 index 00000000..24c57b8c --- /dev/null +++ b/environments/services/octavia.yaml @@ -0,0 +1,9 @@ +resource_registry: + OS::TripleO::Services::OctaviaApi: ../../puppet/services/octavia-api.yaml + OS::TripleO::Services::OctaviaHealthManager: ../../puppet/services/octavia-health-manager.yaml + OS::TripleO::Services::OctaviaHousekeeping: ../../puppet/services/octavia-housekeeping.yaml + OS::TripleO::Services::OctaviaWorker: ../../puppet/services/octavia-worker.yaml + +parameter_defaults: + NeutronServicePlugins: "qos,router,trunk,lbaasv2" + NeutronEnableForceMetadata: true diff --git a/environments/services/vpp.yaml b/environments/services/vpp.yaml new file mode 100644 index 00000000..9bad70f8 --- /dev/null +++ b/environments/services/vpp.yaml @@ -0,0 +1,9 @@ +resource_registry: + OS::TripleO::Services::Vpp: ../../puppet/services/vpp.yaml + +#parameter_defaults: + #VPP main thread core pinning + #VppCpuMainCore: '1' + + #List of cores for VPP worker thread pinning + #VppCpuCorelistWorkers: ['3','4'] diff --git a/environments/sshd-banner.yaml b/environments/sshd-banner.yaml new file mode 100644 index 00000000..041c0990 --- /dev/null +++ b/environments/sshd-banner.yaml @@ -0,0 +1,13 @@ +resource_registry: + OS::TripleO::Services::Sshd: ../puppet/services/sshd.yaml + +parameter_defaults: + BannerText: | + ****************************************************************** + * This system is for the use of authorized users only. Usage of * + * this system may be monitored and recorded by system personnel. 
* + * Anyone using this system expressly consents to such monitoring * + * and is advised that if such monitoring reveals possible * + * evidence of criminal activity, system personnel may provide * + * the evidence from such monitoring to law enforcement officials.* + ****************************************************************** diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml index fb66b38a..1b666c5b 100644 --- a/environments/tls-endpoints-public-dns.yaml +++ b/environments/tls-endpoints-public-dns.yaml @@ -17,10 +17,48 @@ parameter_defaults: CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'} + CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'} + CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'} + CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'} + ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'} + Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'} + Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'} GlanceAdmin: 
{protocol: 'http', port: '9292', host: 'IP_ADDRESS'} GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'} - GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'} GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'} GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'} @@ -50,9 +88,15 @@ parameter_defaults: NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'} + NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'} + NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'} + NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'} NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'} + OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} + OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} + OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'} PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'} @@ -62,6 +106,9 @@ parameter_defaults: SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'} + TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'} + TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'} + TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'} ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'} ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'} ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'} diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml index 6586a547..7311a1f9 100644 --- a/environments/tls-endpoints-public-ip.yaml +++ b/environments/tls-endpoints-public-ip.yaml @@ -17,10 +17,48 @@ parameter_defaults: CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'} CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'} + CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'} + CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'} + CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'} + ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + 
ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'} + Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'} + Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'} GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'} GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'} - GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'} GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'} GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'} GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'} @@ -50,9 +88,15 @@ parameter_defaults: NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'} NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'} + NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'} + NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'} + NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'} NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'} NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'} + OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} + OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'} + OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'} PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'} PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'} @@ -62,6 +106,9 @@ parameter_defaults: SwiftAdmin: {protocol: 'http', port: '8080', 
host: 'IP_ADDRESS'} SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'} + TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'} + TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'} + TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'} ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'} ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'} ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'} diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml index ebb491f0..e6608b57 100644 --- a/environments/tls-everywhere-endpoints-dns.yaml +++ b/environments/tls-everywhere-endpoints-dns.yaml @@ -17,10 +17,48 @@ parameter_defaults: CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'} CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'} CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'} + CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'} + CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'} + CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'} + ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086', + host: 'IP_ADDRESS'} + ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'} + ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'} + ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'} + ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'} + ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'} + ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', 
host: 'IP_ADDRESS'} + Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'} + Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'} + Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'} GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'} GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'} GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'} - GlanceRegistryInternal: {protocol: 'https', port: '9191', host: 'CLOUDNAME'} GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'} GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'} GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'} @@ -50,9 +88,15 @@ parameter_defaults: NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'} NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'} NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'} + NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'} + NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'} + NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'} NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'} NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'} NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'} + OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'} + OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'} + OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'} PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'} PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'} PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'} @@ -62,6 +106,9 @@ parameter_defaults: SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'} SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'} SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'} + TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'} + TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'} + TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'} ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'} ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'} ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'} diff --git a/environments/undercloud.yaml b/environments/undercloud.yaml new file mode 100644 index 00000000..2540fbe5 --- /dev/null +++ b/environments/undercloud.yaml @@ -0,0 +1,19 @@ +resource_registry: + OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/noop.yaml + OS::TripleO::Network::Ports::ControlPlaneVipPort: ../deployed-server/deployed-neutron-port.yaml + OS::TripleO::Undercloud::Net::SoftwareConfig: ../net-config-undercloud.yaml + OS::TripleO::NodeExtraConfigPost: ../extraconfig/post_deploy/undercloud_post.yaml + +parameter_defaults: + StackAction: CREATE + SoftwareConfigTransport: POLL_SERVER_HEAT + NeutronTunnelTypes: [] + NeutronBridgeMappings: ctlplane:br-ctlplane + NeutronAgentExtensions: [] + NeutronFlatNetworks: '*' + NovaSchedulerAvailableFilters: 'tripleo_common.filters.list.tripleo_filters' + NovaSchedulerDefaultFilters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 
'ServerGroupAffinityFilter'] + NeutronDhcpAgentsPerNetwork: 2 + HeatConvergenceEngine: false + HeatMaxResourcesPerStack: -1 + HeatMaxJsonBodySize: 2097152 diff --git a/environments/updates/README.md b/environments/updates/README.md index 426d7329..93714ed8 100644 --- a/environments/updates/README.md +++ b/environments/updates/README.md @@ -10,3 +10,6 @@ Contents **update-from-publicvip-on-ctlplane.yaml** To be used if the PublicVirtualIP resource was deployed as an additional VIP on the 'ctlplane'. + +**update-from-deployed-server-newton.yaml** + To be used when updating from the deployed-server template from Newton. diff --git a/environments/updates/update-from-deployed-server-newton.yaml b/environments/updates/update-from-deployed-server-newton.yaml new file mode 100644 index 00000000..6fe3a4cb --- /dev/null +++ b/environments/updates/update-from-deployed-server-newton.yaml @@ -0,0 +1,2 @@ +resource_registry: + OS::TripleO::DeployedServer::ControlPlanePort: ../../deployed-server/ctlplane-port.yaml diff --git a/environments/updates/update-from-keystone-admin-internal-api.yaml b/environments/updates/update-from-keystone-admin-internal-api.yaml index a5075300..97687c6a 100644 --- a/environments/updates/update-from-keystone-admin-internal-api.yaml +++ b/environments/updates/update-from-keystone-admin-internal-api.yaml @@ -2,32 +2,5 @@ # Keystone Admin API service is running on the Internal API network parameter_defaults: - ServiceNetMapDefaults: - NeutronTenantNetwork: tenant - CeilometerApiNetwork: internal_api - MongodbNetwork: internal_api - CinderApiNetwork: internal_api - CinderIscsiNetwork: storage - GlanceApiNetwork: storage - GlanceRegistryNetwork: internal_api + ServiceNetMap: KeystoneAdminApiNetwork: internal_api - KeystonePublicApiNetwork: internal_api - NeutronApiNetwork: internal_api - HeatApiNetwork: internal_api - NovaApiNetwork: internal_api - NovaMetadataNetwork: internal_api - NovaVncProxyNetwork: internal_api - SwiftMgmtNetwork: storage_mgmt - SwiftProxyNetwork: storage - HorizonNetwork: internal_api - MemcachedNetwork: internal_api - RabbitmqNetwork: internal_api - RedisNetwork: internal_api - MysqlNetwork: internal_api - CephClusterNetwork: storage_mgmt - CephPublicNetwork: storage - ControllerHostnameResolveNetwork: internal_api - ComputeHostnameResolveNetwork: internal_api - BlockStorageHostnameResolveNetwork: internal_api - ObjectStorageHostnameResolveNetwork: internal_api - CephStorageHostnameResolveNetwork: storage diff --git a/extraconfig/all_nodes/swap-partition.j2.yaml b/extraconfig/all_nodes/swap-partition.j2.yaml index ffd30327..b6fef79f 100644 --- a/extraconfig/all_nodes/swap-partition.j2.yaml +++ b/extraconfig/all_nodes/swap-partition.j2.yaml @@ -1,11 +1,7 @@ heat_template_version: ocata -description: > - Extra config to add swap space to nodes. +description: Template file to add a swap partition to a node. -# Parameters passed from the parent template - note if you maintain -# out-of-tree templates they may require additional parameters if the -# in-tree templates add a new role.
parameters: servers: type: json @@ -14,9 +10,7 @@ parameters: description: Swap partition label default: 'swap1' - resources: - SwapConfig: type: OS::Heat::SoftwareConfig properties: @@ -25,8 +19,13 @@ resources: #!/bin/bash set -eux swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label) - swapon $swap_partition - echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab + if [ -f "$swap_partition" ]; then + swapon $swap_partition + echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab + else + echo "$swap_partition needs to be a valid path" + echo "Check that $swap_partition_label is a valid partition label" + fi inputs: - name: swap_partition_label description: Swap partition label diff --git a/extraconfig/all_nodes/swap.j2.yaml b/extraconfig/all_nodes/swap.j2.yaml index e8cd4c90..044f817c 100644 --- a/extraconfig/all_nodes/swap.j2.yaml +++ b/extraconfig/all_nodes/swap.j2.yaml @@ -1,11 +1,7 @@ heat_template_version: ocata -description: > - Extra config to add swap space to nodes. +description: Template file to add a swap file to a node. -# Parameters passed from the parent template - note if you maintain -# out-of-tree templates they may require additional parameters if the -# in-tree templates add a new role. parameters: servers: type: json @@ -18,9 +14,7 @@ parameters: description: Full path to location of swap file default: '/swap' - resources: - SwapConfig: type: OS::Heat::SoftwareConfig properties: diff --git a/extraconfig/nova_metadata/krb-service-principals.yaml b/extraconfig/nova_metadata/krb-service-principals.yaml new file mode 100644 index 00000000..c66e6460 --- /dev/null +++ b/extraconfig/nova_metadata/krb-service-principals.yaml @@ -0,0 +1,84 @@ +heat_template_version: ocata +description: 'Generates the relevant service principals for a server' + +parameters: + RoleData: + type: json + description: the list containing the 'role_data' output for the ServiceChain + + # Coming from parameter_defaults + CloudName: + default: overcloud.localdomain + description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org + type: string + CloudNameInternal: + default: overcloud.internalapi.localdomain + description: > + The DNS name of this cloud's internal API endpoint. E.g. + 'ci-overcloud.internalapi.tripleo.org'. + type: string + CloudNameStorage: + default: overcloud.storage.localdomain + description: > + The DNS name of this cloud's storage endpoint. E.g. + 'ci-overcloud.storage.tripleo.org'. + type: string + CloudNameStorageManagement: + default: overcloud.storagemgmt.localdomain + description: > + The DNS name of this cloud's storage management endpoint. E.g. + 'ci-overcloud.storagemgmt.tripleo.org'. + type: string + CloudNameCtlplane: + default: overcloud.ctlplane.localdomain + description: > + The DNS name of this cloud's provisioning (ctlplane) endpoint. E.g. + 'ci-overcloud.ctlplane.tripleo.org'. + type: string + +resources: + + IncomingMetadataSettings: + type: OS::Heat::Value + properties: + value: + yaql: + # Filter out null values and values that don't contain + # 'metadata_settings', get the values from that key and get the + # unique ones.
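+ # Illustrative example (hypothetical values, not taken from this repo): given RoleData + # entries such as [null, {'metadata_settings': [{'service': 'haproxy', 'network': 'external', 'type': 'vip'}]}], + # the expression below keeps only the distinct 'metadata_settings' items.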
+ expression: list($.data.where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct()) + data: {get_param: RoleData} + + # Generates entries for nova metadata with the following format: + # 'managed_service_<id>' : <service>/<fqdn> + # Depending on the requested network + IndividualServices: + type: OS::Heat::Value + properties: + value: + yaql: + expression: let(fqdns => $.data.fqdns) -> dict($.data.metadata.where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))])) + data: + metadata: {get_attr: [IncomingMetadataSettings, value]} + fqdns: + external: {get_param: CloudName} + internal_api: {get_param: CloudNameInternal} + storage: {get_param: CloudNameStorage} + storage_mgmt: {get_param: CloudNameStorageManagement} + ctlplane: {get_param: CloudNameCtlplane} + + CompactServices: + type: OS::Heat::Value + properties: + value: + yaql: + expression: dict($.data.where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1])) + data: {get_attr: [IncomingMetadataSettings, value]} + +outputs: + metadata: + description: actual metadata entries that will be passed to the server. + value: + map_merge: + - {get_attr: [IndividualServices, value]} + - compact_services: {get_attr: [CompactServices, value]} diff --git a/extraconfig/post_deploy/undercloud_post.sh b/extraconfig/post_deploy/undercloud_post.sh new file mode 100755 index 00000000..8bcae1d3 --- /dev/null +++ b/extraconfig/post_deploy/undercloud_post.sh @@ -0,0 +1,126 @@ +#!/bin/bash +set -eux + +ln -sf /etc/puppet/hiera.yaml /etc/hiera.yaml + + +# WRITE OUT STACKRC +if [ ! -e /root/stackrc ]; then + touch /root/stackrc + chmod 0600 /root/stackrc + +cat >> /root/stackrc <<-EOF_CAT +export OS_PASSWORD=$admin_password +export OS_AUTH_URL=$auth_url +export OS_USERNAME=admin +export OS_TENANT_NAME=admin +export COMPUTE_API_VERSION=1.1 +export NOVA_VERSION=1.1 +export OS_BAREMETAL_API_VERSION=1.15 +export OS_NO_CACHE=True +export OS_CLOUDNAME=undercloud +EOF_CAT + + if [ -n "$ssl_certificate" ]; then +cat >> /root/stackrc <<-EOF_CAT +export PYTHONWARNINGS="ignore:Certificate has no, ignore:A true SSLContext object is not available" +EOF_CAT + fi +fi + +source /root/stackrc + +if [ ! -f /root/.ssh/authorized_keys ]; then + sudo mkdir -p /root/.ssh + sudo chmod 7000 /root/.ssh/ + sudo touch /root/.ssh/authorized_keys + sudo chmod 600 /root/.ssh/authorized_keys +fi + +if [ ! -f /root/.ssh/id_rsa ]; then + ssh-keygen -b 1024 -N '' -f /root/.ssh/id_rsa +fi + +if ! 
grep "$(cat /root/.ssh/id_rsa.pub)" /root/.ssh/authorized_keys; then + cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys +fi + +PHYSICAL_NETWORK=ctlplane + +ctlplane_id=$(openstack network list -f csv -c ID -c Name --quote none | tail -n +2 | grep ctlplane | cut -d, -f1) +subnet_ids=$(openstack subnet list -f csv -c ID --quote none | tail -n +2) +subnet_id= + +for subnet_id in $subnet_ids; do + network_id=$(openstack subnet show -f value -c network_id $subnet_id) + if [ "$network_id" = "$ctlplane_id" ]; then + break + fi +done + +net_create=1 +if [ -n "$subnet_id" ]; then + cidr=$(openstack subnet show $subnet_id -f value -c cidr) + if [ "$cidr" = "$undercloud_network_cidr" ]; then + net_create=0 + else + echo "New cidr $undercloud_network_cidr does not equal old cidr $cidr" + echo "Will attempt to delete and recreate subnet $subnet_id" + fi +fi + +if [ "$net_create" -eq "1" ]; then + # Delete the subnet and network to make sure it doesn't already exist + if openstack subnet list | grep start; then + openstack subnet delete $(openstack subnet list | grep start | awk '{print $4}') + fi + if openstack network show ctlplane; then + openstack network delete ctlplane + fi + + + NETWORK_ID=$(openstack network create --provider-network-type=flat --provider-physical-network=ctlplane ctlplane | grep " id " | awk '{print $4}') + + NAMESERVER_ARG="" + if [ -n "${undercloud_nameserver:-}" ]; then + NAMESERVER_ARG="--dns-nameserver $undercloud_nameserver" + fi + + openstack subnet create --network=$NETWORK_ID \ + --gateway=$undercloud_network_gateway \ + --subnet-range=$undercloud_network_cidr \ + --allocation-pool start=$undercloud_dhcp_start,end=$undercloud_dhcp_end \ + --host-route destination=169.254.169.254/32,gateway=$local_ip \ + $NAMESERVER_ARG ctlplane +fi + +# Disable nova quotas +openstack quota set --cores -1 --instances -1 --ram -1 $(openstack project show admin | awk '$2=="id" {print $4}') + +# MISTRAL WORKFLOW CONFIGURATION +if [ "$(hiera mistral_api_enabled)" = "true" ]; then + # load workflows + for workbook in $(openstack workbook list | grep tripleo | cut -f 2 -d ' '); do + openstack workbook delete $workbook + done + for workflow in $(openstack workflow list | grep tripleo | cut -f 2 -d ' '); do + openstack workflow delete $workflow + done + for workbook in $(ls /usr/share/openstack-tripleo-common/workbooks/*); do + openstack workbook create $workbook + done + + # Store the SNMP password in a mistral environment + if ! openstack workflow env show tripleo.undercloud-config &>/dev/null; then + TMP_MISTRAL_ENV=$(mktemp) + echo "{\"name\": \"tripleo.undercloud-config\", \"variables\": {\"undercloud_ceilometer_snmpd_password\": \"$snmp_readonly_user_password\"}}" > $TMP_MISTRAL_ENV + openstack workflow env create $TMP_MISTRAL_ENV + fi + +fi + +# IP forwarding is needed to allow the overcloud nodes access to the outside +# internet in cases where they are on an isolated network. 
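+# (The undercloud typically acts as the gateway/NAT for the provisioning network, hence forwarding is required.)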
+sysctl -w net.ipv4.ip_forward=1 +# Make it persistent +echo "net.ipv4.ip_forward=1" > /etc/sysctl.d/ip-forward.conf diff --git a/extraconfig/post_deploy/undercloud_post.yaml b/extraconfig/post_deploy/undercloud_post.yaml new file mode 100644 index 00000000..38a9181e --- /dev/null +++ b/extraconfig/post_deploy/undercloud_post.yaml @@ -0,0 +1,93 @@ +heat_template_version: ocata + +description: > + Post-deployment for the TripleO undercloud + +parameters: + servers: + type: json + DeployedServerPortMap: + default: {} + type: json + UndercloudDhcpRangeStart: + type: string + default: '192.168.24.5' + UndercloudDhcpRangeEnd: + type: string + default: '192.168.24.24' + UndercloudNetworkCidr: + type: string + default: '192.168.24.0/24' + UndercloudNetworkGateway: + type: string + default: '192.168.24.1' + UndercloudNameserver: + type: string + default: '' + AdminPassword: #supplied by tripleo-undercloud-passwords.yaml + type: string + description: The password for the keystone admin account, used for monitoring, querying neutron etc. + hidden: True + SSLCertificate: + description: > + The content of the SSL certificate (without Key) in PEM format. + type: string + default: "" + hidden: True + SnmpdReadonlyUserPassword: + description: The user password for SNMPd with readonly rights running on all Overcloud nodes + type: string + hidden: true + +conditions: + + ssl_disabled: {equals : [{get_param: SSLCertificate}, ""]} + +resources: + + UndercloudPostConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: deploy_identifier + - name: local_ip + - name: undercloud_dhcp_start + - name: undercloud_dhcp_end + - name: undercloud_network_cidr + - name: undercloud_network_gateway + - name: undercloud_nameserver + - name: admin_password + - name: auth_url + - name: snmp_readonly_user_password + config: {get_file: ./undercloud_post.sh} + + UndercloudPostDeployment: + type: OS::Heat::SoftwareDeployments + properties: + servers: {get_param: servers} + config: {get_resource: UndercloudPostConfig} + input_values: + local_ip: {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]} + undercloud_dhcp_start: {get_param: UndercloudDhcpRangeStart} + undercloud_dhcp_end: {get_param: UndercloudDhcpRangeEnd} + undercloud_network_cidr: {get_param: UndercloudNetworkCidr} + undercloud_network_gateway: {get_param: UndercloudNetworkGateway} + undercloud_nameserver: {get_param: UndercloudNameserver} + ssl_certificate: {get_param: SSLCertificate} + admin_password: {get_param: AdminPassword} + snmp_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + # if SSL is enabled we use the public virtual ip as the stackrc endpoint + auth_url: + if: + - ssl_disabled + - list_join: + - '' + - - 'http://' + - {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]} + - ':5000/v2.0' + - list_join: + - '' + - - 'https://' + - {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]} + - ':13000/v2.0' diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml index c388358a..24557517 100644 --- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml @@ -21,3 +21,7 @@ parameter_defaults: rhel_reg_type: "" rhel_reg_method: "" rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms" + 
rhel_reg_http_proxy_host: "" + rhel_reg_http_proxy_port: "" + rhel_reg_http_proxy_username: "" + rhel_reg_http_proxy_password: "" diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml index fdf2e957..e8316c53 100644 --- a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml +++ b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml @@ -45,6 +45,14 @@ parameters: type: string rhel_reg_sat_repo: type: string + rhel_reg_http_proxy_host: + type: string + rhel_reg_http_proxy_port: + type: string + rhel_reg_http_proxy_username: + type: string + rhel_reg_http_proxy_password: + type: string resources: @@ -71,6 +79,10 @@ resources: - name: REG_TYPE - name: REG_METHOD - name: REG_SAT_REPO + - name: REG_HTTP_PROXY_HOST + - name: REG_HTTP_PROXY_PORT + - name: REG_HTTP_PROXY_USERNAME + - name: REG_HTTP_PROXY_PASSWORD config: {get_file: scripts/rhel-registration} RHELRegistrationDeployment: @@ -99,6 +111,10 @@ resources: REG_TYPE: {get_param: rhel_reg_type} REG_METHOD: {get_param: rhel_reg_method} REG_SAT_REPO: {get_param: rhel_reg_sat_repo} + REG_HTTP_PROXY_HOST: {get_param: rhel_reg_http_proxy_host} + REG_HTTP_PROXY_PORT: {get_param: rhel_reg_http_proxy_port} + REG_HTTP_PROXY_USERNAME: {get_param: rhel_reg_http_proxy_username} + REG_HTTP_PROXY_PASSWORD: {get_param: rhel_reg_http_proxy_password} RHELUnregistration: type: OS::Heat::SoftwareConfig diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration index 71ab0767..6f83cc4b 100644 --- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration +++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration @@ -11,11 +11,20 @@ if [ -e $OK ] ; then exit 0 fi +retryCount=0 opts= +config_opts= attach_opts= sat5_opts= repos="repos --enable rhel-7-server-rpms" satellite_repo=${REG_SAT_REPO} +proxy_host= +proxy_port= +proxy_url= +proxy_username= +proxy_password= + +# process variables.. if [ -n "${REG_AUTO_ATTACH:-}" ]; then opts="$opts --auto-attach" @@ -96,12 +105,79 @@ if [ -n "${REG_TYPE:-}" ]; then opts="$opts --type=$REG_TYPE" fi +# Proxy settings (host and port) +if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then + proxy_host="${REG_HTTP_PROXY_HOST}" +fi + +if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + proxy_port="${REG_HTTP_PROXY_PORT}" +fi + +# Proxy settings (user and password) +if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then + proxy_username="${REG_HTTP_PROXY_USERNAME}" +fi + +if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + proxy_password="${REG_HTTP_PROXY_PASSWORD}" +fi + +# Sanity Checks for proxy host/port/user/password +if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then + if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + # Good both values are not empty + proxy_url="http://${proxy_host}:${proxy_port}" + config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}" + sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}" + echo "RHSM Proxy set to: ${proxy_url}" + if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then + if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}" + sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}" + else + echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..." 
+ proxy_username= ; proxy_password= + fi + else + if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then + echo "Warning: REG_HTTP_PROXY_USERNAME cannot be null with non-empty REG_HTTP_PROXY_PASSWORD! Skipping..." + proxy_username= ; proxy_password= + fi + fi + else + echo "Warning: REG_HTTP_PROXY_PORT cannot be null with non-empty REG_HTTP_PROXY_HOST! Skipping..." + proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password= + fi +else + if [ -n "${REG_HTTP_PROXY_PORT:-}" ]; then + echo "Warning: REG_HTTP_PROXY_HOST cannot be null with non-empty REG_HTTP_PROXY_PORT! Skipping..." + proxy_host= ; proxy_port= ; proxy_url= ; proxy_username= ; proxy_password= + fi +fi + +function retry() { + if [[ $retryCount < 3 ]]; then + $@ + if ! [[ $? == 0 ]]; then + retryCount=$(echo $retryCount + 1 | bc) + echo "WARN: Failed to connect when running '$@', retrying..." + retry $@ + else + retryCount=0 + fi + else + echo "ERROR: Failed to connect after 3 attempts when running '$@'" + exit 1 + fi +} + function detect_satellite_version { ping_api=$REG_SAT_URL/katello/api/ping - if curl -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then + if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then echo Satellite 6 detected at $REG_SAT_URL satellite_version=6 - elif curl -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then + elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then echo Satellite 5 detected at $REG_SAT_URL satellite_version=5 else @@ -110,31 +186,52 @@ function detect_satellite_version { fi } +if [ "x${proxy_url}" != "x" ];then + # Config subscription-manager for proxy + subscription-manager config ${config_opts} + + # Config yum for proxy.. 
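+ # Remove any pre-existing proxy entries before appending the new values below, so repeated registration runs do not accumulate duplicates.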
+ sed -i -e '/^proxy=/d' /etc/yum.conf + echo "proxy=${proxy_url}" >> /etc/yum.conf + + # Handle optional username/password + if [ -n "${proxy_username}" ]; then + sed -i -e '/^proxy_username=/d' /etc/yum.conf + echo "proxy_username=${proxy_username}" >> /etc/yum.conf + fi + + if [ -n "${proxy_password}" ]; then + sed -i -e '/^proxy_password=/d' /etc/yum.conf + echo "proxy_password=${proxy_password}" >> /etc/yum.conf + fi + +fi + case "${REG_METHOD:-}" in portal) - subscription-manager register $opts + retry subscription-manager register $opts if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then - subscription-manager attach $attach_opts + retry subscription-manager attach $attach_opts fi - subscription-manager repos --disable '*' - subscription-manager $repos + retry subscription-manager repos --disable='*' + retry subscription-manager $repos ;; satellite) detect_satellite_version if [ "$satellite_version" = "6" ]; then repos="$repos --enable ${satellite_repo}" - curl -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm" + curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm" rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true - subscription-manager register $opts - subscription-manager $repos - yum install -y katello-agent || true # needed for errata reporting to satellite6 + retry subscription-manager register $opts + retry subscription-manager $repos + retry yum install -y katello-agent || true # needed for errata reporting to satellite6 katello-package-upload - subscription-manager repos --disable ${satellite_repo} + retry subscription-manager repos --disable ${satellite_repo} else pushd /usr/share/rhn/ - curl -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT + curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT popd - rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts + retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts fi ;; disable) diff --git a/extraconfig/pre_network/ansible_host_config.ansible b/extraconfig/pre_network/ansible_host_config.ansible new file mode 100644 index 00000000..c126c1a1 --- /dev/null +++ b/extraconfig/pre_network/ansible_host_config.ansible @@ -0,0 +1,58 @@ +--- +- name: Configuration to be applied before rebooting the node + connection: local + hosts: localhost + + tasks: + # Kernel Args Configuration + - block: + - name: Ensure the kernel args ( {{ _KERNEL_ARGS_ }} ) is present as TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS + lineinfile: + dest: /etc/default/grub + regexp: '^TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS.*' + insertafter: '^GRUB_CMDLINE_LINUX.*' + line: 'TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS=" {{ _KERNEL_ARGS_ }} "' + - name: Add TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS to the GRUB_CMDLINE_LINUX parameter + lineinfile: + dest: /etc/default/grub + line: 'GRUB_CMDLINE_LINUX="${GRUB_CMDLINE_LINUX:+$GRUB_CMDLINE_LINUX }${TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS}"' + insertafter: '^TRIPLEO_HEAT_TEMPLATE_KERNEL_ARGS.*' + - name: Generate grub config file + command: grub2-mkconfig -o /boot/grub2/grub.cfg + become: true + when: _KERNEL_ARGS_|default("") != "" + + # Tune-d Configuration + - block: + - name: Tune-d Configuration + lineinfile: + dest: /etc/tuned/cpu-partitioning-variables.conf + regexp: '^isolated_cores=.*' + line: 'isolated_cores={{ _HOST_CPUS_LIST_ }}' + when: _HOST_CPUS_LIST_|default("") != "" + + - name: Tune-d profile activation + shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }} + become: true + when:
_TUNED_PROFILE_NAME_|default("") != "" + + # Provisioning Network workaround + # The script will be executed before os-net-config, in which case, only Provisioning network will have IP + # BOOTPROTO of all interface config files (except provisioning), will be set to "none" to avoid reboot failing to acquire IP on other networks + - block: + - find: + paths: /etc/sysconfig/network-scripts/ + patterns: ifcfg-* + register: ifcfg_files + + - replace: + dest: "{{ item.path }}" + regexp: '^BOOTPROTO=.*' + replace: 'BOOTPROTO=none' + when: + - item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') != "lo" + # This condition will list all the interfaces except the one with valid IP (which is Provisioning network at this stage) + # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4']['address'] is undefined + - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4']['address'] is undefined + with_items: + - "{{ ifcfg_files.files }}" diff --git a/extraconfig/pre_network/config_then_reboot.yaml b/extraconfig/pre_network/config_then_reboot.yaml index ec4d2761..bb0b9511 100644 --- a/extraconfig/pre_network/config_then_reboot.yaml +++ b/extraconfig/pre_network/config_then_reboot.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2014-10-16 +heat_template_version: ocata description: > Do some configuration, then reboot - sometimes needed for early-boot diff --git a/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml new file mode 100644 index 00000000..4ad53cb8 --- /dev/null +++ b/extraconfig/pre_network/host_config_and_reboot.role.j2.yaml @@ -0,0 +1,100 @@ +heat_template_version: ocata + +description: > + Do some configuration, then reboot - sometimes needed for early-boot + changes such as modifying kernel configuration + +parameters: + server: + type: string + {{role}}KernelArgs: + type: string + default: "" + {{role}}TunedProfileName: + type: string + default: "" + {{role}}HostCpusList: + type: string + default: "" + +conditions: + param_exists: + or: + - not: + equals: + - get_param: {{role}}KernelArgs + - "" + - not: + equals: + - get_param: {{role}}TunedProfileName + - "" + +resources: + + HostParametersConfig: + type: OS::Heat::SoftwareConfig + condition: param_exists + properties: + group: ansible + inputs: + - name: _KERNEL_ARGS_ + - name: _TUNED_PROFILE_NAME_ + - name: _HOST_CPUS_LIST_ + outputs: + - name: result + config: + get_file: ansible_host_config.ansible + + HostParametersDeployment: + type: OS::Heat::SoftwareDeployment + condition: param_exists + properties: + name: HostParametersDeployment + server: {get_param: server} + config: {get_resource: HostParametersConfig} + actions: ['CREATE'] # Only do this on CREATE + input_values: + _KERNEL_ARGS_: {get_param: {{role}}KernelArgs} + _TUNED_PROFILE_NAME_: {get_param: {{role}}TunedProfileName} + _HOST_CPUS_LIST_: {get_param: {{role}}HostCpusList} + + RebootConfig: + type: OS::Heat::SoftwareConfig + condition: param_exists + properties: + group: script + config: | + #!/bin/bash + # Stop os-collect-config to avoid any race collecting another + # deployment before reboot happens + systemctl stop os-collect-config.service + /sbin/reboot + + RebootDeployment: + type: OS::Heat::SoftwareDeployment + condition: param_exists + depends_on: HostParametersDeployment + properties: + name: RebootDeployment + server: {get_param: server} + config: {get_resource: RebootConfig} + actions: ['CREATE'] # Only do this on CREATE 
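+      # Assumed rationale: the reboot below kills the deployment agent, so Heat is told not to wait for a completion signal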
+ signal_transport: NO_SIGNAL + +outputs: + result: + value: + get_attr: [HostParametersDeployment, result] + condition: param_exists + stdout: + value: + get_attr: [HostParametersDeployment, deploy_stdout] + condition: param_exists + stderr: + value: + get_attr: [HostParametersDeployment, deploy_stderr] + condition: param_exists + status_code: + value: + get_attr: [HostParametersDeployment, deploy_status_code] + condition: param_exists diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh deleted file mode 100644 index 64c4457e..00000000 --- a/extraconfig/tasks/major_upgrade_block_storage.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -# This runs an upgrade of Cinder Block Storage nodes. -# -set -eu - -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y -q update diff --git a/extraconfig/tasks/major_upgrade_ceph_mon.sh b/extraconfig/tasks/major_upgrade_ceph_mon.sh deleted file mode 100755 index e0d160f1..00000000 --- a/extraconfig/tasks/major_upgrade_ceph_mon.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash -set -eu -set -o pipefail - -echo INFO: starting $(basename "$0") - -# Exit if not running -if ! pidof ceph-mon &> /dev/null; then - echo INFO: ceph-mon is not running, skipping - exit 0 -fi - -# Exit if not Hammer -INSTALLED_VERSION=$(ceph --version | awk '{print $3}') -if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then - echo INFO: version of Ceph installed is not 0.94, skipping - exit 0 -fi - -CEPH_STATUS=$(ceph health | awk '{print $1}') -if [ ${CEPH_STATUS} = HEALTH_ERR ]; then - echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded - exit 1 -fi - -# Useful when upgrading with OSDs num < replica size -if [[ ${ignore_ceph_upgrade_warnings:-False} != [Tt]rue ]]; then - timeout 300 bash -c "while [ ${CEPH_STATUS} != HEALTH_OK ]; do - echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK; - sleep 30; - CEPH_STATUS=$(ceph health | awk '{print $1}') - done" -fi - -MON_PID=$(pidof ceph-mon) -MON_ID=$(hostname -s) - -# Stop daemon using Hammer sysvinit script -service ceph stop mon.${MON_ID} - -# Ensure it's stopped -timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do - sleep 2; -done" - -# Update to Jewel -yum -y -q update ceph-mon ceph - -# Restart/Exit if not on Jewel, only in that case we need the changes -UPDATED_VERSION=$(ceph --version | awk '{print $3}') -if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then - echo WARNING: Ceph was not upgraded, restarting daemons - service ceph start mon.${MON_ID} -elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then - # RPM could own some of these but we can't take risks on the pre-existing files - for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do - chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed - done - - # Replay udev events with newer rules - udevadm trigger - - # Enable systemd unit - systemctl enable ceph-mon.target - systemctl enable ceph-mon@${MON_ID} - systemctl start ceph-mon@${MON_ID} - - # Wait for daemon to be back in the quorum - timeout 300 bash -c "until (ceph quorum_status | jq .quorum_names | grep -sq ${MON_ID}); do - echo WARNING: Waiting for mon.${MON_ID} to re-join quorum; - sleep 10; - done" - - # if tunables become legacy, cluster status will be HEALTH_WARN causing - # upgrade to fail on following node - ceph osd crush tunables default - - echo INFO: Ceph was upgraded to Jewel -else - 
echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention - exit 1 -fi diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh deleted file mode 100644 index a745e723..00000000 --- a/extraconfig/tasks/major_upgrade_ceph_storage.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/bin/bash -# -# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo -# major upgrade workflow. -# -set -eu -set -o pipefail - -UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh - -declare -f special_case_ovs_upgrade_if_needed > $UPGRADE_SCRIPT -# use >> here so we don't lose the declaration we added above -cat >> $UPGRADE_SCRIPT << 'ENDOFCAT' -#!/bin/bash -### DO NOT MODIFY THIS FILE -### This file is automatically delivered to the ceph-storage nodes as part of the -### tripleo upgrades workflow -set -eu - -echo INFO: starting $(basename "$0") - -# Exit if not running -if ! pidof ceph-osd &> /dev/null; then - echo INFO: ceph-osd is not running, skipping - exit 0 -fi - -# Exit if not Hammer -INSTALLED_VERSION=$(ceph --version | awk '{print $3}') -if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then - echo INFO: version of Ceph installed is not 0.94, skipping - exit 0 -fi - -OSD_PIDS=$(pidof ceph-osd) -OSD_IDS=$(ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }') - -# "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb -ceph osd set noout -ceph osd set norebalance -ceph osd set nodeep-scrub -ceph osd set noscrub - -# Stop daemon using Hammer sysvinit script -for OSD_ID in $OSD_IDS; do - service ceph stop osd.${OSD_ID} -done - -# Nice guy will return non-0 only when all failed -timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do - sleep 2; -done" - -special_case_ovs_upgrade_if_needed - -# Update (Ceph to Jewel) -yum -y install python-zaqarclient # needed for os-collect-config -yum -y update - -# Restart/Exit if not on Jewel, only in that case we need the changes -UPDATED_VERSION=$(ceph --version | awk '{print $3}') -if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then - echo WARNING: Ceph was not upgraded, restarting daemon - for OSD_ID in $OSD_IDS; do - service ceph start osd.${OSD_ID} - done -elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then - # RPM could own some of these but we can't take risks on the pre-existing files - for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do - chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed - done - - # Replay udev events with newer rules - udevadm trigger && udevadm settle - - # If on ext4, we need to enforce lower values for name and namespace len - # or ceph-osd will refuse to start, see: http://tracker.ceph.com/issues/16187 - for OSD_ID in $OSD_IDS; do - OSD_FS=$(df -l --output=fstype /var/lib/ceph/osd/ceph-${OSD_ID} | tail -n +2) - if [ ${OSD_FS} = ext4 ]; then - crudini --set /etc/ceph/ceph.conf global osd_max_object_name_len 256 - crudini --set /etc/ceph/ceph.conf global osd_max_object_namespace_len 64 - fi - done - - # Enable systemd unit - systemctl enable ceph-osd.target - for OSD_ID in $OSD_IDS; do - systemctl enable ceph-osd@${OSD_ID} - systemctl start ceph-osd@${OSD_ID} - done - - echo INFO: Ceph was upgraded to Jewel -else - echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention - exit 1 -fi - -ceph osd unset noout -ceph osd unset norebalance -ceph osd unset nodeep-scrub -ceph osd unset noscrub -ENDOFCAT - -# ensure the permissions are OK -chmod 0755 
$UPGRADE_SCRIPT diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh deleted file mode 100644 index 7a3e1073..00000000 --- a/extraconfig/tasks/major_upgrade_compute.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# -# This delivers the compute upgrade script to be invoked as part of the tripleo -# major upgrade workflow. -# -set -eu - -UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh - -cat > $UPGRADE_SCRIPT << ENDOFCAT -### DO NOT MODIFY THIS FILE -### This file is automatically delivered to the compute nodes as part of the -### tripleo upgrades workflow - -set -eu - -# pin nova to kilo (messaging +-1) for the nova-compute service - -crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute - -$(declare -f special_case_ovs_upgrade_if_needed) -special_case_ovs_upgrade_if_needed - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y update - -# Due to bug#1640177 we need to restart compute agent -echo "Restarting openstack ceilometer agent compute" -systemctl restart openstack-ceilometer-compute - -ENDOFCAT - -# ensure the permissions are OK -chmod 0755 $UPGRADE_SCRIPT - diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh index 6bfe1239..4b323854 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh @@ -41,7 +41,7 @@ done # https://bugzilla.redhat.com/show_bug.cgi?id=1341968 # # The default is to determine automatically if upgrade is needed based -# on mysql package versionning, but this can be overriden manually +# on mysql package versioning, but this can be overridden manually # to support specific upgrade scenario # Calling this function will set the DO_MYSQL_UPGRADE variable which is used @@ -50,6 +50,7 @@ mysql_need_update if [[ -n $(is_bootstrap_node) ]]; then if [ $DO_MYSQL_UPGRADE -eq 1 ]; then + backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction" mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql" cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR" fi @@ -108,7 +109,7 @@ yum -y -q update # We need to ensure at least those two configuration settings, otherwise # mariadb 10.1+ won't activate galera replication. # wsrep_cluster_address must only be set though, its value does not -# matter because it's overriden by the galera resource agent. +# matter because it's overridden by the galera resource agent. 
cat >> /etc/my.cnf.d/galera.cnf <<EOF [mysqld] wsrep_on = ON diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh index 6748f891..a3cbd945 100755 --- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh +++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh @@ -57,10 +57,10 @@ if [[ -n $(is_bootstrap_node) ]]; then # TODO: check if this can be triggered in puppet and removed from here ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types cinder-manage db sync - glance-manage --config-file=/etc/glance/glance-registry.conf db_sync + glance-manage db_sync heat-manage --config-file /etc/heat/heat.conf db_sync keystone-manage db_sync - neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head + neutron-db-manage upgrade heads nova-manage db sync nova-manage api_db sync nova-manage db online_data_migrations diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh deleted file mode 100644 index d9d1b4d5..00000000 --- a/extraconfig/tasks/major_upgrade_object_storage.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -# -# This delivers the swift-storage upgrade script to be invoked as part of the tripleo -# major upgrade workflow. -# -set -eu - -UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh - -cat > $UPGRADE_SCRIPT << ENDOFCAT -### DO NOT MODIFY THIS FILE -### This file is automatically delivered to the swift-storage nodes as part of the -### tripleo upgrades workflow - -set -eu - -function systemctl_swift { - action=\$1 - for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \ - openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \ - openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do - systemctl \$action \$S - done -} - -$(declare -f special_case_ovs_upgrade_if_needed) -special_case_ovs_upgrade_if_needed - -systemctl_swift stop - -yum -y install python-zaqarclient # needed for os-collect-config -yum -y update - -systemctl_swift start - - - -ENDOFCAT - -# ensure the permissions are OK -chmod 0755 $UPGRADE_SCRIPT - diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml index b63aafbd..74d3be71 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker.yaml +++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml @@ -18,10 +18,6 @@ parameters: constraints: - allowed_values: ['auto', 'yes', 'no'] default: 'auto' - IgnoreCephUpgradeWarnings: - type: boolean - default: false - description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean KeepSaharaServicesOnUpgrade: type: boolean default: true @@ -33,33 +29,6 @@ resources: # map_merge with input_values instead of feeding params into scripts # via str_replace on bash snippets - CephMonUpgradeConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - ignore_ceph_upgrade_warnings='IGNORE_CEPH_UPGRADE_WARNINGS' - params: - IGNORE_CEPH_UPGRADE_WARNINGS: {get_param: IgnoreCephUpgradeWarnings} - - get_file: major_upgrade_ceph_mon.sh - - CephMonUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: 
{get_param: [servers, Controller]} - config: {get_resource: CephMonUpgradeConfig} - input_values: {get_param: input_values} - update_policy: - batch_create: - max_batch_size: 1 - rolling_update: - max_batch_size: 1 - ControllerPacemakerUpgradeConfig_Step1: type: OS::Heat::SoftwareConfig properties: @@ -86,30 +55,11 @@ resources: ControllerPacemakerUpgradeDeployment_Step1: type: OS::Heat::SoftwareDeploymentGroup - depends_on: CephMonUpgradeDeployment properties: servers: {get_param: [servers, Controller]} config: {get_resource: ControllerPacemakerUpgradeConfig_Step1} input_values: {get_param: input_values} - BlockStorageUpgradeConfig: - type: OS::Heat::SoftwareConfig - depends_on: ControllerPacemakerUpgradeDeployment_Step1 - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_block_storage.sh - - BlockStorageUpgradeDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, BlockStorage]} - config: {get_resource: BlockStorageUpgradeConfig} - input_values: {get_param: input_values} - ControllerPacemakerUpgradeConfig_Step2: type: OS::Heat::SoftwareConfig properties: @@ -136,7 +86,7 @@ resources: ControllerPacemakerUpgradeDeployment_Step2: type: OS::Heat::SoftwareDeploymentGroup - depends_on: BlockStorageUpgradeDeployment + depends_on: ControllerPacemakerUpgradeDeployment_Step1 properties: servers: {get_param: [servers, Controller]} config: {get_resource: ControllerPacemakerUpgradeConfig_Step2} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml deleted file mode 100644 index c308720b..00000000 --- a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml +++ /dev/null @@ -1,96 +0,0 @@ -heat_template_version: ocata -description: 'Upgrade for Pacemaker deployments' - -parameters: - - servers: - type: json - input_values: - type: json - description: input values for the software deployments - - UpgradeInitCommand: - type: string - description: | - Command or script snippet to run on all overcloud nodes to - initialize the upgrade process. E.g. a repository switch. 
- default: '' - UpgradeLevelNovaCompute: - type: string - description: Nova Compute upgrade level - default: '' - -resources: - - # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004 - - UpgradeInitConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - "#!/bin/bash\n\n" - - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - - get_param: UpgradeInitCommand - - # TODO(jistr): for Mitaka->Newton upgrades and further we can use - # map_merge with input_values instead of feeding params into scripts - # via str_replace on bash snippets - - # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here - # Would be better to have a common config for all roles - ComputeDeliverUpgradeScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - str_replace: - template: | - #!/bin/bash - upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE' - params: - UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute} - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_compute.sh - - ObjectStorageDeliverUpgradeScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_object_storage.sh - - CephStorageDeliverUpgradeScriptConfig: - type: OS::Heat::SoftwareConfig - properties: - group: script - config: - list_join: - - '' - - - get_file: pacemaker_common_functions.sh - - get_file: major_upgrade_ceph_storage.sh - -{% for role in roles %} - UpgradeInit{{role.name}}Deployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: UpgradeInitConfig} - input_values: {get_param: input_values} - - {% if not role.name in ['Controller', 'BlockStorage'] %} - {{role.name}}DeliverUpgradeScriptDeployment: - type: OS::Heat::SoftwareDeploymentGroup - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig} - input_values: {get_param: input_values} - {% endif %} -{% endfor %} diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh index 6d02acc8..ae22a1e7 100644 --- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh +++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh @@ -83,7 +83,6 @@ function services_to_migrate { openstack-cinder-api-clone openstack-cinder-scheduler-clone openstack-glance-api-clone - openstack-glance-registry-clone openstack-gnocchi-metricd-clone openstack-gnocchi-statsd-clone openstack-heat-api-cfn-clone diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh new file mode 100755 index 00000000..b7771e33 --- /dev/null +++ b/extraconfig/tasks/run_puppet.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +function run_puppet { + set -eux + local manifest="$1" + local role="$2" + local step="$3" + local rc=0 + + export FACTER_deploy_config_name="${role}Deployment_Step${step}" + if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then + set +e + puppet apply --detailed-exitcodes "${manifest}" + rc=$? 
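+    # --detailed-exitcodes: rc 0 means the run succeeded with no changes, rc 2 means changes were applied successfully; both are treated as success below, while 4 and 6 indicate failures.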
+ echo "puppet apply exited with exit code $rc" + else + echo "Step${step} doesn't exist for ${role}" + fi + set -e + + if [ $rc -eq 2 -o $rc -eq 0 ]; then + set +xu + return 0 + fi + set +xu + return $rc +} diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml new file mode 100644 index 00000000..d17f78ae --- /dev/null +++ b/extraconfig/tasks/swift-ring-deploy.yaml @@ -0,0 +1,31 @@ +heat_template_version: ocata + +parameters: + servers: + type: json + SwiftRingGetTempurl: + default: '' + description: A temporary Swift URL to download rings from. + type: string + +resources: + SwiftRingDeployConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: swift_ring_get_tempurl + config: | + #!/bin/sh + pushd / + curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true + popd + + SwiftRingDeploy: + type: OS::Heat::SoftwareDeployments + properties: + name: SwiftRingDeploy + config: {get_resource: SwiftRingDeployConfig} + servers: {get_param: servers} + input_values: + swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl} diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml new file mode 100644 index 00000000..440c6883 --- /dev/null +++ b/extraconfig/tasks/swift-ring-update.yaml @@ -0,0 +1,42 @@ +heat_template_version: ocata + +parameters: + servers: + type: json + SwiftRingPutTempurl: + default: '' + description: A temporary Swift URL to upload rings to. + type: string + +resources: + SwiftRingUpdateConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: swift_ring_put_tempurl + config: | + #!/bin/sh + TMP_DATA=$(mktemp -d) + function cleanup { + rm -Rf "$TMP_DATA" + } + trap cleanup EXIT + # sanity check in case rings are not consistent within cluster + swift-recon --md5 | grep -q "doesn't match" && exit 1 + pushd ${TMP_DATA} + tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/* + resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz` + popd + if [ "$resp" != "201" ]; then + exit 1 + fi + + SwiftRingUpdate: + type: OS::Heat::SoftwareDeployments + properties: + name: SwiftRingUpdate + config: {get_resource: SwiftRingUpdateConfig} + servers: {get_param: servers} + input_values: + swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl} diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh new file mode 100644 index 00000000..24211ab0 --- /dev/null +++ b/extraconfig/tasks/tripleo_upgrade_node.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# +# This delivers the operator driven upgrade script to be invoked as part of +# the tripleo major upgrade workflow. The utility 'upgrade-non-controller.sh' +# is used from the undercloud to invoke the /root/tripleo_upgrade_node.sh +# +set -eu + +UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh + +cat > $UPGRADE_SCRIPT << ENDOFCAT +### DO NOT MODIFY THIS FILE +### This file is automatically delivered to those nodes where the +### disable_upgrade_deployment flag is set in roles_data.yaml. 
+ +set -eu +NOVA_COMPUTE="" +if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then + NOVA_COMPUTE="true" +fi +SWIFT_STORAGE="" +if hiera -c /etc/puppet/hiera.yaml service_names | grep swift_storage ; then + SWIFT_STORAGE="true" +fi + +DEBUG="true" +SCRIPT_NAME=$(basename $0) +$(declare -f log_debug) +$(declare -f manage_systemd_service) +$(declare -f systemctl_swift) + +# pin nova messaging +-1 for the nova-compute service +if [[ -n \$NOVA_COMPUTE ]]; then + crudini --set /etc/nova/nova.conf upgrade_levels compute auto +fi + +if [[ -n \$SWIFT_STORAGE ]]; then + systemctl_swift stop +fi + +yum -y update + +if [[ -n \$SWIFT_STORAGE ]]; then + systemctl_swift start +fi +# Due to bug#1640177 we need to restart compute agent +if [[ -n \$NOVA_COMPUTE ]]; then + log_debug "Restarting openstack ceilometer agent compute" + systemctl restart openstack-ceilometer-compute +fi + +# Apply puppet manifest to converge just right after the ${ROLE} upgrade +$(declare -f run_puppet) +for step in 1 2 3 4 5 6; do + log_debug "Running puppet step \$step for ${ROLE}" + if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then + log_debug "Puppet failure at step \${step}" + exit 1 + fi + log_debug "Completed puppet step \$step" +done + +log_debug "TripleO upgrade run completed." + +ENDOFCAT + +# ensure the permissions are OK +chmod 0755 $UPGRADE_SCRIPT + diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh index 74af7b02..4c87373e 100755 --- a/extraconfig/tasks/yum_update.sh +++ b/extraconfig/tasks/yum_update.sh @@ -10,6 +10,11 @@ echo "Started yum_update.sh on server $deploy_server_id at `date`" echo -n "false" > $heat_outputs_path.update_managed_packages +if [ -f /.dockerenv ]; then + echo "Not running due to running inside a container" + exit 0 +fi + if [[ -z "$update_identifier" ]]; then echo "Not running due to unset update_identifier" exit 0 @@ -42,7 +47,10 @@ if [[ "$list_updates" == "" ]]; then exit 0 fi -pacemaker_status=$(systemctl is-active pacemaker) +pacemaker_status="" +if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then + pacemaker_status=$(systemctl is-active pacemaker) +fi # Fix the redis/rabbit resource start/stop timeouts. 
See https://bugs.launchpad.net/tripleo/+bug/1633455 # and https://bugs.launchpad.net/tripleo/+bug/1634851 @@ -62,9 +70,6 @@ if [[ "$pacemaker_status" == "active" && \ fi fi -# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205 -special_case_ovs_upgrade_if_needed - if [[ "$pacemaker_status" == "active" ]] ; then echo "Pacemaker running, stopping cluster node and doing full package update" node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*") diff --git a/firstboot/install_vrouter_kmod.yaml b/firstboot/install_vrouter_kmod.yaml new file mode 100644 index 00000000..e936e605 --- /dev/null +++ b/firstboot/install_vrouter_kmod.yaml @@ -0,0 +1,105 @@ +heat_template_version: ocata + +parameters: + ContrailRepo: + type: string + default: http://192.168.24.1/contrail + VrouterPhysicalInterface: + default: 'eth0' + description: vRouter physical interface + type: string + +description: > + Prepares vhost0 interface to be used by os-net-config + +resources: + userdata: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: vrouter_module_config} + + vrouter_module_config: + type: OS::Heat::SoftwareConfig + properties: + config: + str_replace: + template: | + #!/bin/bash + sed -i '/\[main\]/a \ \ \ \ \parser = future' /etc/puppet/puppet.conf + cat <<EOF > /etc/yum.repos.d/contrail.repo + [Contrail] + name=Contrail Repo + baseurl=$contrail_repo + enabled=1 + gpgcheck=0 + protect=1 + EOF + if [[ `hostname |awk -F"-" '{print $2}'` == "novacompute" || `hostname |awk -F"-" '{print $2}'` == "contrailtsn" ]]; then + yum install -y contrail-vrouter-utils + function pkt_setup () { + for f in /sys/class/net/$1/queues/rx-* + do + q="$(echo $f | cut -d '-' -f2)" + r=$(($q%32)) + s=$(($q/32)) + ((mask=1<<$r)) + str=(`printf "%x" $mask`) + if [ $s -gt 0 ]; then + for ((i=0; i < $s; i++)) + do + str+=,00000000 + done + fi + echo $str > $f/rps_cpus + done + ifconfig $1 up + } + function insert_vrouter() { + insmod /tmp/vrouter.ko + if [ -f /sys/class/net/pkt1/queues/rx-0/rps_cpus ]; then + pkt_setup pkt1 + fi + if [ -f /sys/class/net/pkt2/queues/rx-0/rps_cpus ]; then + pkt_setup pkt2 + fi + if [ -f /sys/class/net/pkt3/queues/rx-0/rps_cpus ]; then + pkt_setup pkt3 + fi + DEV_MAC=$(cat /sys/class/net/$phy_int/address) + vif --create vhost0 --mac $DEV_MAC + vif --add $phy_int --mac $DEV_MAC --vrf 0 --vhost-phys --type physical + vif --add vhost0 --mac $DEV_MAC --vrf 0 --type vhost --xconnect $phy_int + ip link set vhost0 up + return 0 + } + yumdownloader contrail-vrouter --destdir /tmp + cd /tmp + rpm2cpio /tmp/contrail-vrouter*.rpm | cpio -idmv + cp `find /tmp/lib/modules -name vrouter.ko |tail -1` /tmp + insert_vrouter + if [[ `ifconfig $dev |grep "inet "` ]]; then + def_gw='' + if [[ `ip route show |grep default|grep $dev` ]]; then + def_gw=`ip route show |grep default|grep $dev|awk '{print $3}'` + fi + ip=`ifconfig $dev |grep "inet "|awk '{print $2}'` + mask=`ifconfig $dev |grep "inet "|awk '{print $4}'` + ip address delete $ip/$mask dev $dev + ip address add $ip/$mask dev vhost0 + if [[ $def_gw ]]; then + ip route add default via $def_gw + fi + fi + fi + params: + $phy_int: {get_param: VrouterPhysicalInterface} + $contrail_repo: {get_param: ContrailRepo} + +outputs: + # This means get_resource from the parent template will get the userdata, see: + # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent + # Note this is new-for-kilo, an alternative is 
returning a value then using + # get_attr in the parent template instead. + OS::stack_id: + value: {get_resource: userdata} diff --git a/firstboot/os-net-config-mappings.yaml b/firstboot/os-net-config-mappings.yaml index d7e0c524..f82bc19f 100644 --- a/firstboot/os-net-config-mappings.yaml +++ b/firstboot/os-net-config-mappings.yaml @@ -9,8 +9,28 @@ description: > nic1: "00:c8:7c:e6:f0:2e" node2: nic1: "00:18:7d:99:0c:b6" - This will result in the first nodeN entry where a mac matches a - local device being written as a mapping file for os-net-config in + node3: + dmiString: 'system-uuid' + id: 'A8C85861-1B16-4803-8689-AFC62984F8F6' + nic1: em3 + # Dell PowerEdge + nodegroup1: + dmiString: "system-product-name" + id: "PowerEdge R630" + nic1: em3 + nic2: em1 + nic3: em2 + # Cisco UCS B200-M4" + nodegroup2: + dmiString: "system-product-name" + id: "UCSB-B200-M4" + nic1: enp7s0 + nic2: enp6s0 + + This will result in the first node* entry where either: + a) a mac matches a local device + or b) a DMI String matches the specified id + being written as a mapping file for os-net-config in /etc/os-net-config/mapping.yaml parameters: @@ -47,15 +67,36 @@ resources: echo '$node_lookup' | python -c " import json import sys + import copy + from subprocess import PIPE, Popen import yaml + + def write_mapping_file(interface_mapping): + with open('/etc/os-net-config/mapping.yaml', 'w') as f: + yaml.safe_dump(interface_mapping, f, default_flow_style=False) + input = sys.stdin.readline() or '{}' data = json.loads(input) for node in data: + interface_mapping = {'interface_mapping': + copy.deepcopy(data[node])} + if 'dmiString' in interface_mapping['interface_mapping']: + del interface_mapping['interface_mapping']['dmiString'] + if 'id' in interface_mapping['interface_mapping']: + del interface_mapping['interface_mapping']['id'] + # Match on mac addresses first if any(x in '$eth_addr'.split(',') for x in data[node].values()): - interface_mapping = {'interface_mapping': data[node]} - with open('/etc/os-net-config/mapping.yaml', 'w') as f: - yaml.safe_dump(interface_mapping, f, default_flow_style=False) + write_mapping_file(interface_mapping) break + # If data contain dmiString and id keys, try to match node(group) + if 'dmiString' in data[node] and 'id' in data[node]: + ps = Popen([ 'dmidecode', + '--string', data[node].get('dmiString') ], + stdout=PIPE) + out, err = ps.communicate() + if data[node].get('id') == out.rstrip(): + write_mapping_file(interface_mapping) + break " params: $node_lookup: {get_param: NetConfigDataLookup} diff --git a/firstboot/userdata_root_password.yaml b/firstboot/userdata_root_password.yaml new file mode 100644 index 00000000..63dd5a9c --- /dev/null +++ b/firstboot/userdata_root_password.yaml @@ -0,0 +1,38 @@ +heat_template_version: ocata + +description: > + Uses cloud-init to enable root logins and set the root password. + Note this is less secure than the default configuration and may not be + appropriate for production environments, it's intended for illustration + and development/debugging only. 
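(For illustration only, not part of this patch: first-boot templates such as userdata_root_password.yaml are typically wired in through an environment file mapping OS::TripleO::NodeUserData. A minimal sketch, with a placeholder password and a relative path that depends on where the environment file lives:

    resource_registry:
      OS::TripleO::NodeUserData: ../firstboot/userdata_root_password.yaml

    parameter_defaults:
      NodeRootPassword: 'change-me'   # placeholder only; pass a real secret at deploy time
)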
+ +parameters: + NodeRootPassword: + description: Root password for the nodes + hidden: true + type: string + +resources: + userdata: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: root_config} + + root_config: + type: OS::Heat::CloudConfig + properties: + cloud_config: + ssh_pwauth: true + disable_root: false + chpasswd: + list: + str_replace: + template: "root:PASSWORD" + params: + PASSWORD: {get_param: NodeRootPassword} + expire: False + +outputs: + OS::stack_id: + value: {get_resource: userdata} diff --git a/net-config-noop.yaml b/net-config-noop.yaml index ad580cf9..be05cc11 100644 --- a/net-config-noop.yaml +++ b/net-config-noop.yaml @@ -38,8 +38,8 @@ resources: OsNetConfigImpl: type: OS::Heat::StructuredConfig properties: - group: os-apply-config - config: + group: apply-config + config: {} outputs: OS::stack_id: diff --git a/net-config-undercloud.yaml b/net-config-undercloud.yaml new file mode 100644 index 00000000..9be51c0f --- /dev/null +++ b/net-config-undercloud.yaml @@ -0,0 +1,77 @@ +heat_template_version: ocata +description: > + Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network. +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ManagementIpSubnet: + default: '' + description: IP address/subnet on the management network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: comma_delimited_list +resources: + OsNetConfigImpl: + type: OS::Heat::SoftwareConfig + properties: + group: script + inputs: + - name: disable_configure_safe_defaults + default: true + config: + str_replace: + template: + get_file: network/scripts/run-os-net-config.sh + params: + $network_config: + network_config: + - type: ovs_bridge + name: br-ctlplane + use_dhcp: false + dns_servers: + get_param: DnsServers + addresses: + - ip_netmask: + list_join: + - / + - - get_param: ControlPlaneIp + - get_param: ControlPlaneSubnetCidr + members: + - type: interface + name: eth1 + # force the MAC address of the bridge to this interface + primary: true +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. 
+ value: + get_resource: OsNetConfigImpl + diff --git a/network/endpoints/build_endpoint_map.py b/network/endpoints/build_endpoint_map.py index 964f58f7..990cbabc 100755 --- a/network/endpoints/build_endpoint_map.py +++ b/network/endpoints/build_endpoint_map.py @@ -191,7 +191,7 @@ def template_endpoint_items(config): def generate_endpoint_map_template(config): return collections.OrderedDict([ - ('heat_template_version', '2015-04-30'), + ('heat_template_version', 'ocata'), ('description', 'A map of OpenStack endpoints. Since the endpoints ' 'are URLs, we need to have brackets around IPv6 IP addresses. The ' 'inputs to these parameters come from net_ip_uri_map, which will ' @@ -280,8 +280,9 @@ def main(): try: if options.check: if not check_up_to_date(options.output_file, options.input_file): - print('EndpointMap template does not match input data', - file=sys.stderr) + print('EndpointMap template does not match input data. Please ' + 'run the build_endpoint_map.py tool to update the ' + 'template.', file=sys.stderr) sys.exit(2) else: build_endpoint_map(options.output_file, options.input_file) diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml index 0178c4dd..277bd676 100644 --- a/network/endpoints/endpoint_data.yaml +++ b/network/endpoints/endpoint_data.yaml @@ -28,6 +28,96 @@ Ceilometer: net_param: CeilometerApi port: 8777 +ContrailConfig: + Internal: + net_param: ContrailConfig + Public: + net_param: Public + Admin: + net_param: ContrailConfig + port: 8082 + +ContrailDiscovery: + Internal: + net_param: ContrailConfig + Public: + net_param: Public + Admin: + net_param: ContrailConfig + port: 5998 + +ContrailAnalyticsCollectorHttp: + Internal: + net_param: ContrailAnalytics + Public: + net_param: Public + Admin: + net_param: ContrailAnalytics + port: 8089 + +ContrailAnalyticsApi: + Internal: + net_param: ContrailAnalytics + Public: + net_param: Public + Admin: + net_param: ContrailAnalytics + port: 8081 + +ContrailAnalyticsHttp: + Internal: + net_param: ContrailAnalytics + Public: + net_param: Public + Admin: + net_param: ContrailAnalytics + port: 8090 + +ContrailAnalyticsCollectorSandesh: + Internal: + net_param: ContrailAnalytics + Public: + net_param: Public + Admin: + net_param: ContrailAnalytics + port: 8086 + +ContrailAnalyticsRedis: + Internal: + net_param: ContrailAnalytics + Public: + net_param: Public + Admin: + net_param: ContrailAnalytics + port: 6379 + +ContrailWebuiHttp: + Internal: + net_param: ContrailConfig + Public: + net_param: Public + Admin: + net_param: ContrailConfig + port: 8080 + +ContrailWebuiHttps: + Internal: + net_param: ContrailConfig + Public: + net_param: Public + Admin: + net_param: ContrailConfig + port: 8143 + +Ec2Api: + Internal: + net_param: Ec2Api + Public: + net_param: Public + Admin: + net_param: Ec2Api + port: 8788 + Gnocchi: Internal: net_param: GnocchiApi @@ -67,6 +157,15 @@ Cinder: V3: /v3/%(tenant_id)s port: 8776 +Congress: + Internal: + net_param: CongressApi + Public: + net_param: Public + Admin: + net_param: CongressApi + port: 1789 + Glance: Internal: net_param: GlanceApi @@ -76,11 +175,6 @@ Glance: net_param: GlanceApi port: 9292 -GlanceRegistry: - Internal: - net_param: GlanceRegistry - port: 9191 - Mysql: Internal: net_param: Mysql @@ -205,6 +299,21 @@ Nova: '': /v2.1 port: 8774 +NovaPlacement: + Internal: + net_param: NovaPlacement + uri_suffixes: + '': /placement + Public: + net_param: Public + uri_suffixes: + '': /placement + Admin: + net_param: NovaPlacement + uri_suffixes: + '': /placement + port: 
8778 + NovaVNCProxy: Internal: net_param: NovaApi @@ -262,6 +371,15 @@ Sahara: '': /v1.1/%(tenant_id)s port: 8386 +Tacker: + Internal: + net_param: TackerApi + Public: + net_param: Public + Admin: + net_param: TackerApi + port: 9890 + Ironic: Internal: net_param: IronicApi @@ -294,3 +412,13 @@ ZaqarWebSocket: Admin: net_param: ZaqarApi port: 9000 + protocol: ws + +Octavia: + Internal: + net_param: OctaviaApi + Public: + net_param: Public + Admin: + net_param: OctaviaApi + port: 9876 diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml index af89d0b7..fecac0af 100644 --- a/network/endpoints/endpoint_map.yaml +++ b/network/endpoints/endpoint_map.yaml @@ -34,10 +34,48 @@ parameters: CinderAdmin: {protocol: http, port: '8776', host: IP_ADDRESS} CinderInternal: {protocol: http, port: '8776', host: IP_ADDRESS} CinderPublic: {protocol: http, port: '8776', host: IP_ADDRESS} + CongressAdmin: {protocol: http, port: '1789', host: IP_ADDRESS} + CongressInternal: {protocol: http, port: '1789', host: IP_ADDRESS} + CongressPublic: {protocol: http, port: '1789', host: IP_ADDRESS} + ContrailAnalyticsApiAdmin: {protocol: http, port: '8081', host: IP_ADDRESS} + ContrailAnalyticsApiInternal: {protocol: http, port: '8081', host: IP_ADDRESS} + ContrailAnalyticsApiPublic: {protocol: http, port: '8081', host: IP_ADDRESS} + ContrailAnalyticsCollectorHttpAdmin: {protocol: http, port: '8089', + host: IP_ADDRESS} + ContrailAnalyticsCollectorHttpInternal: {protocol: http, port: '8089', + host: IP_ADDRESS} + ContrailAnalyticsCollectorHttpPublic: {protocol: http, port: '8089', + host: IP_ADDRESS} + ContrailAnalyticsCollectorSandeshAdmin: {protocol: http, port: '8086', + host: IP_ADDRESS} + ContrailAnalyticsCollectorSandeshInternal: {protocol: http, port: '8086', + host: IP_ADDRESS} + ContrailAnalyticsCollectorSandeshPublic: {protocol: http, port: '8086', + host: IP_ADDRESS} + ContrailAnalyticsHttpAdmin: {protocol: http, port: '8090', host: IP_ADDRESS} + ContrailAnalyticsHttpInternal: {protocol: http, port: '8090', host: IP_ADDRESS} + ContrailAnalyticsHttpPublic: {protocol: http, port: '8090', host: IP_ADDRESS} + ContrailAnalyticsRedisAdmin: {protocol: http, port: '6379', host: IP_ADDRESS} + ContrailAnalyticsRedisInternal: {protocol: http, port: '6379', host: IP_ADDRESS} + ContrailAnalyticsRedisPublic: {protocol: http, port: '6379', host: IP_ADDRESS} + ContrailConfigAdmin: {protocol: http, port: '8082', host: IP_ADDRESS} + ContrailConfigInternal: {protocol: http, port: '8082', host: IP_ADDRESS} + ContrailConfigPublic: {protocol: http, port: '8082', host: IP_ADDRESS} + ContrailDiscoveryAdmin: {protocol: http, port: '5998', host: IP_ADDRESS} + ContrailDiscoveryInternal: {protocol: http, port: '5998', host: IP_ADDRESS} + ContrailDiscoveryPublic: {protocol: http, port: '5998', host: IP_ADDRESS} + ContrailWebuiHttpAdmin: {protocol: http, port: '8080', host: IP_ADDRESS} + ContrailWebuiHttpInternal: {protocol: http, port: '8080', host: IP_ADDRESS} + ContrailWebuiHttpPublic: {protocol: http, port: '8080', host: IP_ADDRESS} + ContrailWebuiHttpsAdmin: {protocol: http, port: '8143', host: IP_ADDRESS} + ContrailWebuiHttpsInternal: {protocol: http, port: '8143', host: IP_ADDRESS} + ContrailWebuiHttpsPublic: {protocol: http, port: '8143', host: IP_ADDRESS} + Ec2ApiAdmin: {protocol: http, port: '8788', host: IP_ADDRESS} + Ec2ApiInternal: {protocol: http, port: '8788', host: IP_ADDRESS} + Ec2ApiPublic: {protocol: http, port: '8788', host: IP_ADDRESS} GlanceAdmin: {protocol: http, port: '9292', host: 
IP_ADDRESS} GlanceInternal: {protocol: http, port: '9292', host: IP_ADDRESS} GlancePublic: {protocol: http, port: '9292', host: IP_ADDRESS} - GlanceRegistryInternal: {protocol: http, port: '9191', host: IP_ADDRESS} GnocchiAdmin: {protocol: http, port: '8041', host: IP_ADDRESS} GnocchiInternal: {protocol: http, port: '8041', host: IP_ADDRESS} GnocchiPublic: {protocol: http, port: '8041', host: IP_ADDRESS} @@ -67,9 +105,15 @@ parameters: NovaAdmin: {protocol: http, port: '8774', host: IP_ADDRESS} NovaInternal: {protocol: http, port: '8774', host: IP_ADDRESS} NovaPublic: {protocol: http, port: '8774', host: IP_ADDRESS} + NovaPlacementAdmin: {protocol: http, port: '8778', host: IP_ADDRESS} + NovaPlacementInternal: {protocol: http, port: '8778', host: IP_ADDRESS} + NovaPlacementPublic: {protocol: http, port: '8778', host: IP_ADDRESS} NovaVNCProxyAdmin: {protocol: http, port: '6080', host: IP_ADDRESS} NovaVNCProxyInternal: {protocol: http, port: '6080', host: IP_ADDRESS} NovaVNCProxyPublic: {protocol: http, port: '6080', host: IP_ADDRESS} + OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS} + OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS} + OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS} PankoAdmin: {protocol: http, port: '8779', host: IP_ADDRESS} PankoInternal: {protocol: http, port: '8779', host: IP_ADDRESS} PankoPublic: {protocol: http, port: '8779', host: IP_ADDRESS} @@ -79,6 +123,9 @@ parameters: SwiftAdmin: {protocol: http, port: '8080', host: IP_ADDRESS} SwiftInternal: {protocol: http, port: '8080', host: IP_ADDRESS} SwiftPublic: {protocol: http, port: '8080', host: IP_ADDRESS} + TackerAdmin: {protocol: http, port: '9890', host: IP_ADDRESS} + TackerInternal: {protocol: http, port: '9890', host: IP_ADDRESS} + TackerPublic: {protocol: http, port: '9890', host: IP_ADDRESS} ZaqarAdmin: {protocol: http, port: '8888', host: IP_ADDRESS} ZaqarInternal: {protocol: http, port: '8888', host: IP_ADDRESS} ZaqarPublic: {protocol: http, port: '8888', host: IP_ADDRESS} @@ -1808,173 +1855,173 @@ outputs: template: NETWORK_uri - ':' - get_param: [EndpointMap, CinderPublic, port] - GlanceAdmin: + CongressAdmin: host: str_replace: template: - get_param: [EndpointMap, GlanceAdmin, host] + get_param: [EndpointMap, CongressAdmin, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceApiNetwork] + get_param: [ServiceNetMap, CongressApiNetwork] template: NETWORK_uri host_nobrackets: str_replace: template: - get_param: [EndpointMap, GlanceAdmin, host] + get_param: [EndpointMap, CongressAdmin, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] port: - get_param: [EndpointMap, GlanceAdmin, port] + get_param: [EndpointMap, CongressAdmin, port] protocol: - get_param: [EndpointMap, GlanceAdmin, protocol] + get_param: [EndpointMap, CongressAdmin, protocol] uri: list_join: - '' - - - get_param: [EndpointMap, GlanceAdmin, protocol] + - - get_param: [EndpointMap, CongressAdmin, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlanceAdmin, host] + get_param: [EndpointMap, CongressAdmin, host] params: CLOUDNAME: get_param: - 
CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceApiNetwork] + get_param: [ServiceNetMap, CongressApiNetwork] template: NETWORK_uri - ':' - - get_param: [EndpointMap, GlanceAdmin, port] + - get_param: [EndpointMap, CongressAdmin, port] uri_no_suffix: list_join: - '' - - - get_param: [EndpointMap, GlanceAdmin, protocol] + - - get_param: [EndpointMap, CongressAdmin, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlanceAdmin, host] + get_param: [EndpointMap, CongressAdmin, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceApiNetwork] + get_param: [ServiceNetMap, CongressApiNetwork] template: NETWORK_uri - ':' - - get_param: [EndpointMap, GlanceAdmin, port] - GlanceInternal: + - get_param: [EndpointMap, CongressAdmin, port] + CongressInternal: host: str_replace: template: - get_param: [EndpointMap, GlanceInternal, host] + get_param: [EndpointMap, CongressInternal, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceApiNetwork] + get_param: [ServiceNetMap, CongressApiNetwork] template: NETWORK_uri host_nobrackets: str_replace: template: - get_param: [EndpointMap, GlanceInternal, host] + get_param: [EndpointMap, CongressInternal, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] port: - get_param: [EndpointMap, GlanceInternal, port] + get_param: [EndpointMap, CongressInternal, port] protocol: - get_param: [EndpointMap, GlanceInternal, protocol] + get_param: [EndpointMap, CongressInternal, protocol] uri: list_join: - '' - - - get_param: [EndpointMap, GlanceInternal, protocol] + - - get_param: [EndpointMap, CongressInternal, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlanceInternal, host] + get_param: [EndpointMap, CongressInternal, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceApiNetwork] + get_param: [ServiceNetMap, CongressApiNetwork] template: NETWORK_uri - ':' - - get_param: [EndpointMap, GlanceInternal, port] + - get_param: [EndpointMap, CongressInternal, port] uri_no_suffix: list_join: - '' - - - get_param: [EndpointMap, GlanceInternal, protocol] + - - get_param: [EndpointMap, CongressInternal, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlanceInternal, host] + get_param: [EndpointMap, CongressInternal, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceApiNetwork] + - get_param: [ServiceNetMap, CongressApiNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceApiNetwork] + get_param: [ServiceNetMap, CongressApiNetwork] template: 
NETWORK_uri - ':' - - get_param: [EndpointMap, GlanceInternal, port] - GlancePublic: + - get_param: [EndpointMap, CongressInternal, port] + CongressPublic: host: str_replace: template: - get_param: [EndpointMap, GlancePublic, host] + get_param: [EndpointMap, CongressPublic, host] params: CLOUDNAME: get_param: @@ -1991,7 +2038,7 @@ outputs: host_nobrackets: str_replace: template: - get_param: [EndpointMap, GlancePublic, host] + get_param: [EndpointMap, CongressPublic, host] params: CLOUDNAME: get_param: @@ -2002,17 +2049,17 @@ outputs: - NetIpMap - get_param: [ServiceNetMap, PublicNetwork] port: - get_param: [EndpointMap, GlancePublic, port] + get_param: [EndpointMap, CongressPublic, port] protocol: - get_param: [EndpointMap, GlancePublic, protocol] + get_param: [EndpointMap, CongressPublic, protocol] uri: list_join: - '' - - - get_param: [EndpointMap, GlancePublic, protocol] + - - get_param: [EndpointMap, CongressPublic, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlancePublic, host] + get_param: [EndpointMap, CongressPublic, host] params: CLOUDNAME: get_param: @@ -2027,15 +2074,15 @@ outputs: get_param: [ServiceNetMap, PublicNetwork] template: NETWORK_uri - ':' - - get_param: [EndpointMap, GlancePublic, port] + - get_param: [EndpointMap, CongressPublic, port] uri_no_suffix: list_join: - '' - - - get_param: [EndpointMap, GlancePublic, protocol] + - - get_param: [EndpointMap, CongressPublic, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlancePublic, host] + get_param: [EndpointMap, CongressPublic, host] params: CLOUDNAME: get_param: @@ -2050,88 +2097,2776 @@ outputs: get_param: [ServiceNetMap, PublicNetwork] template: NETWORK_uri - ':' - - get_param: [EndpointMap, GlancePublic, port] - GlanceRegistryInternal: + - get_param: [EndpointMap, CongressPublic, port] + ContrailAnalyticsApiAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsApiAdmin, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsApiAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + 
get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsApiAdmin, port] + ContrailAnalyticsApiInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsApiInternal, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsApiInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsApiInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsApiInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsApiInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsApiInternal, port] + ContrailAnalyticsApiPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsApiPublic, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsApiPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsApiPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsApiPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: 
[EndpointMap, ContrailAnalyticsApiPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsApiPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsApiPublic, port] + ContrailAnalyticsCollectorHttpAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpAdmin, + port] + ContrailAnalyticsCollectorHttpInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + 
protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, + port] + ContrailAnalyticsCollectorHttpPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorHttpPublic, + port] + ContrailAnalyticsCollectorSandeshAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, 
ContrailAnalyticsCollectorSandeshAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshAdmin, + port] + ContrailAnalyticsCollectorSandeshInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + 
get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, + port] + ContrailAnalyticsCollectorSandeshPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshPublic, + port] + ContrailAnalyticsHttpAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, 
ContrailAnalyticsHttpAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsHttpAdmin, port] + ContrailAnalyticsHttpInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsHttpInternal, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsHttpInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsHttpInternal, + port] + ContrailAnalyticsHttpPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsHttpPublic, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsHttpPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, 
ContrailAnalyticsHttpPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsHttpPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsHttpPublic, port] + ContrailAnalyticsRedisAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsRedisAdmin, port] + ContrailAnalyticsRedisInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, 
ContrailAnalyticsNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsRedisInternal, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsRedisInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailAnalyticsNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsRedisInternal, + port] + ContrailAnalyticsRedisPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailAnalyticsRedisPublic, port] + protocol: + get_param: [EndpointMap, ContrailAnalyticsRedisPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailAnalyticsRedisPublic, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailAnalyticsRedisPublic, port] + ContrailConfigAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailConfigAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + 
host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailConfigAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailConfigAdmin, port] + protocol: + get_param: [EndpointMap, ContrailConfigAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailConfigAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailConfigAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailConfigAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailConfigAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailConfigAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailConfigAdmin, port] + ContrailConfigInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailConfigInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailConfigInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailConfigInternal, port] + protocol: + get_param: [EndpointMap, ContrailConfigInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailConfigInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailConfigInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailConfigInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailConfigInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailConfigInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailConfigInternal, port] + ContrailConfigPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailConfigPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + 
get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailConfigPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailConfigPublic, port] + protocol: + get_param: [EndpointMap, ContrailConfigPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailConfigPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailConfigPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailConfigPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailConfigPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailConfigPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailConfigPublic, port] + ContrailDiscoveryAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailDiscoveryAdmin, port] + protocol: + get_param: [EndpointMap, ContrailDiscoveryAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailDiscoveryAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailDiscoveryAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailDiscoveryAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailDiscoveryAdmin, port] + ContrailDiscoveryInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryInternal, host] + params: + 
CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailDiscoveryInternal, port] + protocol: + get_param: [EndpointMap, ContrailDiscoveryInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailDiscoveryInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailDiscoveryInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailDiscoveryInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailDiscoveryInternal, port] + ContrailDiscoveryPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailDiscoveryPublic, port] + protocol: + get_param: [EndpointMap, ContrailDiscoveryPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailDiscoveryPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailDiscoveryPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailDiscoveryPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailDiscoveryPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, 
ContrailDiscoveryPublic, port] + ContrailWebuiHttpAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailWebuiHttpAdmin, port] + protocol: + get_param: [EndpointMap, ContrailWebuiHttpAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpAdmin, port] + ContrailWebuiHttpInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailWebuiHttpInternal, port] + protocol: + get_param: [EndpointMap, ContrailWebuiHttpInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, 
ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpInternal, port] + ContrailWebuiHttpPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailWebuiHttpPublic, port] + protocol: + get_param: [EndpointMap, ContrailWebuiHttpPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpPublic, port] + ContrailWebuiHttpsAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailWebuiHttpsAdmin, port] + protocol: + get_param: [EndpointMap, ContrailWebuiHttpsAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, protocol] + - :// + - str_replace: + template: + 
get_param: [EndpointMap, ContrailWebuiHttpsAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpsAdmin, port] + ContrailWebuiHttpsInternal: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, ContrailConfigNetwork] + port: + get_param: [EndpointMap, ContrailWebuiHttpsInternal, port] + protocol: + get_param: [EndpointMap, ContrailWebuiHttpsInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpsInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpsInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpsInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsInternal, + host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, ContrailConfigNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, ContrailConfigNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpsInternal, port] + ContrailWebuiHttpsPublic: + host: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, ContrailWebuiHttpsPublic, port] + protocol: + get_param: [EndpointMap, ContrailWebuiHttpsPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpsPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - 
get_param: [EndpointMap, ContrailWebuiHttpsPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, ContrailWebuiHttpsPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, ContrailWebuiHttpsPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, ContrailWebuiHttpsPublic, port] + Ec2ApiAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, Ec2ApiAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, Ec2ApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, Ec2ApiAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, Ec2ApiNetwork] + port: + get_param: [EndpointMap, Ec2ApiAdmin, port] + protocol: + get_param: [EndpointMap, Ec2ApiAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, Ec2ApiAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, Ec2ApiAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, Ec2ApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, Ec2ApiAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, Ec2ApiAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, Ec2ApiAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, Ec2ApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, Ec2ApiAdmin, port] + Ec2ApiInternal: + host: + str_replace: + template: + get_param: [EndpointMap, Ec2ApiInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, Ec2ApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, Ec2ApiInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, Ec2ApiNetwork] + port: + get_param: [EndpointMap, Ec2ApiInternal, port] + protocol: + get_param: [EndpointMap, Ec2ApiInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, Ec2ApiInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, Ec2ApiInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, Ec2ApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, Ec2ApiInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, 
Ec2ApiInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, Ec2ApiInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, Ec2ApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, Ec2ApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, Ec2ApiInternal, port] + Ec2ApiPublic: + host: + str_replace: + template: + get_param: [EndpointMap, Ec2ApiPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, Ec2ApiPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, Ec2ApiPublic, port] + protocol: + get_param: [EndpointMap, Ec2ApiPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, Ec2ApiPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, Ec2ApiPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, Ec2ApiPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, Ec2ApiPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, Ec2ApiPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, Ec2ApiPublic, port] + GlanceAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, GlanceAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, GlanceApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, GlanceAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, GlanceApiNetwork] + port: + get_param: [EndpointMap, GlanceAdmin, port] + protocol: + get_param: [EndpointMap, GlanceAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlanceAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, GlanceApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, GlanceAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlanceAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceAdmin, host] + params: + CLOUDNAME: + get_param: + - 
CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, GlanceApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, GlanceAdmin, port] + GlanceInternal: + host: + str_replace: + template: + get_param: [EndpointMap, GlanceInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, GlanceApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, GlanceInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, GlanceApiNetwork] + port: + get_param: [EndpointMap, GlanceInternal, port] + protocol: + get_param: [EndpointMap, GlanceInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, GlanceInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, GlanceApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, GlanceInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, GlanceInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, GlanceInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, GlanceApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, GlanceApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, GlanceInternal, port] + GlancePublic: host: str_replace: template: - get_param: [EndpointMap, GlanceRegistryInternal, host] + get_param: [EndpointMap, GlancePublic, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceRegistryNetwork] + - get_param: [ServiceNetMap, PublicNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceRegistryNetwork] + get_param: [ServiceNetMap, PublicNetwork] template: NETWORK_uri host_nobrackets: str_replace: template: - get_param: [EndpointMap, GlanceRegistryInternal, host] + get_param: [EndpointMap, GlancePublic, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceRegistryNetwork] + - get_param: [ServiceNetMap, PublicNetwork] IP_ADDRESS: get_param: - NetIpMap - - get_param: [ServiceNetMap, GlanceRegistryNetwork] + - get_param: [ServiceNetMap, PublicNetwork] port: - get_param: [EndpointMap, GlanceRegistryInternal, port] + get_param: [EndpointMap, GlancePublic, port] protocol: - get_param: [EndpointMap, GlanceRegistryInternal, protocol] + get_param: [EndpointMap, GlancePublic, protocol] uri: list_join: - '' - - - get_param: [EndpointMap, GlanceRegistryInternal, protocol] + - - get_param: [EndpointMap, GlancePublic, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlanceRegistryInternal, host] + get_param: [EndpointMap, GlancePublic, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceRegistryNetwork] + - get_param: [ServiceNetMap, 
PublicNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceRegistryNetwork] + get_param: [ServiceNetMap, PublicNetwork] template: NETWORK_uri - ':' - - get_param: [EndpointMap, GlanceRegistryInternal, port] + - get_param: [EndpointMap, GlancePublic, port] uri_no_suffix: list_join: - '' - - - get_param: [EndpointMap, GlanceRegistryInternal, protocol] + - - get_param: [EndpointMap, GlancePublic, protocol] - :// - str_replace: template: - get_param: [EndpointMap, GlanceRegistryInternal, host] + get_param: [EndpointMap, GlancePublic, host] params: CLOUDNAME: get_param: - CloudEndpoints - - get_param: [ServiceNetMap, GlanceRegistryNetwork] + - get_param: [ServiceNetMap, PublicNetwork] IP_ADDRESS: get_param: - NetIpMap - str_replace: params: NETWORK: - get_param: [ServiceNetMap, GlanceRegistryNetwork] + get_param: [ServiceNetMap, PublicNetwork] template: NETWORK_uri - ':' - - get_param: [EndpointMap, GlanceRegistryInternal, port] + - get_param: [EndpointMap, GlancePublic, port] GnocchiAdmin: host: str_replace: @@ -5077,6 +7812,252 @@ outputs: template: NETWORK_uri - ':' - get_param: [EndpointMap, NovaPublic, port] + NovaPlacementAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, NovaPlacementAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, NovaPlacementNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, NovaPlacementAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, NovaPlacementNetwork] + port: + get_param: [EndpointMap, NovaPlacementAdmin, port] + protocol: + get_param: [EndpointMap, NovaPlacementAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaPlacementAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPlacementAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, NovaPlacementNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, NovaPlacementAdmin, port] + - /placement + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaPlacementAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPlacementAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, NovaPlacementNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, NovaPlacementAdmin, port] + NovaPlacementInternal: + host: + str_replace: + template: + get_param: [EndpointMap, NovaPlacementInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, NovaPlacementNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, NovaPlacementInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - 
get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, NovaPlacementNetwork] + port: + get_param: [EndpointMap, NovaPlacementInternal, port] + protocol: + get_param: [EndpointMap, NovaPlacementInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaPlacementInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPlacementInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, NovaPlacementNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, NovaPlacementInternal, port] + - /placement + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaPlacementInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPlacementInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, NovaPlacementNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, NovaPlacementNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, NovaPlacementInternal, port] + NovaPlacementPublic: + host: + str_replace: + template: + get_param: [EndpointMap, NovaPlacementPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, NovaPlacementPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, NovaPlacementPublic, port] + protocol: + get_param: [EndpointMap, NovaPlacementPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, NovaPlacementPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPlacementPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, NovaPlacementPublic, port] + - /placement + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, NovaPlacementPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, NovaPlacementPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, NovaPlacementPublic, port] NovaVNCProxyAdmin: host: str_replace: @@ -5320,6 +8301,249 @@ outputs: template: NETWORK_uri - ':' - get_param: [EndpointMap, NovaVNCProxyPublic, port] + OctaviaAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, OctaviaAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + 
get_param: [ServiceNetMap, OctaviaApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, OctaviaAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, OctaviaApiNetwork] + port: + get_param: [EndpointMap, OctaviaAdmin, port] + protocol: + get_param: [EndpointMap, OctaviaAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, OctaviaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, OctaviaAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, OctaviaApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, OctaviaAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, OctaviaAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, OctaviaAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, OctaviaApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, OctaviaAdmin, port] + OctaviaInternal: + host: + str_replace: + template: + get_param: [EndpointMap, OctaviaInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, OctaviaApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, OctaviaInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, OctaviaApiNetwork] + port: + get_param: [EndpointMap, OctaviaInternal, port] + protocol: + get_param: [EndpointMap, OctaviaInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, OctaviaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, OctaviaInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, OctaviaApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, OctaviaInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, OctaviaInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, OctaviaInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, OctaviaApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, OctaviaApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, OctaviaInternal, port] + OctaviaPublic: + host: + str_replace: + template: + get_param: [EndpointMap, OctaviaPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + 
host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, OctaviaPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, OctaviaPublic, port] + protocol: + get_param: [EndpointMap, OctaviaPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, OctaviaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, OctaviaPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, OctaviaPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, OctaviaPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, OctaviaPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, OctaviaPublic, port] PankoAdmin: host: str_replace: @@ -6297,6 +9521,249 @@ outputs: template: NETWORK_uri - ':' - get_param: [EndpointMap, SwiftPublic, port] + TackerAdmin: + host: + str_replace: + template: + get_param: [EndpointMap, TackerAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, TackerApiNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, TackerAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, TackerApiNetwork] + port: + get_param: [EndpointMap, TackerAdmin, port] + protocol: + get_param: [EndpointMap, TackerAdmin, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, TackerAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, TackerAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, TackerApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, TackerAdmin, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, TackerAdmin, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, TackerAdmin, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, TackerApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, TackerAdmin, port] + TackerInternal: + host: + str_replace: + template: + get_param: [EndpointMap, TackerInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, TackerApiNetwork] + template: NETWORK_uri 
+ host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, TackerInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, TackerApiNetwork] + port: + get_param: [EndpointMap, TackerInternal, port] + protocol: + get_param: [EndpointMap, TackerInternal, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, TackerInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, TackerInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, TackerApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, TackerInternal, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, TackerInternal, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, TackerInternal, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, TackerApiNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, TackerApiNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, TackerInternal, port] + TackerPublic: + host: + str_replace: + template: + get_param: [EndpointMap, TackerPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + host_nobrackets: + str_replace: + template: + get_param: [EndpointMap, TackerPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - get_param: [ServiceNetMap, PublicNetwork] + port: + get_param: [EndpointMap, TackerPublic, port] + protocol: + get_param: [EndpointMap, TackerPublic, protocol] + uri: + list_join: + - '' + - - get_param: [EndpointMap, TackerPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, TackerPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, TackerPublic, port] + uri_no_suffix: + list_join: + - '' + - - get_param: [EndpointMap, TackerPublic, protocol] + - :// + - str_replace: + template: + get_param: [EndpointMap, TackerPublic, host] + params: + CLOUDNAME: + get_param: + - CloudEndpoints + - get_param: [ServiceNetMap, PublicNetwork] + IP_ADDRESS: + get_param: + - NetIpMap + - str_replace: + params: + NETWORK: + get_param: [ServiceNetMap, PublicNetwork] + template: NETWORK_uri + - ':' + - get_param: [EndpointMap, TackerPublic, port] ZaqarAdmin: host: str_replace: diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml index 5782bbe9..83d875e8 100644 --- a/network/ports/net_ip_list_map.yaml +++ b/network/ports/net_ip_list_map.yaml @@ -35,6 +35,32 @@ parameters: default: [] type: json + InternalApiNetName: + default: internal_api + description: The name of the internal API network. 
+ type: string + ExternalNetName: + default: external + description: The name of the external network. + type: string + ManagementNetName: + default: management + description: The name of the management network. + type: string + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + TenantNetName: + default: tenant + description: The name of the tenant network. + type: string + + resources: # This adds the extra "services" on for keystone # so that keystone_admin_api_network and @@ -58,19 +84,33 @@ resources: - keystone_admin_api - keystone_public_api + NetIpMapValue: + type: OS::Heat::Value + properties: + type: json + value: + map_replace: + - ctlplane: {get_param: ControlPlaneIpList} + external: {get_param: ExternalIpList} + internal_api: {get_param: InternalApiIpList} + storage: {get_param: StorageIpList} + storage_mgmt: {get_param: StorageMgmtIpList} + tenant: {get_param: TenantIpList} + management: {get_param: ManagementIpList} + - keys: + external: {get_param: ExternalNetName} + internal_api: {get_param: InternalApiNetName} + storage: {get_param: StorageNetName} + storage_mgmt: {get_param: StorageMgmtNetName} + tenant: {get_param: TenantNetName} + management: {get_param: ManagementNetName} + outputs: net_ip_map: description: > A Hash containing a mapping of network names to assigned lists of IP addresses. - value: - ctlplane: {get_param: ControlPlaneIpList} - external: {get_param: ExternalIpList} - internal_api: {get_param: InternalApiIpList} - storage: {get_param: StorageIpList} - storage_mgmt: {get_param: StorageMgmtIpList} - tenant: {get_param: TenantIpList} - management: {get_param: ManagementIpList} + value: {get_attr: [NetIpMapValue, value]} service_ips: description: > Map of enabled services to a list of their IP addresses @@ -92,14 +132,7 @@ outputs: for_each: SERVICE: {get_attr: [EnabledServicesValue, value]} - values: {get_param: ServiceNetMap} - - values: - ctlplane: {get_param: ControlPlaneIpList} - external: {get_param: ExternalIpList} - internal_api: {get_param: InternalApiIpList} - storage: {get_param: StorageIpList} - storage_mgmt: {get_param: StorageMgmtIpList} - tenant: {get_param: TenantIpList} - management: {get_param: ManagementIpList} + - values: {get_attr: [NetIpMapValue, value]} service_hostnames: description: > Map of enabled services to a list of hostnames where they're running diff --git a/network/ports/net_ip_map.yaml b/network/ports/net_ip_map.yaml index c8cf733f..c974d72e 100644 --- a/network/ports/net_ip_map.yaml +++ b/network/ports/net_ip_map.yaml @@ -69,35 +69,136 @@ parameters: type: string description: IP address with brackets in case of IPv6 + InternalApiNetName: + default: internal_api + description: The name of the internal API network. + type: string + ExternalNetName: + default: external + description: The name of the external network. + type: string + ManagementNetName: + default: management + description: The name of the management network. + type: string + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + TenantNetName: + default: tenant + description: The name of the tenant network. 
+ type: string + +resources: + + NetIpMapValue: + type: OS::Heat::Value + properties: + type: json + value: + map_replace: + - ctlplane: {get_param: ControlPlaneIp} + external: {get_param: ExternalIp} + internal_api: {get_param: InternalApiIp} + storage: {get_param: StorageIp} + storage_mgmt: {get_param: StorageMgmtIp} + tenant: {get_param: TenantIp} + management: {get_param: ManagementIp} + ctlplane_subnet: + list_join: + - '' + - - {get_param: ControlPlaneIp} + - '/' + - {get_param: ControlPlaneSubnetCidr} + external_subnet: {get_param: ExternalIpSubnet} + internal_api_subnet: {get_param: InternalApiIpSubnet} + storage_subnet: {get_param: StorageIpSubnet} + storage_mgmt_subnet: {get_param: StorageMgmtIpSubnet} + tenant_subnet: {get_param: TenantIpSubnet} + management_subnet: {get_param: ManagementIpSubnet} + ctlplane_uri: {get_param: ControlPlaneIp} + external_uri: {get_param: ExternalIpUri} + internal_api_uri: {get_param: InternalApiIpUri} + storage_uri: {get_param: StorageIpUri} + storage_mgmt_uri: {get_param: StorageMgmtIpUri} + tenant_uri: {get_param: TenantIpUri} + management_uri: {get_param: ManagementIpUri} + - keys: + external: {get_param: ExternalNetName} + internal_api: {get_param: InternalApiNetName} + storage: {get_param: StorageNetName} + storage_mgmt: {get_param: StorageMgmtNetName} + tenant: {get_param: TenantNetName} + management: {get_param: ManagementNetName} + external_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: ExternalNetName} + internal_api_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: InternalApiNetName} + storage_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: StorageNetName} + storage_mgmt_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: StorageMgmtNetName} + tenant_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: TenantNetName} + management_subnet: + str_replace: + template: NAME_subnet + params: + NAME: {get_param: ManagementNetName} + external_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: ExternalNetName} + internal_api_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: InternalApiNetName} + storage_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: StorageNetName} + storage_mgmt_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: StorageMgmtNetName} + tenant_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: TenantNetName} + management_uri: + str_replace: + template: NAME_uri + params: + NAME: {get_param: ManagementNetName} + outputs: net_ip_map: description: > A Hash containing a mapping of network names to assigned IPs for a specific machine. 
- value: - ctlplane: {get_param: ControlPlaneIp} - external: {get_param: ExternalIp} - internal_api: {get_param: InternalApiIp} - storage: {get_param: StorageIp} - storage_mgmt: {get_param: StorageMgmtIp} - tenant: {get_param: TenantIp} - management: {get_param: ManagementIp} - ctlplane_subnet: - list_join: - - '' - - - {get_param: ControlPlaneIp} - - '/' - - {get_param: ControlPlaneSubnetCidr} - external_subnet: {get_param: ExternalIpSubnet} - internal_api_subnet: {get_param: InternalApiIpSubnet} - storage_subnet: {get_param: StorageIpSubnet} - storage_mgmt_subnet: {get_param: StorageMgmtIpSubnet} - tenant_subnet: {get_param: TenantIpSubnet} - management_subnet: {get_param: ManagementIpSubnet} - ctlplane_uri: {get_param: ControlPlaneIp} - external_uri: {get_param: ExternalIpUri} - internal_api_uri: {get_param: InternalApiIpUri} - storage_uri: {get_param: StorageIpUri} - storage_mgmt_uri: {get_param: StorageMgmtIpUri} - tenant_uri: {get_param: TenantIpUri} - management_uri: {get_param: ManagementIpUri} + value: {get_attr: [NetIpMapValue, value]} diff --git a/network/scripts/run-os-net-config.sh b/network/scripts/run-os-net-config.sh index e65f922a..8fe2d270 100755 --- a/network/scripts/run-os-net-config.sh +++ b/network/scripts/run-os-net-config.sh @@ -10,7 +10,7 @@ # a deployment input via input_values # $network_config : the json serialized os-net-config config to apply # -set -ux +set -eux function get_metadata_ip() { @@ -98,8 +98,10 @@ EOF_CAT fi fi done + set +e os-net-config -c /etc/os-net-config/dhcp_all_interfaces.yaml -v --detailed-exit-codes --cleanup RETVAL=$? + set -e if [[ $RETVAL == 2 ]]; then ping_metadata_ip elif [[ $RETVAL != 0 ]]; then @@ -108,7 +110,9 @@ EOF_CAT } if [ -n '$network_config' ]; then - trap configure_safe_defaults EXIT + if [ -z "${disable_configure_safe_defaults:-''}" ]; then + trap configure_safe_defaults EXIT + fi mkdir -p /etc/os-net-config # Note these variables come from the calling heat SoftwareConfig @@ -121,8 +125,10 @@ if [ -n '$network_config' ]; then sed -i "s/bridge_name/${bridge_name:-''}/" /etc/os-net-config/config.json sed -i "s/interface_name/${interface_name:-''}/" /etc/os-net-config/config.json + set +e os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes RETVAL=$? + set -e if [[ $RETVAL == 2 ]]; then ping_metadata_ip diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml index b2201452..a1042ebb 100644 --- a/network/service_net_map.j2.yaml +++ b/network/service_net_map.j2.yaml @@ -21,9 +21,19 @@ parameters: # snake_case - the names must still match when converted ServiceNetMapDefaults: default: + # Note the values in this map are replaced by *NetName + # to allow for sane defaults when the network names are + # overridden. 
ApacheNetwork: internal_api NeutronTenantNetwork: tenant CeilometerApiNetwork: internal_api + ContrailAnalyticsNetwork: internal_api + ContrailAnalyticsDatabaseNetwork: internal_api + ContrailConfigNetwork: internal_api + ContrailControlNetwork: internal_api + ContrailDatabaseNetwork: internal_api + ContrailWebuiNetwork: internal_api + ContrailTsnNetwork: internal_api AodhApiNetwork: internal_api PankoApiNetwork: internal_api BarbicanApiNetwork: internal_api @@ -31,20 +41,26 @@ parameters: MongodbNetwork: internal_api CinderApiNetwork: internal_api CinderIscsiNetwork: storage + CongressApiNetwork: internal_api GlanceApiNetwork: storage - GlanceRegistryNetwork: internal_api IronicApiNetwork: ctlplane IronicNetwork: ctlplane KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints KeystonePublicApiNetwork: internal_api ManilaApiNetwork: internal_api NeutronApiNetwork: internal_api + OctaviaApiNetwork: internal_api HeatApiNetwork: internal_api HeatApiCfnNetwork: internal_api HeatApiCloudwatchNetwork: internal_api NovaApiNetwork: internal_api + NovaPlacementNetwork: internal_api NovaMetadataNetwork: internal_api NovaVncProxyNetwork: internal_api + NovaLibvirtNetwork: internal_api + Ec2ApiNetwork: internal_api + Ec2ApiMetadataNetwork: internal_api + TackerApiNetwork: internal_api SwiftStorageNetwork: storage_mgmt SwiftProxyNetwork: storage SaharaApiNetwork: internal_api @@ -61,9 +77,11 @@ parameters: OvnDbsNetwork: internal_api MistralApiNetwork: internal_api ZaqarApiNetwork: internal_api + PacemakerRemoteNetwork: internal_api # We special-case the default ResolveNetwork for the CephStorage role # for backwards compatibility, all other roles default to internal_api CephStorageHostnameResolveNetwork: storage + EtcdNetwork: internal_api {% for role in roles if role.name != 'CephStorage' %} {{role.name}}HostnameResolveNetwork: internal_api {% endfor %} @@ -83,20 +101,62 @@ parameters: internal use only, this will be removed in future. type: json + InternalApiNetName: + default: internal_api + description: The name of the internal API network. + type: string + ExternalNetName: + default: external + description: The name of the external network. + type: string + ManagementNetName: + default: management + description: The name of the management network. + type: string + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + TenantNetName: + default: tenant + description: The name of the tenant network. + type: string + + parameter_groups: - label: deprecated description: Do not use deprecated params, they will be removed. 
parameters: - ServiceNetMapDeprecatedMapping +resources: + ServiceNetMapValue: + type: OS::Heat::Value + properties: + type: json + value: + map_merge: + - map_replace: + - {get_param: ServiceNetMapDefaults} + - values: + external: {get_param: ExternalNetName} + internal_api: {get_param: InternalApiNetName} + storage: {get_param: StorageNetName} + storage_mgmt: {get_param: StorageMgmtNetName} + tenant: {get_param: TenantNetName} + management: {get_param: ManagementNetName} + - map_replace: + - {get_param: ServiceNetMap} + - keys: {get_param: ServiceNetMapDeprecatedMapping} + + outputs: service_net_map: - value: - map_merge: - - {get_param: ServiceNetMapDefaults} - - map_replace: - - {get_param: ServiceNetMap} - - keys: {get_param: ServiceNetMapDeprecatedMapping} + value: {get_attr: [ServiceNetMapValue, value]} service_net_map_lower: value: @@ -106,9 +166,4 @@ outputs: yaql: expression: dict($.data.map.items().select([ regex(`([a-z0-9])([A-Z])`).replace($[0], '\\1_\\2').toLower(), $[1]])) data: - map: - map_merge: - - {get_param: ServiceNetMapDefaults} - - map_replace: - - {get_param: ServiceNetMap} - - keys: {get_param: ServiceNetMapDeprecatedMapping} + map: {get_attr: [ServiceNetMapValue, value]} diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index c850ee1b..ae012b21 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -2,6 +2,7 @@ resource_registry: OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment OS::TripleO::PostDeploySteps: puppet/post.yaml + OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml OS::TripleO::DefaultPasswords: default_passwords.yaml @@ -10,6 +11,9 @@ resource_registry: OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml + OS::TripleO::Tasks::SwiftRingDeploy: extraconfig/tasks/swift-ring-deploy.yaml + OS::TripleO::Tasks::SwiftRingUpdate: extraconfig/tasks/swift-ring-update.yaml + {% for role in roles %} OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml @@ -65,8 +69,10 @@ resource_registry: OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml - OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None - OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None +{% for role in roles %} + OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None + OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None +{% endfor %} # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy # phase, e.g when puppet is applied, but after the pre_deploy phase. 
Useful when @@ -110,14 +116,15 @@ resource_registry: # Upgrade resources OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml - OS::TripleO::UpgradeSteps: OS::Heat::None # services OS::TripleO::Services: puppet/services/services.yaml OS::TripleO::Services::Apache: puppet/services/apache.yaml OS::TripleO::Services::ApacheTLS: OS::Heat::None OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml + OS::TripleO::Services::CephMds: OS::Heat::None OS::TripleO::Services::CephMon: OS::Heat::None + OS::TripleO::Services::CephRbdMirror: OS::Heat::None OS::TripleO::Services::CephRgw: OS::Heat::None OS::TripleO::Services::CephOSD: OS::Heat::None OS::TripleO::Services::CephClient: OS::Heat::None @@ -127,10 +134,10 @@ resource_registry: OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml OS::TripleO::Services::BlockStorageCinderVolume: puppet/services/cinder-volume.yaml - OS::TripleO::Services::Core: OS::Heat::None + OS::TripleO::Services::Congress: OS::Heat::None OS::TripleO::Services::Keystone: puppet/services/keystone.yaml OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml - OS::TripleO::Services::GlanceRegistry: puppet/services/glance-registry.yaml + OS::TripleO::Services::GlanceRegistry: puppet/services/disabled/glance-registry.yaml OS::TripleO::Services::HeatApi: puppet/services/heat-api.yaml OS::TripleO::Services::HeatApiCfn: puppet/services/heat-api-cfn.yaml OS::TripleO::Services::HeatApiCloudwatch: puppet/services/heat-api-cloudwatch.yaml @@ -153,13 +160,13 @@ resource_registry: OS::TripleO::Services::NeutronCorePluginML2OVN: puppet/services/neutron-plugin-ml2-ovn.yaml OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml - OS::TripleO::Services::NeutronCorePluginOpencontrail: puppet/services/neutron-plugin-opencontrail.yaml OS::TripleO::Services::OVNDBs: OS::Heat::None OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml OS::TripleO::Services::Pacemaker: OS::Heat::None + OS::TripleO::Services::PacemakerRemote: OS::Heat::None OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml @@ -169,10 +176,12 @@ resource_registry: OS::TripleO::Services::Memcached: puppet/services/memcached.yaml OS::TripleO::Services::SaharaApi: OS::Heat::None OS::TripleO::Services::SaharaEngine: OS::Heat::None + OS::TripleO::Services::Sshd: OS::Heat::None OS::TripleO::Services::Redis: puppet/services/database/redis.yaml OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml OS::TripleO::Services::NovaApi: puppet/services/nova-api.yaml + OS::TripleO::Services::NovaPlacement: puppet/services/nova-placement.yaml OS::TripleO::Services::NovaMetadata: puppet/services/nova-metadata.yaml OS::TripleO::Services::NovaScheduler: puppet/services/nova-scheduler.yaml OS::TripleO::Services::NovaConsoleauth: puppet/services/nova-consoleauth.yaml @@ -184,6 +193,7 @@ resource_registry: OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml 
OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml OS::TripleO::Services::Snmp: puppet/services/snmp.yaml + OS::TripleO::Services::Tacker: OS::Heat::None OS::TripleO::Services::Timezone: puppet/services/time/timezone.yaml OS::TripleO::Services::CeilometerApi: puppet/services/ceilometer-api.yaml OS::TripleO::Services::CeilometerCollector: puppet/services/ceilometer-collector.yaml @@ -198,6 +208,7 @@ resource_registry: OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml # Services that are disabled by default (use relevant environment files): OS::TripleO::Services::FluentdClient: OS::Heat::None + OS::TripleO::Services::Collectd: OS::Heat::None OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml OS::TripleO::Services::ManilaApi: OS::Heat::None OS::TripleO::Services::ManilaScheduler: OS::Heat::None @@ -212,7 +223,7 @@ resource_registry: OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml OS::TripleO::Services::AodhListener: puppet/services/aodh-listener.yaml - OS::TripleO::Services::PankoApi: OS::Heat::None + OS::TripleO::Services::PankoApi: puppet/services/panko-api.yaml OS::TripleO::Services::MistralEngine: OS::Heat::None OS::TripleO::Services::MistralApi: OS::Heat::None OS::TripleO::Services::MistralExecutor: OS::Heat::None @@ -224,12 +235,21 @@ resource_registry: OS::TripleO::Services::OpenDaylightApi: OS::Heat::None OS::TripleO::Services::OpenDaylightOvs: OS::Heat::None OS::TripleO::Services::SensuClient: OS::Heat::None - OS::TripleO::Services::ContrailAnalytics: puppet/services/network/contrail-analytics.yaml - OS::TripleO::Services::ContrailConfig: puppet/services/network/contrail-config.yaml - OS::TripleO::Services::ContrailControl: puppet/services/network/contrail-control.yaml - OS::TripleO::Services::ContrailDatabase: puppet/services/network/contrail-database.yaml - OS::TripleO::Services::ContrailWebui: puppet/services/network/contrail-webui.yaml + OS::TripleO::Services::TLSProxyBase: OS::Heat::None OS::TripleO::Services::Zaqar: OS::Heat::None + OS::TripleO::Services::NeutronML2FujitsuCfab: OS::Heat::None + OS::TripleO::Services::NeutronML2FujitsuFossw: OS::Heat::None + OS::TripleO::Services::CinderHPELeftHandISCSI: OS::Heat::None + OS::TripleO::Services::Etcd: OS::Heat::None + OS::TripleO::Services::Ec2Api: OS::Heat::None + OS::TripleO::Services::AuditD: OS::Heat::None + OS::TripleO::Services::OctaviaApi: OS::Heat::None + OS::TripleO::Services::OctaviaHealthManager: OS::Heat::None + OS::TripleO::Services::OctaviaHousekeeping: OS::Heat::None + OS::TripleO::Services::OctaviaWorker: OS::Heat::None + OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml + OS::TripleO::Services::Vpp: OS::Heat::None + OS::TripleO::Services::Docker: OS::Heat::None parameter_defaults: EnablePackageInstall: false diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index d38bba2d..e99f770f 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -1,3 +1,4 @@ +{% set primary_role_name = roles[0].name -%} heat_template_version: ocata description: > @@ -242,6 +243,12 @@ resources: NetIpMap: {get_attr: [VipMap, net_ip_map]} ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]} + EndpointMapData: + type: OS::Heat::Value + properties: + type: json + value: {get_attr: [EndpointMap, endpoint_map]} + # Jinja loop for Role in roles_data.yaml {% for role in roles %} # Resources generated for {{role.name}} Role @@ -254,6 
+261,18 @@ resources: EndpointMap: {get_attr: [EndpointMap, endpoint_map]} DefaultPasswords: {get_attr: [DefaultPasswords, passwords]} + # Filter any null/None service_names which may be present due to mapping + # of services to OS::Heat::None + {{role.name}}ServiceNames: + type: OS::Heat::Value + depends_on: {{role.name}}ServiceChain + properties: + type: comma_delimited_list + value: + yaql: + expression: coalesce($.data, []).where($ != null) + data: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} + {{role.name}}HostsDeployment: type: OS::Heat::StructuredDeployments properties: @@ -304,7 +323,7 @@ resources: StorageMgmtIpList: {get_attr: [{{role.name}}, storage_mgmt_ip_address]} TenantIpList: {get_attr: [{{role.name}}, tenant_ip_address]} ManagementIpList: {get_attr: [{{role.name}}, management_ip_address]} - EnabledServices: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} + EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]} ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]} ServiceHostnameList: {get_attr: [{{role.name}}, hostname]} NetworkHostnameMap: @@ -360,8 +379,8 @@ resources: {% for r in roles %} - get_attr: [{{r.name}}ServiceChain, role_data, service_config_settings] {% endfor %} - services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} - ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} + services: {get_attr: [{{role.name}}ServiceNames, value]} + ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]} MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]} ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]} {% endfor %} @@ -395,7 +414,7 @@ resources: list_join: - ',' {% for role in roles %} - - {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} + - {get_attr: [{{role.name}}ServiceNames, value]} {% endfor %} logging_groups: yaql: @@ -415,8 +434,8 @@ resources: {% for role in roles %} - {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]} {% endfor %} - controller_ips: {get_attr: [Controller, ip_address]} - controller_names: {get_attr: [Controller, hostname]} + controller_ips: {get_attr: [{{primary_role_name}}, ip_address]} + controller_names: {get_attr: [{{primary_role_name}}, hostname]} service_ips: # Note (shardy) this somewhat complex yaql may be replaced # with a map_deep_merge function in ocata. 
It merges the @@ -454,7 +473,7 @@ resources: - {get_attr: [{{role.name}}IpListMap, short_service_bootstrap_hostnames]} {% endfor %} # FIXME(shardy): These require further work to move into service_ips - memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]} + memcache_node_ips: {get_attr: [{{primary_role_name}}IpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]} NetVipMap: {get_attr: [VipMap, net_ip_map]} RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]} ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]} @@ -560,12 +579,12 @@ resources: PingTestIps: list_join: - ' ' - - - {get_attr: [Controller, resource.0.external_ip_address]} - - {get_attr: [Controller, resource.0.internal_api_ip_address]} - - {get_attr: [Controller, resource.0.storage_ip_address]} - - {get_attr: [Controller, resource.0.storage_mgmt_ip_address]} - - {get_attr: [Controller, resource.0.tenant_ip_address]} - - {get_attr: [Controller, resource.0.management_ip_address]} + - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]} + - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]} + - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]} + - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]} + - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]} + - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]} UpdateWorkflow: type: OS::TripleO::Tasks::UpdateWorkflow @@ -592,36 +611,24 @@ resources: - {{role.name}}AllNodesValidationDeployment {% endfor %} properties: -{% for role in roles %} - servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} -{% endfor %} - - # Upgrade steps for all roles - AllNodesUpgradeSteps: - type: OS::TripleO::UpgradeSteps - depends_on: -{% for role in roles %} - - {{role.name}}AllNodesDeployment -{% endfor %} - properties: servers: {% for role in roles %} {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} {% endfor %} - role_data: -{% for role in roles %} - {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} -{% endfor %} # Post deployment steps for all roles AllNodesDeploySteps: type: OS::TripleO::PostDeploySteps - depends_on: AllNodesUpgradeSteps + depends_on: +{% for role in roles %} + - {{role.name}}AllNodesDeployment +{% endfor %} properties: servers: {% for role in roles %} {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} {% endfor %} + EndpointMap: {get_attr: [EndpointMap, endpoint_map]} role_data: {% for role in roles %} {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} @@ -633,7 +640,7 @@ outputs: value: true KeystoneURL: description: URL for the Overcloud Keystone service - value: {get_attr: [EndpointMap, endpoint_map, KeystonePublic, uri]} + value: {get_attr: [EndpointMapData, value, KeystonePublic, uri]} KeystoneAdminVip: description: Keystone Admin VIP endpoint value: {get_attr: [VipMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]} @@ -642,7 +649,7 @@ outputs: Mapping of the resources with the needed info for their endpoints. This includes the protocol used, the IP, port and also a full representation of the URI. 
- value: {get_attr: [EndpointMap, endpoint_map]} + value: {get_attr: [EndpointMapData, value]} HostsEntry: description: | The content that should be appended to your /etc/hosts if you want to get @@ -657,7 +664,7 @@ outputs: description: The services enabled on each role value: {% for role in roles %} - {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} + {{role.name}}: {get_attr: [{{role.name}}ServiceNames, value]} {% endfor %} RoleData: description: The configuration data associated with each role diff --git a/plan-environment.yaml b/plan-environment.yaml new file mode 100644 index 00000000..f629eff3 --- /dev/null +++ b/plan-environment.yaml @@ -0,0 +1,5 @@ +version: 1.0
+
+template: overcloud.yaml
+environments:
+- path: overcloud-resource-registry-puppet.yaml
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml index ee43c3a5..7edf17af 100644 --- a/puppet/all-nodes-config.yaml +++ b/puppet/all-nodes-config.yaml @@ -68,6 +68,32 @@ parameters: type: boolean default: false + InternalApiNetName: + default: internal_api + description: The name of the internal API network. + type: string + ExternalNetName: + default: external + description: The name of the external network. + type: string + ManagementNetName: + default: management + description: The name of the management network. + type: string + StorageNetName: + default: storage + description: The name of the storage network. + type: string + StorageMgmtNetName: + default: storage_mgmt + description: The name of the Storage management network. + type: string + TenantNetName: + default: tenant + description: The name of the tenant network. + type: string + + resources: allNodesConfigImpl: @@ -175,21 +201,21 @@ resources: get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_admin_api_network]}] keystone_public_api_vip: get_param: [NetVipMap, {get_param: [ServiceNetMap, keystone_public_api_network]}] - public_virtual_ip: {get_param: [NetVipMap, external]} + public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]} controller_virtual_ip: {get_param: [NetVipMap, ctlplane]} - internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]} - storage_virtual_ip: {get_param: [NetVipMap, storage]} - storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]} + internal_api_virtual_ip: {get_param: [NetVipMap, {get_param: InternalApiNetName}]} + storage_virtual_ip: {get_param: [NetVipMap, {get_param: StorageNetName}]} + storage_mgmt_virtual_ip: {get_param: [NetVipMap, {get_param: StorageMgmtNetName}]} redis_vip: {get_param: RedisVirtualIP} # public_virtual_ip and controller_virtual_ip are needed in # both HAproxy & keepalived. - tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, external]} + tripleo::haproxy::public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]} tripleo::haproxy::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]} - tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, external]} + tripleo::keepalived::public_virtual_ip: {get_param: [NetVipMap, {get_param: ExternalNetName}]} tripleo::keepalived::controller_virtual_ip: {get_param: [NetVipMap, ctlplane]} - tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, internal_api]} - tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, storage]} - tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, storage_mgmt]} + tripleo::keepalived::internal_api_virtual_ip: {get_param: [NetVipMap, {get_param: InternalApiNetName}]} + tripleo::keepalived::storage_virtual_ip: {get_param: [NetVipMap, {get_param: StorageNetName}]} + tripleo::keepalived::storage_mgmt_virtual_ip: {get_param: [NetVipMap, {get_param: StorageMgmtNetName}]} tripleo::keepalived::redis_virtual_ip: {get_param: RedisVirtualIP} tripleo::redis_notification::haproxy_monitor_ip: {get_param: [NetVipMap, ctlplane]} cloud_name_external: {get_param: cloud_name_external} diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml index e92de45f..51f9abac 100644 --- a/puppet/blockstorage-role.yaml +++ b/puppet/blockstorage-role.yaml @@ -115,6 +115,14 @@ parameters: Command or script snippet to run on all overcloud nodes to initialize the upgrade process. E.g. a repository switch. 
default: '' + UpgradeInitCommonCommand: + type: string + description: | + Common commands required by the upgrades process. This should not + normally be modified by the operator and is set and unset in the + major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml + environment files. + default: '' resources: BlockStorage: @@ -360,6 +368,7 @@ resources: - - "#!/bin/bash\n\n" - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - get_param: UpgradeInitCommand + - get_param: UpgradeInitCommonCommand # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first @@ -439,6 +448,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml index 892f91ef..d7d7f478 100644 --- a/puppet/cephstorage-role.yaml +++ b/puppet/cephstorage-role.yaml @@ -121,6 +121,14 @@ parameters: Command or script snippet to run on all overcloud nodes to initialize the upgrade process. E.g. a repository switch. default: '' + UpgradeInitCommonCommand: + type: string + description: | + Common commands required by the upgrades process. This should not + normally be modified by the operator and is set and unset in the + major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml + environment files. + default: '' resources: CephStorage: @@ -366,6 +374,7 @@ resources: - - "#!/bin/bash\n\n" - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - get_param: UpgradeInitCommand + - get_param: UpgradeInitCommonCommand # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first @@ -451,6 +460,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: CephStorage} diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml index 62adcd33..ebdd762d 100644 --- a/puppet/compute-role.yaml +++ b/puppet/compute-role.yaml @@ -133,6 +133,14 @@ parameters: Command or script snippet to run on all overcloud nodes to initialize the upgrade process. E.g. a repository switch. default: '' + UpgradeInitCommonCommand: + type: string + description: | + Common commands required by the upgrades process. This should not + normally be modified by the operator and is set and unset in the + major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml + environment files. 
+ default: '' resources: @@ -383,6 +391,7 @@ resources: - - "#!/bin/bash\n\n" - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - get_param: UpgradeInitCommand + - get_param: UpgradeInitCommonCommand # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first @@ -474,6 +483,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml deleted file mode 100644 index 1b719839..00000000 --- a/puppet/controller-config-pacemaker.yaml +++ /dev/null @@ -1,41 +0,0 @@ -heat_template_version: ocata - -description: > - A software config which runs manifests/overcloud_controller_pacemaker.pp - -parameters: - ConfigDebug: - default: false - description: Whether to run config management (e.g. Puppet) in debug mode. - type: boolean - StepConfig: - type: string - description: Config manifests that will be used to step through the deployment. - default: '' - -resources: - - ControllerPuppetConfigImpl: - type: OS::Heat::SoftwareConfig - properties: - group: puppet - options: - enable_debug: {get_param: ConfigDebug} - enable_hiera: True - enable_facter: False - modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules - outputs: - - name: result - inputs: - - name: step - type: Number - config: - list_join: - - '' - - - get_file: manifests/overcloud_controller_pacemaker.pp - - {get_param: StepConfig} - -outputs: - OS::stack_id: - description: The software config which runs overcloud_controller_pacemaker.pp - value: {get_resource: ControllerPuppetConfigImpl} diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml index 9e35af5f..2f4f583c 100644 --- a/puppet/controller-role.yaml +++ b/puppet/controller-role.yaml @@ -147,6 +147,14 @@ parameters: Command or script snippet to run on all overcloud nodes to initialize the upgrade process. E.g. a repository switch. default: '' + UpgradeInitCommonCommand: + type: string + description: | + Common commands required by the upgrades process. This should not + normally be modified by the operator and is set and unset in the + major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml + environment files. 
+ default: '' parameter_groups: - label: deprecated @@ -417,6 +425,7 @@ resources: - - "#!/bin/bash\n\n" - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - get_param: UpgradeInitCommand + - get_param: UpgradeInitCommonCommand # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first @@ -458,9 +467,7 @@ resources: - all_nodes # provided by allNodesConfig - vip_data # provided by allNodesConfig - '"%{::osfamily}"' - - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre - - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre @@ -516,6 +523,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: name: UpdateDeployment config: {get_resource: UpdateConfig} diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml index 3daf3fd3..b6d1239a 100644 --- a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml +++ b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml @@ -53,41 +53,40 @@ resources: NetworkMidoNetConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - midonet_data: - mapped_data: - enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} - enable_cassandra_on_controller: {get_param: EnableCassandraOnController} - midonet_tunnelzone_name: {get_param: TunnelZoneName} - midonet_tunnelzone_type: {get_param: TunnelZoneType} - midonet_libvirt_qemu_data: | - user = "root" - group = "root" - cgroup_device_acl = [ - "/dev/null", "/dev/full", "/dev/zero", - "/dev/random", "/dev/urandom", - "/dev/ptmx", "/dev/kvm", "/dev/kqemu", - "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", - "/dev/net/tun" - ] - tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} - tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} - tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} - tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} - tripleo::haproxy::midonet_api: true - # Missed Neutron Puppet data - neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' - neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' - neutron::plugins::midonet::midonet_api_port: 8081 - neutron::params::midonet_server_package: 'python-networking-midonet' + datafiles: + midonet_data: + mapped_data: + enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController} + enable_cassandra_on_controller: {get_param: EnableCassandraOnController} + midonet_tunnelzone_name: {get_param: TunnelZoneName} + midonet_tunnelzone_type: {get_param: TunnelZoneType} + midonet_libvirt_qemu_data: | + user = "root" + group = "root" + cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc","/dev/hpet", "/dev/vfio/vfio", + "/dev/net/tun" + ] + tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort} + 
tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort} + tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort} + tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift} + tripleo::haproxy::midonet_api: true + # Missed Neutron Puppet data + neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver' + neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver' + neutron::plugins::midonet::midonet_api_port: 8081 + neutron::params::midonet_server_package: 'python-networking-midonet' - # Make sure the l3 agent does not run - l3_agent_service: false - neutron::agents::l3::manage_service: false - neutron::agents::l3::enabled: false + # Make sure the l3 agent does not run + l3_agent_service: false + neutron::agents::l3::manage_service: false + neutron::agents::l3::enabled: false NetworkMidonetDeploymentControllers: diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml index cb8d498c..b05fa636 100644 --- a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml +++ b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml @@ -101,31 +101,30 @@ resources: NetworkCiscoConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_cisco_data: - mapped_data: - neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip} - neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username} - neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password} - neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list} - neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs} - neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig} - neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork} - neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix} - neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin} - neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix} - neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig} - neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime} - neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount} - neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate} - neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk} - neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig} - neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks} - neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges} - neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges} + datafiles: + neutron_cisco_data: + mapped_data: + neutron::plugins::ml2::cisco::ucsm::ucsm_ip: {get_input: UCSM_ip} + neutron::plugins::ml2::cisco::ucsm::ucsm_username: {get_input: UCSM_username} + neutron::plugins::ml2::cisco::ucsm::ucsm_password: {get_input: UCSM_password} + 
neutron::plugins::ml2::cisco::ucsm::ucsm_host_list: {get_input: UCSM_host_list} + neutron::plugins::ml2::cisco::ucsm::supported_pci_devs: {get_input: UCSMSupportedPciDevs} + neutron::plugins::ml2::cisco::nexus::nexus_config: {get_input: NexusConfig} + neutron::plugins::ml2::cisco::nexus::managed_physical_network: {get_input: NexusManagedPhysicalNetwork} + neutron::plugins::ml2::cisco::nexus::vlan_name_prefix: {get_input: NexusVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::svi_round_robin: {get_input: NexusSviRoundRobin} + neutron::plugins::ml2::cisco::nexus::provider_vlan_name_prefix: {get_input: NexusProviderVlanNamePrefix} + neutron::plugins::ml2::cisco::nexus::persistent_switch_config: {get_input: NexusPersistentSwitchConfig} + neutron::plugins::ml2::cisco::nexus::switch_heartbeat_time: {get_input: NexusSwitchHeartbeatTime} + neutron::plugins::ml2::cisco::nexus::switch_replay_count: {get_input: NexusSwitchReplayCount} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_create: {get_input: NexusProviderVlanAutoCreate} + neutron::plugins::ml2::cisco::nexus::provider_vlan_auto_trunk: {get_input: NexusProviderVlanAutoTrunk} + neutron::plugins::ml2::cisco::nexus::vxlan_global_config: {get_input: NexusVxlanGlobalConfig} + neutron::plugins::ml2::cisco::nexus::host_key_checks: {get_input: NexusHostKeyChecks} + neutron::plugins::ml2::cisco::type_nexus_vxlan::vni_ranges: {get_input: NexusVxlanVniRanges} + neutron::plugins::ml2::cisco::type_nexus_vxlan::mcast_ranges: {get_input: NexusVxlanMcastRanges} NetworkCiscoDeployment: type: OS::Heat::StructuredDeployments @@ -245,7 +244,9 @@ resources: for map_name in mappings: f_name = '/root/' + map_name map_data = os.getenv(map_name, "Nada") - with open(f_name, 'a') as f: + with os.fdopen(os.open(f_name, + os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o644), + 'w') as f: f.write(map_data) if map_data is not "Nada": if map_name is not 'nexus_config': @@ -260,7 +261,9 @@ resources: for mac in vals[1:]: mac2host[mac.lower()] = vals[0] - with open('/root/mac2host', 'a') as f: + with os.fdopen(os.open('/root/mac2host', + os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o644), + 'w') as f: f.write(str(mac2host)) # now we have mac to host, map host to switchport in hieradata diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml index 7fe2a842..533c0ee9 100644 --- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml @@ -20,14 +20,13 @@ resources: NeutronBigswitchConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_bigswitch_data: - mapped_data: - neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} - neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent} + neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp} NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml deleted file mode 100644 index 66252f1f..00000000 --- a/puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml +++ /dev/null @@ -1,59 +0,0 @@ 
-heat_template_version: ocata - -description: Compute node hieradata for Neutron OpenContrail configuration - -parameters: - server: - description: ID of the compute node to apply this config to - type: string - ContrailApiServerIp: - description: IP address of the OpenContrail API server - type: string - ContrailApiServerPort: - description: Port of the OpenContrail API - type: string - default: 8082 - -resources: - ComputeContrailConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - neutron_opencontrail_data: - mapped_data: - nova::network::neutron::network_api_class: nova.network.neutronv2.api.API - - contrail::vrouter::provision_vrouter::api_address: {get_input: contrail_api_server_ip} - contrail::vrouter::provision_vrouter::api_port: {get_input: contrail_api_server_port} - contrail::vrouter::provision_vrouter::keystone_admin_user: admin - contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin - contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"' - - contrail::vnc_api::vnc_api_config: - 'auth/AUTHN_TYPE': - value: keystone - 'auth/AUTHN_PROTOCOL': - value: http - 'auth/AUTHN_SERVER': - value: "%{hiera('keystone_admin_api_vip')}" - 'auth/AUTHN_PORT': - value: 35357 - 'auth/AUTHN_URL': - value: '/v2.0/tokens' - - ComputeContrailDeployment: - type: OS::Heat::StructuredDeployment - properties: - config: {get_resource: ComputeContrailConfig} - server: {get_param: server} - input_values: - contrail_api_server_ip: {get_param: ContrailApiServerIp} - contrail_api_server_port: {get_param: ContrailApiServerPort} - -outputs: - deploy_stdout: - description: Output of the extra hiera data deployment - value: {get_attr: [ComputeContrailDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml index 47c782c7..1d16e909 100644 --- a/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml +++ b/puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml @@ -50,22 +50,21 @@ resources: NovaNuageConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - nova_nuage_data: - mapped_data: - nuage::vrs::active_controller: {get_input: ActiveController} - nuage::vrs::standby_controller: {get_input: StandbyController} - nuage::metadataagent::metadata_port: {get_input: MetadataPort} - nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} - nuage::metadataagent::metadata_secret: {get_input: SharedSecret} - nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} - nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} - nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} - nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} - nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} + datafiles: + nova_nuage_data: + mapped_data: + nuage::vrs::active_controller: {get_input: ActiveController} + nuage::vrs::standby_controller: {get_input: StandbyController} + nuage::metadataagent::metadata_port: {get_input: MetadataPort} + nuage::metadataagent::nova_metadata_port: {get_input: NovaMetadataPort} + nuage::metadataagent::metadata_secret: {get_input: SharedSecret} + nuage::metadataagent::nova_client_version: {get_input: NovaClientVersion} + nuage::metadataagent::nova_os_username: {get_input: NovaOsUsername} + 
nuage::metadataagent::metadata_agent_start_with_ovs: {get_input: MetadataAgentStartWithOvs} + nuage::metadataagent::nova_api_endpoint_type: {get_input: NovaApiEndpointType} + nuage::metadataagent::nova_region_name: {get_input: NovaRegionName} NovaNuageDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml deleted file mode 100644 index 7d639883..00000000 --- a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml +++ /dev/null @@ -1,87 +0,0 @@ -heat_template_version: ocata - -description: Configure hieradata for Cinder Dell Storage Center configuration - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - - # Config specific parameters, to be provided via parameter_defaults - CinderEnableDellScBackend: - type: boolean - default: true - CinderDellScBackendName: - type: string - default: 'tripleo_dellsc' - CinderDellScSanIp: - type: string - CinderDellScSanLogin: - type: string - default: 'Admin' - CinderDellScSanPassword: - type: string - hidden: true - CinderDellScSsn: - type: string - default: '64702' - CinderDellScIscsiIpAddress: - type: string - default: '' - CinderDellScIscsiPort: - type: string - default: '3260' - CinderDellScApiPort: - type: string - default: '3033' - CinderDellScServerFolder: - type: string - default: 'dellsc_server' - CinderDellScVolumeFolder: - type: string - default: 'dellsc_volume' - -resources: - CinderDellScConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - cinder_dellsc_data: - mapped_data: - tripleo::profile::base::cinder::volume::cinder_enable_dellsc_backend: {get_input: EnableDellScBackend} - cinder::backend::dellsc_iscsi::volume_backend_name: {get_input: DellScBackendName} - cinder::backend::dellsc_iscsi::san_ip: {get_input: DellScSanIp} - cinder::backend::dellsc_iscsi::san_login: {get_input: DellScSanLogin} - cinder::backend::dellsc_iscsi::san_password: {get_input: DellScSanPassword} - cinder::backend::dellsc_iscsi::dell_sc_ssn: {get_input: DellScSsn} - cinder::backend::dellsc_iscsi::iscsi_ip_address: {get_input: DellScIscsiIpAddress} - cinder::backend::dellsc_iscsi::iscsi_port: {get_input: DellScIscsiPort} - cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_input: DellScApiPort} - cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_input: DellScServerFolder} - cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_input: DellScVolumeFolder} - - CinderDellScDeployment: - type: OS::Heat::StructuredDeployment - properties: - config: {get_resource: CinderDellScConfig} - server: {get_param: server} - input_values: - EnableDellScBackend: {get_param: CinderEnableDellScBackend} - DellScBackendName: {get_param: CinderDellScBackendName} - DellScSanIp: {get_param: CinderDellScSanIp} - DellScSanLogin: {get_param: CinderDellScSanLogin} - DellScSanPassword: {get_param: CinderDellScSanPassword} - DellScSsn: {get_param: CinderDellScSsn} - DellScIscsiIpAddress: {get_param: CinderDellScIscsiIpAddress} - DellScIscsiPort: {get_param: CinderDellScIscsiPort} - DellScApiPort: {get_param: CinderDellScApiPort} - DellScServerFolder: {get_param: CinderDellScServerFolder} - DellScVolumeFolder: {get_param: CinderDellScVolumeFolder} - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: {get_attr: [CinderDellScDeployment, deploy_stdout]} diff --git 
a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml deleted file mode 100644 index 30509044..00000000 --- a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml +++ /dev/null @@ -1,86 +0,0 @@ -heat_template_version: ocata - -description: Configure hieradata for Cinder Eqlx configuration - -parameters: - server: - description: ID of the controller node to apply this config to - type: string - - # Config specific parameters, to be provided via parameter_defaults - CinderEnableEqlxBackend: - type: boolean - default: true - CinderEqlxBackendName: - type: string - default: 'tripleo_eqlx' - CinderEqlxSanIp: - type: string - CinderEqlxSanLogin: - type: string - CinderEqlxSanPassword: - type: string - hidden: true - CinderEqlxSanThinProvision: - type: boolean - default: true - CinderEqlxGroupname: - type: string - default: 'group-0' - CinderEqlxPool: - type: string - default: 'default' - CinderEqlxChapLogin: - type: string - default: '' - CinderEqlxChapPassword: - type: string - default: '' - CinderEqlxUseChap: - type: boolean - default: false - -resources: - CinderEqlxConfig: - type: OS::Heat::StructuredConfig - properties: - group: os-apply-config - config: - hiera: - datafiles: - cinder_eqlx_data: - mapped_data: - tripleo::profile::base::cinder::volume::cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend} - cinder::backend::eqlx::volume_backend_name: {get_input: EqlxBackendName} - cinder::backend::eqlx::san_ip: {get_input: EqlxSanIp} - cinder::backend::eqlx::san_login: {get_input: EqlxSanLogin} - cinder::backend::eqlx::san_password: {get_input: EqlxSanPassword} - cinder::backend::eqlx::san_thin_provision: {get_input: EqlxSanThinProvision} - cinder::backend::eqlx::eqlx_group_name: {get_input: EqlxGroupname} - cinder::backend::eqlx::eqlx_pool: {get_input: EqlxPool} - cinder::backend::eqlx::eqlx_use_chap: {get_input: EqlxUseChap} - cinder::backend::eqlx::eqlx_chap_login: {get_input: EqlxChapLogin} - cinder::backend::eqlx::eqlx_chap_password: {get_input: EqlxChapPassword} - - CinderEqlxDeployment: - type: OS::Heat::StructuredDeployment - properties: - config: {get_resource: CinderEqlxConfig} - server: {get_param: server} - input_values: - EnableEqlxBackend: {get_param: CinderEnableEqlxBackend} - EqlxBackendName: {get_param: CinderEqlxBackendName} - EqlxSanIp: {get_param: CinderEqlxSanIp} - EqlxSanLogin: {get_param: CinderEqlxSanLogin} - EqlxSanPassword: {get_param: CinderEqlxSanPassword} - EqlxSanThinProvision: {get_param: CinderEqlxSanThinProvision} - EqlxGroupname: {get_param: CinderEqlxGroupname} - EqlxPool: {get_param: CinderEqlxPool} - EqlxUseChap: {get_param: CinderEqlxUseChap} - EqlxChapLogin: {get_param: CinderEqlxChapLogin} - EqlxChapPassword: {get_param: CinderEqlxChapPassword} - -outputs: - deploy_stdout: - description: Deployment reference, used to trigger puppet apply on changes - value: {get_attr: [CinderEqlxDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml index 763ae39a..378f7f98 100644 --- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml +++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml @@ -91,35 +91,34 @@ resources: CinderNetappConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - cinder_netapp_data: - mapped_data: - tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: 
{get_input: EnableNetappBackend} - cinder::backend::netapp::title: {get_input: NetappBackendName} - cinder::backend::netapp::netapp_login: {get_input: NetappLogin} - cinder::backend::netapp::netapp_password: {get_input: NetappPassword} - cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} - cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} - cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} - cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} - cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} - cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} - cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} - cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} - cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} - cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} - cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} - cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} - cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} - cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} - cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} - cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} - cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} - cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType} - cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} + datafiles: + cinder_netapp_data: + mapped_data: + tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend} + cinder::backend::netapp::title: {get_input: NetappBackendName} + cinder::backend::netapp::netapp_login: {get_input: NetappLogin} + cinder::backend::netapp::netapp_password: {get_input: NetappPassword} + cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname} + cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} + cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} + cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} + cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} + cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} + cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} + cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} + cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} + cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} + cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} + cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} + cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} + cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} + cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} + cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} + cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} + cinder::backend::netapp::netapp_host_type: 
{get_input: NetappHostType} + cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} CinderNetappDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml index 0f4806db..1456337f 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml @@ -38,19 +38,18 @@ resources: NeutronBigswitchConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - neutron_bigswitch_data: - mapped_data: - neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} - neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} - neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} - neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval} - neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id} - neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl} - neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory} + datafiles: + neutron_bigswitch_data: + mapped_data: + neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers} + neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth} + neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure} + neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval} + neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id} + neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl} + neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory} NeutronBigswitchDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml index 6eae812f..bca6010a 100644 --- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml +++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml @@ -96,48 +96,47 @@ resources: CiscoN1kvConfig: type: OS::Heat::StructuredConfig properties: - group: os-apply-config + group: hiera config: - hiera: - datafiles: - cisco_n1kv_data: - mapped_data: - #enable_cisco_n1kv: {get_input: EnableCiscoN1kv} - # VEM Parameters - n1kv_vem_source: {get_input: n1kv_vem_source} - n1kv_vem_version: {get_input: n1kv_vem_version} - neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} - neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id} - neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6} - neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf} - neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile} - neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config} - neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb} - neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: 
n1kv_vem_vteps_in_same_subnet} - neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood} - #VSM Parameter - n1kv_vsm_source: {get_input: n1kv_vsm_source} - n1kv_vsm_version: {get_input: n1kv_vsm_version} - n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf} - n1k_vsm::vsm_role: {get_input: n1kv_vsm_role} - n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl} - n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br} - n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password} - n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id} - n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip} - n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} - n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} - n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} - n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} - # Cisco N1KV driver Parameters - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: n1kv_vsm_username} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password} - neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration} - neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size} - neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout} - neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval} - neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries} + datafiles: + cisco_n1kv_data: + mapped_data: + #enable_cisco_n1kv: {get_input: EnableCiscoN1kv} + # VEM Parameters + n1kv_vem_source: {get_input: n1kv_vem_source} + n1kv_vem_version: {get_input: n1kv_vem_version} + neutron::agents::n1kv_vem::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::agents::n1kv_vem::n1kv_vsm_domain_id: {get_input: n1kv_vsm_domain_id} + neutron::agents::n1kv_vem::n1kv_vsm_ip_v6: {get_input: n1kv_vsm_ip_v6} + neutron::agents::n1kv_vem::host_mgmt_intf: {get_input: n1kv_vem_host_mgmt_intf} + neutron::agents::n1kv_vem::uplink_profile: {get_input: n1kv_vem_uplink_profile} + neutron::agents::n1kv_vem::vtep_config: {get_input: n1kv_vem_vtep_config} + neutron::agents::n1kv_vem::portdb: {get_input: n1kv_vem_portdb} + neutron::agents::n1kv_vem::vteps_in_same_subnet: {get_input: n1kv_vem_vteps_in_same_subnet} + neutron::agents::n1kv_vem::fastpath_flood: {get_input: n1kv_vem_fastpath_flood} + #VSM Parameter + n1kv_vsm_source: {get_input: n1kv_vsm_source} + n1kv_vsm_version: {get_input: n1kv_vsm_version} + n1k_vsm::phy_if_bridge: {get_input: n1kv_vsm_host_mgmt_intf} + n1k_vsm::vsm_role: {get_input: n1kv_vsm_role} + n1k_vsm::pacemaker_control: {get_input: n1kv_vsm_pacemaker_ctrl} + n1k_vsm::existing_bridge: {get_input: n1kv_vsm_existing_br} + n1k_vsm::vsm_admin_passwd: {get_input: n1kv_vsm_password} + n1k_vsm::vsm_domain_id: {get_input: n1kv_vsm_domain_id} + n1k_vsm::vsm_mgmt_ip: {get_input: n1kv_vsm_ip} + n1k_vsm::vsm_mgmt_netmask: {get_input: n1kv_vsm_mgmt_netmask} + n1k_vsm::vsm_mgmt_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_gateway: {get_input: n1kv_vsm_gateway_ip} + n1k_vsm::phy_bridge_vlan: {get_input: n1kv_phy_brige_vlan} + # Cisco N1KV driver Parameters + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_ip: {get_input: n1kv_vsm_ip} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_username: {get_input: 
n1kv_vsm_username} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_password: {get_input: n1kv_vsm_password} + neutron::plugins::ml2::cisco::nexus1000v::poll_duration: {get_input: n1kv_vsm_poll_duration} + neutron::plugins::ml2::cisco::nexus1000v::http_pool_size: {get_input: n1kv_vsm_http_pool_size} + neutron::plugins::ml2::cisco::nexus1000v::http_timeout: {get_input: n1kv_vsm_http_timeout} + neutron::plugins::ml2::cisco::nexus1000v::n1kv_vsm_sync_interval: {get_input: n1kv_vsm_sync_interval} + neutron::plugins::ml2::cisco::nexus1000v::max_vsm_retries: {get_input: n1kv_max_vsm_retries} CiscoN1kvDeployment: type: OS::Heat::StructuredDeployment diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml index b70f5c71..6f2dd684 100644 --- a/puppet/major_upgrade_steps.j2.yaml +++ b/puppet/major_upgrade_steps.j2.yaml @@ -1,3 +1,7 @@ +{% set enabled_roles = roles|rejectattr('disable_upgrade_deployment')|list -%} +{% set batch_upgrade_steps_max = 3 -%} +{% set upgrade_steps_max = 6 -%} +{% set deliver_script = {'deliver': False} -%} heat_template_version: ocata description: 'Upgrade steps for all roles' @@ -14,38 +18,193 @@ parameters: description: > Setting to a previously unused value during stack-update will trigger the Upgrade resources to re-run on all roles. + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + NovaPassword: + description: The password for the nova service and db account, used by nova-api. + type: string + hidden: true + +conditions: + # Conditions to disable any steps where the task list is empty +{%- for role in roles %} + {{role.name}}UpgradeBatchConfigEnabled: + not: + equals: + - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]} + - [] + {{role.name}}UpgradeConfigEnabled: + not: + equals: + - {get_param: [role_data, {{role.name}}, upgrade_tasks]} + - [] +{%- endfor %} resources: +{% for role in roles if role.disable_upgrade_deployment|default(false) %} + {{role.name}}DeliverUpgradeScriptConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "set -eu\n\n" + - "if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then\n\n" + - " crudini --set /etc/nova/nova.conf placement auth_type password\n\n" + - " crudini --set /etc/nova/nova.conf placement username placement\n\n" + - " crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n" + - " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n" + - " crudini --set /etc/nova/nova.conf placement project_name service\n\n" + - " systemctl restart openstack-nova-compute\n\n" + - "fi\n\n" + - str_replace: + template: | + crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD' + crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME' + crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL' + ROLE='ROLE_NAME' + params: + SERVICE_PASSWORD: { get_param: NovaPassword } + REGION_NAME: { get_param: KeystoneRegion } + AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + ROLE_NAME: {{role.name}} + - get_file: ../extraconfig/tasks/pacemaker_common_functions.sh + - get_file: ../extraconfig/tasks/run_puppet.sh + - get_file: ../extraconfig/tasks/tripleo_upgrade_node.sh + + 
{{role.name}}DeliverUpgradeScriptDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig} +{% endfor %} + +# Upgrade Steps for all roles, batched updates +# The UpgradeConfig resources could actually be created without +# serialization, but the event output is easier to follow if we +# do, and there should be minimal performance hit (creating the +# config is cheap compared to the time to apply the deployment). +{% for step in range(0, batch_upgrade_steps_max) %} + # Batch config resources step {{step}} + {%- for role in roles %} + {{role.name}}UpgradeBatchConfig_Step{{step}}: + type: OS::TripleO::UpgradeConfig + {%- if step > 0 %} + condition: {{role.name}}UpgradeBatchConfigEnabled + {% if role.name in enabled_roles %} + depends_on: + - {{role.name}}UpgradeBatch_Step{{step -1}} + {%- endif %} + {% else %} + {% for role in roles if role.disable_upgrade_deployment|default(false) %} + {% if deliver_script.update({'deliver': True}) %} {% endif %} + {% endfor %} + {% if deliver_script.deliver %} + depends_on: + {% endif %} + {% for dep in roles if dep.disable_upgrade_deployment|default(false) %} + - {{dep.name}}DeliverUpgradeScriptDeployment + {% endfor %} + {% endif %} + properties: + UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]} + step: {{step}} + {%- endfor %} + + # Batch deployment resources for step {{step}} (only for enabled roles) + {%- for role in enabled_roles %} + {{role.name}}UpgradeBatch_Step{{step}}: + type: OS::Heat::SoftwareDeploymentGroup + condition: {{role.name}}UpgradeBatchConfigEnabled + {%- if step > 0 %} + depends_on: + - {{role.name}}UpgradeBatch_Step{{step -1}} + {% else %} + depends_on: + - {{role.name}}UpgradeBatchConfig_Step{{step}} + {%- endif %} + update_policy: + batch_create: + max_batch_size: {{role.upgrade_batch_size|default(1)}} + rolling_update: + max_batch_size: {{role.upgrade_batch_size|default(1)}} + properties: + name: {{role.name}}UpgradeBatch_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}UpgradeBatchConfig_Step{{step}}} + input_values: + role: {{role.name}} + update_identifier: {get_param: UpdateIdentifier} + {%- endfor %} +{%- endfor %} + +# Dump the puppet manifests to be apply later when disable_upgrade_deployment +# is to true +{% for role in roles if role.disable_upgrade_deployment|default(false) %} + {{role.name}}DeliverPuppetConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - str_replace: + template: | + #!/bin/bash + cat > /root/{{role.name}}_puppet_config.pp << ENDOFCAT + PUPPET_CLASSES + ENDOFCAT + params: + PUPPET_CLASSES: {get_param: [role_data, {{role.name}}, step_config]} + + {{role.name}}DeliverPuppetDeployment: + type: OS::Heat::SoftwareDeploymentGroup + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}DeliverPuppetConfig} +{% endfor %} + # Upgrade Steps for all roles -# FIXME(shardy): would be nice to make the number of steps configurable -{% for step in range(1, 8) %} - {% for role in roles %} - # Step {{step}} resources +{%- for step in range(0, upgrade_steps_max) %} + # Config resources for step {{step}} + {%- for role in roles %} {{role.name}}UpgradeConfig_Step{{step}}: type: OS::TripleO::UpgradeConfig # The UpgradeConfig resources could actually be created without # serialization, but the event output is easier to 
follow if we # do, and there should be minimal performance hit (creating the # config is cheap compared to the time to apply the deployment). - {% if step > 1 %} + {%- if step > 0 %} + condition: {{role.name}}UpgradeConfigEnabled + {% if role.name in enabled_roles %} depends_on: - {% for dep in roles %} - - {{dep.name}}Upgrade_Step{{step -1}} - {% endfor %} - {% endif %} + - {{role.name}}Upgrade_Step{{step -1}} + {% endif %} + {%- endif %} properties: UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]} step: {{step}} + {%- endfor %} + # Deployment resources for step {{step}} (only for enabled roles) + {%- for role in enabled_roles %} {{role.name}}Upgrade_Step{{step}}: - type: OS::Heat::StructuredDeploymentGroup - {% if step > 1 %} + type: OS::Heat::SoftwareDeploymentGroup + {%- if step > 0 %} + condition: {{role.name}}UpgradeConfigEnabled depends_on: - {% for dep in roles %} - - {{dep.name}}Upgrade_Step{{step -1}} - {% endfor %} - {% endif %} + - {{role.name}}Upgrade_Step{{step -1}} + {%- endif %} properties: name: {{role.name}}Upgrade_Step{{step}} servers: {get_param: [servers, {{role.name}}]} @@ -53,8 +212,21 @@ resources: input_values: role: {{role.name}} update_identifier: {get_param: UpdateIdentifier} - {% endfor %} -{% endfor %} + {%- endfor %} +{%- endfor %} + + # Post upgrade deployment steps for all roles + # This runs the normal configuration (e.g puppet) steps unless upgrade + # is disabled for the role + AllNodesPostUpgradeSteps: + type: OS::TripleO::PostUpgradeSteps + depends_on: +{%- for dep in enabled_roles %} + - {{dep.name}}Upgrade_Step{{upgrade_steps_max - 1}} +{%- endfor %} + properties: + servers: {get_param: servers} + role_data: {get_param: role_data} outputs: # Output the config for each role, just use Step1 as the config should be @@ -65,4 +237,3 @@ outputs: {% for role in roles %} {{role.name.lower()}}: {get_attr: [{{role.name}}UpgradeConfig_Step1, upgrade_config]} {% endfor %} - diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp deleted file mode 100644 index d329d5fc..00000000 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -if hiera('step') >= 4 { - hiera_include('controller_classes', []) -} - -$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')]) -package_manifest{$package_manifest_name: ensure => present} diff --git a/puppet/manifests/overcloud_role.pp b/puppet/manifests/overcloud_role.pp index 1a59620c..e2bf5146 100644 --- a/puppet/manifests/overcloud_role.pp +++ b/puppet/manifests/overcloud_role.pp @@ -24,3 +24,7 @@ if hiera('step') >= 4 { $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud___ROLE__', hiera('step')]) package_manifest{$package_manifest_name: ensure => present} + +# NOTE(gfidente): ensure deprecated package manifest is absent, can be removed after Pike +$absent_package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')]) +package_manifest{$absent_package_manifest_name: ensure => absent} diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml index 1633134d..6ee06d78 100644 --- a/puppet/objectstorage-role.yaml +++ b/puppet/objectstorage-role.yaml @@ -115,6 +115,14 @@ parameters: Command or script snippet to run on all overcloud nodes to initialize the upgrade process. E.g. a repository switch. default: '' + UpgradeInitCommonCommand: + type: string + description: | + Common commands required by the upgrades process. This should not + normally be modified by the operator and is set and unset in the + major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml + environment files. + default: '' resources: @@ -360,6 +368,7 @@ resources: - - "#!/bin/bash\n\n" - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - get_param: UpgradeInitCommand + - get_param: UpgradeInitCommonCommand # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first @@ -438,6 +447,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: SwiftStorage} diff --git a/puppet/post-upgrade.j2.yaml b/puppet/post-upgrade.j2.yaml new file mode 100644 index 00000000..b84039de --- /dev/null +++ b/puppet/post-upgrade.j2.yaml @@ -0,0 +1,27 @@ +heat_template_version: ocata + +description: > + Post-upgrade configuration steps via puppet for all roles + where upgrade is not disabled as defined in ../roles_data.yaml + +parameters: + servers: + type: json + description: Mapping of Role name e.g Controller to a list of servers + + role_data: + type: json + description: Mapping of Role name e.g Controller to the per-role data + + DeployIdentifier: + default: '' + type: string + description: > + Setting this to a unique value will re-run any deployment tasks which + perform configuration on a Heat stack-update. + +resources: +# Note the include here is the same as post.j2.yaml but the data used at +# the time of rendering is different if any roles disable upgrades +{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%} +{% include 'puppet-steps.j2' %} diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml index 2a02ea19..21202775 100644 --- a/puppet/post.j2.yaml +++ b/puppet/post.j2.yaml @@ -12,7 +12,11 @@ parameters: role_data: type: json description: Mapping of Role name e.g Controller to the per-role data - + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. 
Typically set + via parameter_defaults in the resource registry. + type: json DeployIdentifier: default: '' type: string @@ -21,97 +25,4 @@ parameters: perform configuration on a Heat stack-update. resources: - -{% for role in roles %} - # Post deployment steps for all roles - # A single config is re-applied with an incrementing step number - # {{role.name}} Role steps - {{role.name}}ArtifactsConfig: - type: deploy-artifacts.yaml - - {{role.name}}ArtifactsDeploy: - type: OS::Heat::StructuredDeployments - properties: - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}ArtifactsConfig} - - {{role.name}}PreConfig: - type: OS::TripleO::Tasks::{{role.name}}PreConfig - properties: - servers: {get_param: [servers, {{role.name}}]} - input_values: - update_identifier: {get_param: DeployIdentifier} - - {{role.name}}Config: - type: OS::TripleO::{{role.name}}Config - properties: - StepConfig: {get_param: [role_data, {{role.name}}, step_config]} - - {% if role.name == 'Controller' %} - ControllerPrePuppet: - type: OS::TripleO::Tasks::ControllerPrePuppet - properties: - servers: {get_param: [servers, Controller]} - input_values: - update_identifier: {get_param: DeployIdentifier} - {% endif %} - - # Step through a series of configuration steps -{% for step in range(1, 6) %} - {% for role in roles %} - - {{role.name}}Deployment_Step{{step}}: - type: OS::Heat::StructuredDeploymentGroup - {% if step == 1 %} - depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] - {% else %} - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step{{step -1}} - {% endfor %} - {% endif %} - properties: - name: {{role.name}}Deployment_Step{{step}} - servers: {get_param: [servers, {{role.name}}]} - config: {get_resource: {{role.name}}Config} - input_values: - step: {{step}} - update_identifier: {get_param: DeployIdentifier} - - {% endfor %} -{% endfor %} - - {{role.name}}PostConfig: - type: OS::TripleO::Tasks::{{role.name}}PostConfig - depends_on: - {% for dep in roles %} - - {{dep.name}}Deployment_Step5 - {% endfor %} - properties: - servers: {get_param: servers} - input_values: - update_identifier: {get_param: DeployIdentifier} - - # Note, this should come last, so use depends_on to ensure - # this is created after any other resources. 
- {{role.name}}ExtraConfigPost: - depends_on: - {% for dep in roles %} - - {{dep.name}}PostConfig - {% endfor %} - type: OS::TripleO::NodeExtraConfigPost - properties: - servers: {get_param: [servers, {{role.name}}]} - - {% if role.name == 'Controller' %} - ControllerPostPuppet: - depends_on: - - ControllerExtraConfigPost - type: OS::TripleO::Tasks::ControllerPostPuppet - properties: - servers: {get_param: [servers, Controller]} - input_values: - update_identifier: {get_param: DeployIdentifier} - {% endif %} - -{% endfor %} +{% include 'puppet-steps.j2' %} diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2 new file mode 100644 index 00000000..581c4f0d --- /dev/null +++ b/puppet/puppet-steps.j2 @@ -0,0 +1,102 @@ + # Post deployment steps for all roles + # A single config is re-applied with an incrementing step number +{% for role in roles %} + # {{role.name}} Role post-deploy steps + {{role.name}}ArtifactsConfig: + type: deploy-artifacts.yaml + + {{role.name}}ArtifactsDeploy: + type: OS::Heat::StructuredDeployments + properties: + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}ArtifactsConfig} + + {{role.name}}PreConfig: + type: OS::TripleO::Tasks::{{role.name}}PreConfig + properties: + servers: {get_param: [servers, {{role.name}}]} + input_values: + update_identifier: {get_param: DeployIdentifier} + + {{role.name}}Config: + type: OS::TripleO::{{role.name}}Config + properties: + StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]} + + {{role.name}}PrePuppet: + type: OS::TripleO::Tasks::{{role.name}}PrePuppet + properties: + servers: {get_param: [servers, {{role.name}}]} + input_values: + update_identifier: {get_param: DeployIdentifier} + + {% if role.name in ['Controller', 'ObjectStorage'] %} + {{role.name}}SwiftRingDeploy: + type: OS::TripleO::Tasks::SwiftRingDeploy + properties: + servers: {get_param: [servers, {{role.name}}]} + {% endif %} + + # Step through a series of configuration steps +{% for step in range(1, 6) %} + {{role.name}}Deployment_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step == 1 %} + depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy] + {% else %} + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step{{step -1}} + {% endfor %} + {% endif %} + properties: + name: {{role.name}}Deployment_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}Config} + input_values: + step: {{step}} + update_identifier: {get_param: DeployIdentifier} +{% endfor %} + + {{role.name}}PostConfig: + type: OS::TripleO::Tasks::{{role.name}}PostConfig + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step5 + {% endfor %} + properties: + servers: {get_param: servers} + input_values: + update_identifier: {get_param: DeployIdentifier} + + # Note, this should come last, so use depends_on to ensure + # this is created after any other resources. 
+ {{role.name}}ExtraConfigPost: + depends_on: + {% for dep in roles %} + - {{dep.name}}PostConfig + {% endfor %} + type: OS::TripleO::NodeExtraConfigPost + properties: + servers: {get_param: [servers, {{role.name}}]} + + {{role.name}}PostPuppet: + depends_on: + - {{role.name}}ExtraConfigPost + type: OS::TripleO::Tasks::{{role.name}}PostPuppet + properties: + servers: {get_param: [servers, {{role.name}}]} + input_values: + update_identifier: {get_param: DeployIdentifier} + + {% if role.name in ['Controller', 'ObjectStorage'] %} + {{role.name}}SwiftRingUpdate: + type: OS::TripleO::Tasks::SwiftRingUpdate + depends_on: + {% for dep in roles %} + - {{dep.name}}Deployment_Step5 + {% endfor %} + properties: + servers: {get_param: [servers, {{role.name}}]} + {% endif %} +{% endfor %} diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml index 2f070da2..1f68f41f 100644 --- a/puppet/role.role.j2.yaml +++ b/puppet/role.role.j2.yaml @@ -137,7 +137,14 @@ parameters: Command or script snippet to run on all overcloud nodes to initialize the upgrade process. E.g. a repository switch. default: '' - + UpgradeInitCommonCommand: + type: string + description: | + Common commands required by the upgrades process. This should not + normally be modified by the operator and is set and unset in the + major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml + environment files. + default: '' resources: {{role}}: @@ -386,6 +393,7 @@ resources: - - "#!/bin/bash\n\n" - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" - get_param: UpgradeInitCommand + - get_param: UpgradeInitCommonCommand # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first @@ -473,6 +481,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment + depends_on: NetworkDeployment properties: config: {get_resource: UpdateConfig} server: {get_resource: {{role}}} diff --git a/puppet/services/README.rst b/puppet/services/README.rst index 6e4e9c1d..f19b6cca 100644 --- a/puppet/services/README.rst +++ b/puppet/services/README.rst @@ -19,8 +19,21 @@ environment to set per service parameters. Config Settings --------------- -Each service may define a config_settings output variable which returns -Hiera settings to be configured. +Each service may define three ways in which to output variables to configure Hiera +settings on the nodes. + + * config_settings: the hiera keys will be pushed to all roles of which the service + is a part. + + * global_config_settings: the hiera keys will be distributed to all roles. + + * service_config_settings: Takes an extra key to wire in values that are + defined for a service but need to be consumed by some other service. + For example: + service_config_settings: + haproxy: + foo: bar + This will set the hiera key 'foo' on all roles where haproxy is included. Deployment Steps ---------------- @@ -49,6 +62,32 @@ are re-asserted when applying latter ones. 5) Service activation (Pacemaker) +Batch Upgrade Steps +------------------- + +Each service template may optionally define an `upgrade_batch_tasks` key, which +is a list of ansible tasks to be performed during the upgrade process.
+ +Similar to the step_config, we allow a series of steps for the per-service +upgrade sequence, defined as ansible tasks with a tag e.g "step1" for the first +step, "step2" for the second, etc (currently only two steps are supported, but +more may be added when required as additional services get converted to batched +upgrades). + +Note that each step is performed in batches, then we move on to the next step +which is also performed in batches (we don't perform all steps on one node, +then move on to the next one, which means you can sequence rolling upgrades of +dependent services via the step value). + +The tasks performed at each step are service specific, but note that all batch +upgrade steps are performed before the `upgrade_tasks` described below. This +means that all services that support rolling upgrades can be upgraded without +downtime during `upgrade_batch_tasks`, then any remaining services are stopped +and upgraded during `upgrade_tasks`. + +The default batch size is 1, but this can be overridden for each role via the +`upgrade_batch_size` option in roles_data.yaml. + Upgrade Steps ------------- @@ -61,19 +100,34 @@ step, "step2" for the second, etc. Steps/tags correlate to the following: - 1) Quiesce the control-plane, e.g disable LoadBalancer, stop pacemaker cluster + 1) Stop all control-plane services. + + 2) Quiesce the control-plane, e.g disable LoadBalancer, stop + pacemaker cluster: this will stop the following resources: + - ocata: + - galera + - rabbit + - redis + - haproxy + - vips + - cinder-volumes + - cinder-backup + - manila-share + - rbd-mirror - 2) Stop all control-plane services, ready for upgrade + The exact order is controlled by the cluster constraints. - 3) Perform a package update, (either specific packages or the whole system) + 3) Perform a package update and install new packages: A general + upgrade is done, and only new packages should go into service + ansible tasks. 4) Start services needed for migration tasks (e.g DB) 5) Perform any migration tasks, e.g DB sync commands - 6) Start control-plane services - - 7) Any additional online migration tasks (e.g data migrations) +Note that the services are not started in the upgrade tasks - we instead re-run +puppet, which does any reconfiguration required for the new version, then starts +the services.
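For orientation, the sketch below shows roughly how a service template can wire both task lists into its role_data output. It is a hypothetical example (the service and unit names are placeholders, not part of this change); concrete instances appear in the aodh, ceilometer and ceph templates later in this diff.

```yaml
# Hypothetical sketch only: a service role_data output combining batched
# (rolling) upgrade tasks with stop-style upgrade tasks, selected by tag.
outputs:
  role_data:
    value:
      service_name: example_api
      step_config: |
        include ::tripleo::profile::base::example::api
      upgrade_batch_tasks:
        # Runs node-by-node (default batch size 1) before any upgrade_tasks.
        - name: Restart example backend in a rolling fashion
          tags: step0
          service: name=example-backend state=restarted
      upgrade_tasks:
        - name: Check if example_api is deployed
          command: systemctl is-enabled openstack-example-api
          tags: common
          ignore_errors: True
          register: example_api_enabled
        - name: Stop example_api service
          tags: step1
          when: example_api_enabled.rc == 0
          service: name=openstack-example-api state=stopped
```

As noted above, the batch size for the rolling portion can be raised per role with `upgrade_batch_size` in roles_data.yaml.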
Nova Server Metadata Settings ----------------------------- diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml index 4e735b45..d7c87b61 100644 --- a/puppet/services/aodh-api.yaml +++ b/puppet/services/aodh-api.yaml @@ -83,3 +83,9 @@ outputs: get_attr: [AodhBase, role_data, service_config_settings] step_config: | include tripleo::profile::base::aodh::api + metadata_settings: + get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: Stop aodh_api service (running under httpd) + tags: step1 + service: name=httpd state=stopped diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml index 8648a971..c2c2d023 100644 --- a/puppet/services/aodh-base.yaml +++ b/puppet/services/aodh-base.yaml @@ -69,8 +69,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/aodh' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' aodh::debug: {get_param: Debug} aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } aodh::rabbit_userid: {get_param: RabbitUserName} @@ -80,7 +79,7 @@ outputs: aodh::keystone::authtoken::project_name: 'service' aodh::keystone::authtoken::password: {get_param: AodhPassword} aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } aodh::auth::auth_password: {get_param: AodhPassword} aodh::auth::auth_region: 'regionOne' aodh::auth::auth_tenant_name: 'service' diff --git a/puppet/services/aodh-evaluator.yaml b/puppet/services/aodh-evaluator.yaml index 61f8c23f..b8be4a91 100644 --- a/puppet/services/aodh-evaluator.yaml +++ b/puppet/services/aodh-evaluator.yaml @@ -40,3 +40,17 @@ outputs: get_attr: [AodhBase, role_data, config_settings] step_config: | include tripleo::profile::base::aodh::evaluator + upgrade_tasks: + - name: Check if aodh_evaluator is deployed + command: systemctl is-enabled openstack-aodh-evaluator + tags: common + ignore_errors: True + register: aodh_evaluator_enabled + - name: "PreUpgrade step0,validation: Check service openstack-aodh-evaluator is running" + shell: /usr/bin/systemctl show 'openstack-aodh-evaluator' --property ActiveState | grep '\bactive\b' + when: aodh_evaluator_enabled.rc == 0 + tags: step0,validation + - name: Stop aodh_evaluator service + tags: step1 + when: aodh_evaluator_enabled.rc == 0 + service: name=openstack-aodh-evaluator state=stopped diff --git a/puppet/services/aodh-listener.yaml b/puppet/services/aodh-listener.yaml index 715165b3..f5c9330d 100644 --- a/puppet/services/aodh-listener.yaml +++ b/puppet/services/aodh-listener.yaml @@ -40,3 +40,17 @@ outputs: get_attr: [AodhBase, role_data, config_settings] step_config: | include tripleo::profile::base::aodh::listener + upgrade_tasks: + - name: Check if aodh_listener is deployed + command: systemctl is-enabled openstack-aodh-listener + tags: common + ignore_errors: True + register: aodh_listener_enabled + - name: "PreUpgrade step0,validation: Check service openstack-aodh-listener is running" + shell: /usr/bin/systemctl show 'openstack-aodh-listener' --property ActiveState | grep '\bactive\b' + when: aodh_listener_enabled.rc == 0 + tags: step0,validation + - name: Stop aodh_listener service + tags: step1 + when: 
aodh_listener_enabled.rc == 0 + service: name=openstack-aodh-listener state=stopped diff --git a/puppet/services/aodh-notifier.yaml b/puppet/services/aodh-notifier.yaml index da85581b..84c50dd6 100644 --- a/puppet/services/aodh-notifier.yaml +++ b/puppet/services/aodh-notifier.yaml @@ -40,3 +40,17 @@ outputs: get_attr: [AodhBase, role_data, config_settings] step_config: | include tripleo::profile::base::aodh::notifier + upgrade_tasks: + - name: Check if aodh_notifier is deployed + command: systemctl is-enabled openstack-aodh-notifier + tags: common + ignore_errors: True + register: aodh_notifier_enabled + - name: "PreUpgrade step0,validation: Check service openstack-aodh-notifier is running" + shell: /usr/bin/systemctl show 'openstack-aodh-notifier' --property ActiveState | grep '\bactive\b' + when: aodh_notifier_enabled.rc == 0 + tags: step0,validation + - name: Stop aodh_notifier service + tags: step1 + when: aodh_notifier_enabled.rc == 0 + service: name=openstack-aodh-notifier state=stopped diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml index 07ec1b3c..4c94f440 100644 --- a/puppet/services/apache-internal-tls-certmonger.yaml +++ b/puppet/services/apache-internal-tls-certmonger.yaml @@ -21,6 +21,22 @@ parameters: via parameter_defaults in the resource registry. type: json +resources: + + ApacheNetworks: + type: OS::Heat::Value + properties: + value: + # NOTE(jaosorior) Get unique network names to create + # certificates for those. We skip the tenant network since + # we don't need a certificate for that, and the external + # network will be handled in another template. + yaql: + expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant) + data: + map: + get_param: ServiceNetMap + outputs: role_data: description: Role data for the Apache role. @@ -38,13 +54,22 @@ outputs: hostname: "%{hiera('fqdn_NETWORK')}" principal: "HTTP/%{hiera('fqdn_NETWORK')}" for_each: - NETWORK: - # NOTE(jaosorior) Get unique network names to create - # certificates for those. We skip the tenant network since - # we don't need a certificate for that, and the external - # network will be handled in another template. 
- yaql: - expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant) - data: - map: - get_param: ServiceNetMap + NETWORK: {get_attr: [ApacheNetworks, value]} + metadata_settings: + repeat: + template: + - service: HTTP + network: $NETWORK + type: node + for_each: + $NETWORK: {get_attr: [ApacheNetworks, value]} + upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + ignore_errors: True + register: httpd_enabled + - name: "PreUpgrade step0,validation: Check service httpd is running" + shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b' + when: httpd_enabled.rc == 0 + tags: step0,validation diff --git a/puppet/services/apache.yaml b/puppet/services/apache.yaml index 2e95dcb0..2d950151 100644 --- a/puppet/services/apache.yaml +++ b/puppet/services/apache.yaml @@ -64,3 +64,15 @@ outputs: apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit } apache::mod::remoteip::proxy_ips: - "%{hiera('apache_remote_proxy_ips_network')}" + metadata_settings: + get_attr: [ApacheTLS, role_data, metadata_settings] + upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + ignore_errors: True + register: httpd_enabled + - name: "PreUpgrade step0,validation: Check service httpd is running" + shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b' + when: httpd_enabled.rc == 0 + tags: step0,validation diff --git a/puppet/services/auditd.yaml b/puppet/services/auditd.yaml new file mode 100644 index 00000000..8085ac8b --- /dev/null +++ b/puppet/services/auditd.yaml @@ -0,0 +1,50 @@ +heat_template_version: ocata + +description: > + AuditD configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
+ type: json + AuditdRules: + description: Mapping of auditd rules + type: json + default: {} + +outputs: + role_data: + description: Role data for the auditd service + value: + service_name: auditd + config_settings: + auditd::rules: {get_param: AuditdRules} + step_config: | + include ::tripleo::profile::base::auditd + upgrade_tasks: + - name: Check if auditd is deployed + command: systemctl is-enabled auditd + tags: common + ignore_errors: True + register: auditd_enabled + - name: "PreUpgrade step0,validation: Check if auditd is running" + shell: > + /usr/bin/systemctl show 'auditd' --property ActiveState | + grep '\bactive\b' + when: auditd_enabled.rc == 0 + tags: step0,validation + - name: Stop auditd service + tags: step2 + when: auditd_enabled.rc == 0 + service: name=auditd state=stopped diff --git a/puppet/services/barbican-api.yaml b/puppet/services/barbican-api.yaml index 000a744c..d8787c87 100644 --- a/puppet/services/barbican-api.yaml +++ b/puppet/services/barbican-api.yaml @@ -74,8 +74,8 @@ outputs: map_merge: - get_attr: [ApacheServiceBase, role_data, config_settings] - barbican::keystone::authtoken::password: {get_param: BarbicanPassword} - barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} barbican::keystone::authtoken::project_name: 'service' barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]} barbican::api::db_auto_create: false @@ -105,8 +105,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/barbican' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' tripleo.barbican_api.firewall_rules: '117 barbican': dport: @@ -136,11 +135,27 @@ outputs: nova::compute::barbican_endpoint: get_param: [EndpointMap, BarbicanInternal, uri] nova::compute::barbican_auth_endpoint: - get_param: [EndpointMap, KeystoneV3Internal, uri] + get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] cinder_api: cinder::api::keymgr_api_class: > castellan.key_manager.barbican_key_manager.BarbicanKeyManager cinder::api::keymgr_encryption_api_url: get_param: [EndpointMap, BarbicanInternal, uri] cinder::api::keymgr_encryption_auth_url: - get_param: [EndpointMap, KeystoneV3Internal, uri] + get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] + metadata_settings: + get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: Check if barbican_api is deployed + command: systemctl is-enabled openstack-barbican-api + tags: common + ignore_errors: True + register: barbican_api_enabled + - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running" + shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b' + when: barbican_api_enabled.rc == 0 + tags: step0,validation + - name: Install openstack-barbican-api package if it was disabled + tags: step3 + yum: name=openstack-barbican-api state=latest + when: barbican_api_enabled.rc != 0 diff --git a/puppet/services/ceilometer-agent-central.yaml b/puppet/services/ceilometer-agent-central.yaml index b9d8966c..80823526 100644 --- 
a/puppet/services/ceilometer-agent-central.yaml +++ b/puppet/services/ceilometer-agent-central.yaml @@ -52,5 +52,20 @@ outputs: map_merge: - get_attr: [CeilometerServiceBase, role_data, config_settings] - ceilometer_redis_password: {get_param: RedisPassword} + central_namespace: true step_config: | - include ::tripleo::profile::base::ceilometer::agent::central + include ::tripleo::profile::base::ceilometer::agent::polling + upgrade_tasks: + - name: Check if ceilometer_agent_central is deployed + command: systemctl is-enabled openstack-ceilometer-central + tags: common + ignore_errors: True + register: ceilometer_agent_central_enabled + - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-central is running" + shell: /usr/bin/systemctl show 'openstack-ceilometer-central' --property ActiveState | grep '\bactive\b' + when: ceilometer_agent_central_enabled.rc == 0 + tags: step0,validation + - name: Stop ceilometer_agent_central service + tags: step1 + when: ceilometer_agent_central_enabled.rc == 0 + service: name=openstack-ceilometer-central state=stopped diff --git a/puppet/services/ceilometer-agent-compute.yaml b/puppet/services/ceilometer-agent-compute.yaml index b1d36c94..546bcd98 100644 --- a/puppet/services/ceilometer-agent-compute.yaml +++ b/puppet/services/ceilometer-agent-compute.yaml @@ -21,6 +21,12 @@ parameters: MonitoringSubscriptionCeilometerCompute: default: 'overcloud-ceilometer-agent-compute' type: string + InstanceDiscoveryMethod: + default: 'libvirt_metadata' + description: Method used to discover instances running on compute node + type: string + constraints: + - allowed_values: ['naive', 'libvirt_metadata', 'workload_partitioning'] resources: CeilometerServiceBase: @@ -37,6 +43,23 @@ outputs: service_name: ceilometer_agent_compute monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCompute} config_settings: - get_attr: [CeilometerServiceBase, role_data, config_settings] + map_merge: + - get_attr: [CeilometerServiceBase, role_data, config_settings] + - ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod} + compute_namespace: true step_config: | - include ::tripleo::profile::base::ceilometer::agent::compute + include ::tripleo::profile::base::ceilometer::agent::polling + upgrade_tasks: + - name: Check if ceilometer_agent_compute is deployed + command: systemctl is-enabled openstack-ceilometer-compute + tags: common + ignore_errors: True + register: ceilometer_agent_compute_enabled + - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-compute is running" + shell: /usr/bin/systemctl show 'openstack-ceilometer-compute' --property ActiveState | grep '\bactive\b' + when: ceilometer_agent_compute_enabled.rc == 0 + tags: step0,validation + - name: Stop ceilometer_agent_compute service + tags: step1 + when: ceilometer_agent_compute_enabled.rc == 0 + service: name=openstack-ceilometer-compute state=stopped diff --git a/puppet/services/ceilometer-agent-notification.yaml b/puppet/services/ceilometer-agent-notification.yaml index 9c9a3bd9..4ee43f49 100644 --- a/puppet/services/ceilometer-agent-notification.yaml +++ b/puppet/services/ceilometer-agent-notification.yaml @@ -49,3 +49,17 @@ outputs: get_attr: [CeilometerServiceBase, role_data, config_settings] step_config: | include ::tripleo::profile::base::ceilometer::agent::notification + upgrade_tasks: + - name: Check if ceilometer_agent_notification is deployed + command: systemctl is-enabled openstack-ceilometer-notification + tags: common 
+ ignore_errors: True + register: ceilometer_agent_notification_enabled + - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-notification is running" + shell: /usr/bin/systemctl show 'openstack-ceilometer-notification' --property ActiveState | grep '\bactive\b' + when: ceilometer_agent_notification_enabled.rc == 0 + tags: step0,validation + - name: Stop ceilometer_agent_notification service + tags: step1 + when: ceilometer_agent_notification_enabled.rc == 0 + service: name=openstack-ceilometer-notification state=stopped diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml index 63e02d4f..f5ee9d40 100644 --- a/puppet/services/ceilometer-api.yaml +++ b/puppet/services/ceilometer-api.yaml @@ -90,3 +90,9 @@ outputs: get_attr: [CeilometerServiceBase, role_data, service_config_settings] step_config: | include ::tripleo::profile::base::ceilometer::api + metadata_settings: + get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: Stop ceilometer_api service (running under httpd) + tags: step1 + service: name=httpd state=stopped diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml index 0528368e..874c6893 100644 --- a/puppet/services/ceilometer-base.yaml +++ b/puppet/services/ceilometer-base.yaml @@ -46,14 +46,6 @@ parameters: default: 0 description: Number of workers for Ceilometer service. type: number - CeilometerStoreEvents: - default: false - description: Whether to store events in ceilometer. - type: boolean - EnableLegacyCeilometerApi: - default: false - description: Enable legacy ceilometer Api service if needed. - type: boolean EventPipelinePublishers: default: ['notifier://?topic=alarm.all'] description: A list of publishers to put in event_pipeline.yaml. 
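One change repeated across these service templates (aodh, barbican, and the ceilometer hunk that follows) is the database connection string: the inline `?bind_address=` hiera interpolation is replaced by `?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo`, so the client bind address is read from a MySQL client options file rather than being embedded in the URL. As a rough, hypothetical illustration of the rendered hiera value (the key name, VIP and password below are placeholders, not taken from this change):

```yaml
# Illustrative only: approximately what the list_join renders to once the
# EndpointMap host and the service password are substituted in.
ceilometer::db::database_connection: >-
  mysql+pymysql://ceilometer:SECRET@192.0.2.10/ceilometer?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
```

The `[tripleo]` group in that file is expected to carry client options such as the bind address that the old query argument used to pass inline.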
@@ -101,20 +93,16 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/ceilometer' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" - enable_legacy_ceilometer_api: {get_param: EnableLegacyCeilometerApi} + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' ceilometer_backend: {get_param: CeilometerBackend} - ceilometer::metering_secret: {get_param: CeilometerMeteringSecret} # we include db_sync class in puppet-tripleo ceilometer::db::sync_db: false ceilometer::keystone::authtoken::project_name: 'service' ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword} ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword} ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } - ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents} ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers} ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion} ceilometer::agent::auth::auth_tenant_name: 'service' diff --git a/puppet/services/ceilometer-collector.yaml b/puppet/services/ceilometer-collector.yaml index 88e7d781..b0ec971f 100644 --- a/puppet/services/ceilometer-collector.yaml +++ b/puppet/services/ceilometer-collector.yaml @@ -59,3 +59,17 @@ outputs: get_attr: [CeilometerServiceBase, role_data, service_config_settings] step_config: | include ::tripleo::profile::base::ceilometer::collector + upgrade_tasks: + - name: Check if ceilometer_collector is deployed + command: systemctl is-enabled openstack-ceilometer-collector + tags: common + ignore_errors: True + register: ceilometer_collector_enabled + - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-collector is running" + shell: /usr/bin/systemctl show 'openstack-ceilometer-collector' --property ActiveState | grep '\bactive\b' + when: ceilometer_collector_enabled.rc == 0 + tags: step0,validation + - name: Stop ceilometer_collector service + tags: step1 + when: ceilometer_collector_enabled.rc == 0 + service: name=openstack-ceilometer-collector state=stopped diff --git a/puppet/services/ceph-external.yaml b/puppet/services/ceph-external.yaml index b898d0bf..134f47c4 100644 --- a/puppet/services/ceph-external.yaml +++ b/puppet/services/ceph-external.yaml @@ -61,6 +61,12 @@ parameters: MonitoringSubscriptionCephExternal: default: 'overcloud-ceph-external' type: string + RbdDefaultFeatures: + default: '' + description: The default features enabled when creating a block device + image. Only applies to format 2 images. Set to '1' for Jewel + clients using older Ceph servers. 
+ type: string conditions: glance_multiple_locations: @@ -81,24 +87,25 @@ outputs: config_settings: tripleo::profile::base::ceph::ceph_mon_host: {get_param: CephExternalMonHost} ceph::profile::params::fsid: {get_param: CephClusterFSID} + ceph::profile::params::rbd_default_features: {get_param: RbdDefaultFeatures} ceph::profile::params::client_keys: - str_replace: - template: "{ - client.CLIENT_USER: { - secret: 'CLIENT_KEY', - mode: '0644', - cap_mon: 'allow r', - cap_osd: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' - } - }" - params: - CLIENT_USER: {get_param: CephClientUserName} - CLIENT_KEY: {get_param: CephClientKey} - NOVA_POOL: {get_param: NovaRbdPoolName} - CINDER_POOL: {get_param: CinderRbdPoolName} - CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName} - GLANCE_POOL: {get_param: GlanceRbdPoolName} - GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} + map_replace: + - CEPH_CLIENT_KEY: + secret: {get_param: CephClientKey} + mode: '0644' + cap_mon: 'allow r' + cap_osd: + str_replace: + template: 'allow class-read object_prefix rbd_children, allow rwx pool=CINDER_POOL, allow rwx pool=CINDERBACKUP_POOL, allow rwx pool=NOVA_POOL, allow rwx pool=GLANCE_POOL, allow rwx pool=GNOCCHI_POOL' + params: + NOVA_POOL: {get_param: NovaRbdPoolName} + CINDER_POOL: {get_param: CinderRbdPoolName} + CINDERBACKUP_POOL: {get_param: CinderBackupRbdPoolName} + GLANCE_POOL: {get_param: GlanceRbdPoolName} + GNOCCHI_POOL: {get_param: GnocchiRbdPoolName} + - keys: + CEPH_CLIENT_KEY: + list_join: ['.', ['client', {get_param: CephClientUserName}]] ceph::profile::params::manage_repo: false # FIXME(gfidente): we should not have to list the packages explicitly in # the templates, but this should stay until the following is fixed: diff --git a/puppet/services/pacemaker/ceilometer-api.yaml b/puppet/services/ceph-mds.yaml index 95e791d1..b68567fb 100644 --- a/puppet/services/pacemaker/ceilometer-api.yaml +++ b/puppet/services/ceph-mds.yaml @@ -1,7 +1,7 @@ heat_template_version: ocata description: > - OpenStack Ceilometer API service with Pacemaker configured with Puppet + Ceph MDS service. parameters: ServiceNetMap: @@ -18,13 +18,15 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MonitoringSubscriptionCeilometerApi: - default: 'overcloud-ceilometer-api' + CephMdsKey: + description: The cephx key for the MDS service. Can be created + with ceph-authtool --gen-print-key. type: string + hidden: true resources: - CeilometerServiceBase: - type: ../ceilometer-api.yaml + CephBase: + type: ./ceph-base.yaml properties: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} @@ -32,14 +34,16 @@ resources: outputs: role_data: - description: Role data for the Ceilometer API pacemaker role. + description: Role data for the Ceph MDS service. 
value: - service_name: ceilometer_api - monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerApi} + service_name: ceph_mds config_settings: map_merge: - - get_attr: [CeilometerServiceBase, role_data, config_settings] - - ceilometer::api::manage_service: false - ceilometer::api::enabled: false + - get_attr: [CephBase, role_data, config_settings] + - ceph::profile::params::mds_key: {get_param: CephMdsKey} + tripleo.ceph_mds.firewall_rules: + '112 ceph_mds': + dport: + - '6800-7300' step_config: | - include ::tripleo::profile::pacemaker::ceilometer::api + include ::tripleo::profile::base::ceph::mds diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml index f32bdd2b..d589ef89 100644 --- a/puppet/services/ceph-mon.yaml +++ b/puppet/services/ceph-mon.yaml @@ -28,6 +28,12 @@ parameters: CinderRbdPoolName: default: volumes type: string + ManilaCephFSDataPoolName: + default: manila_data + type: string + ManilaCephFSMetadataPoolName: + default: manila_metadata + type: string CinderBackupRbdPoolName: default: backups type: string @@ -53,6 +59,14 @@ parameters: } default: {} type: json + CephValidationRetries: + type: number + default: 5 + description: Number of retry attempts for Ceph validation + CephValidationDelay: + type: number + default: 10 + description: Interval (in seconds) in between validation checks MonitoringSubscriptionCephMon: default: 'overcloud-ceph-mon' type: string @@ -87,6 +101,8 @@ outputs: for_each: <%pool%>: - {get_param: CinderRbdPoolName} + - {get_param: ManilaCephFSDataPoolName} + - {get_param: ManilaCephFSMetadataPoolName} - {get_param: CinderBackupRbdPoolName} - {get_param: NovaRbdPoolName} - {get_param: GlanceRbdPoolName} @@ -105,3 +121,38 @@ outputs: get_attr: [CephBase, role_data, service_config_settings] step_config: | include ::tripleo::profile::base::ceph::mon + upgrade_batch_tasks: + # Note we perform these tasks in list order, but they are all step0 so + # we can perform a rolling upgrade of all mon nodes in step0, then a + # rolling upgrade of all osd nodes in step1 + - name: Check status + tags: step0,validation + shell: ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN" + - name: Stop CephMon + tags: step0 + service: + name: ceph-mon@{{ ansible_hostname }} + state: stopped + - name: Update Ceph packages + tags: step0 + yum: + name: ceph-mon + state: latest + - name: Start CephMon + tags: step0 + service: + name: ceph-mon@{{ ansible_hostname }} + state: started + # ceph-ansible + # https://github.com/ceph/ceph-ansible/blob/master/infrastructure-playbooks/rolling_update.yml#L149-L157 + - name: Wait for the monitor to join the quorum... 
+ tags: step0,ceph_quorum_validation + shell: | + ceph -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }} + register: ceph_quorum_nodecheck + until: ceph_quorum_nodecheck.rc == 0 + retries: {get_param: CephValidationRetries} + delay: {get_param: CephValidationDelay} + - name: ceph osd crush tunables default + tags: step0 + shell: ceph osd crush tunables default diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml index df0ee6c3..a97fa116 100644 --- a/puppet/services/ceph-osd.yaml +++ b/puppet/services/ceph-osd.yaml @@ -21,6 +21,24 @@ parameters: MonitoringSubscriptionCephOsd: default: 'overcloud-ceph-osd' type: string + CephValidationRetries: + type: number + default: 40 + description: Number of retry attempts for Ceph validation + CephValidationDelay: + type: number + default: 30 + description: Interval (in seconds) in between validation checks + IgnoreCephUpgradeWarnings: + type: boolean + default: false + description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean + +parameter_groups: +- label: deprecated + description: Do not use deprecated params, they will be removed. + parameters: + - IgnoreCephUpgradeWarnings resources: CephBase: @@ -45,3 +63,67 @@ outputs: - '6800-7300' step_config: | include ::tripleo::profile::base::ceph::osd + upgrade_batch_tasks: + - name: Check status + tags: step1,validation + shell: ceph health | grep -qv HEALTH_ERR + - name: Get OSD IDs + tags: step1 + shell: ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }' + register: osd_ids + # "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb + - name: ceph osd set noout + tags: step1 + command: ceph osd set noout + - name: ceph osd set norebalance + tags: step1 + command: ceph osd set norebalance + - name: ceph osd set nodeep-scrub + tags: step1 + command: ceph osd set nodeep-scrub + - name: ceph osd set noscrub + tags: step1 + command: ceph osd set noscrub + - name: Stop CephOSD + tags: step1 + service: + name: ceph-osd@{{ item }} + state: stopped + with_items: "{{osd_ids.stdout.strip().split()}}" + - name: Update Ceph packages + tags: step1 + yum: + name: ceph-osd + state: latest + - name: Start CephOSD + tags: step1 + service: + name: ceph-osd@{{ item }} + state: started + with_items: "{{osd_ids.stdout.strip().split()}}" + # with awk we are meant to check if $2 and $4 are *the same* but it returns 1 when + # they are, so the check is inverted to produce an useful exit code + - name: Wait for clean pgs... 
+ tags: step1,ceph_pgs_clean_validation + vars: + ignore_warnings: {get_param: IgnoreCephUpgradeWarnings} + shell: | + ceph pg stat | awk '{exit($2!=$4)}' && ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN" + register: ceph_pgs_healthcheck + until: ceph_pgs_healthcheck.rc == 0 + retries: {get_param: CephValidationRetries} + delay: {get_param: CephValidationDelay} + when: + - not ignore_warnings + - name: ceph osd unset noout + tags: step1 + command: ceph osd unset noout + - name: ceph osd unset norebalance + tags: step1 + command: ceph osd unset norebalance + - name: ceph osd unset nodeep-scrub + tags: step1 + command: ceph osd unset nodeep-scrub + - name: ceph osd unset noscrub + tags: step1 + command: ceph osd unset noscrub diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml index 6448387c..01531971 100644 --- a/puppet/services/ceph-rgw.yaml +++ b/puppet/services/ceph-rgw.yaml @@ -54,10 +54,14 @@ outputs: - get_attr: [CephBase, role_data, config_settings] - tripleo::profile::base::ceph::rgw::rgw_key: {get_param: CephRgwKey} tripleo::profile::base::ceph::rgw::keystone_admin_token: {get_param: AdminToken} - tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + tripleo::profile::base::ceph::rgw::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} tripleo::profile::base::ceph::rgw::civetweb_bind_ip: {get_param: [ServiceNetMap, CephRgwNetwork]} tripleo::profile::base::ceph::rgw::civetweb_bind_port: {get_param: [EndpointMap, CephRgwInternal, port]} - ceph::params::user_radosgw: ceph + tripleo::profile::base::ceph::rgw::rgw_keystone_version: v3 + ceph::profile::params::rgw_keystone_admin_domain: default + ceph::profile::params::rgw_keystone_admin_project: service + ceph::profile::params::rgw_keystone_admin_user: swift + ceph::profile::params::rgw_keystone_admin_password: {get_param: SwiftPassword} tripleo.ceph_rgw.firewall_rules: '122 ceph rgw': dport: {get_param: [EndpointMap, CephRgwInternal, port]} @@ -68,7 +72,28 @@ outputs: ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]} ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]} ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]} - ceph::rgw::keystone::auth::user: 'swift' - ceph::rgw::keystone::auth::password: {get_param: SwiftPassword} ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion} - ceph::rgw::keystone::auth::tenant: 'service' + ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ] + ceph::rgw::keystone::auth::tenant: service + ceph::rgw::keystone::auth::user: swift + ceph::rgw::keystone::auth::password: {get_param: SwiftPassword} + upgrade_tasks: + - name: Gather RGW instance ID + tags: common + shell: hiera -c /etc/puppet/hiera.yaml ceph::profile::params::rgw_name radosgw.gateway + register: rgw_id + - name: Check if ceph_rgw is deployed + command: systemctl is-enabled ceph-radosgw@{{rgw_id.stdout}} + tags: common + ignore_errors: True + register: ceph_rgw_enabled + - name: Check status + shell: /usr/bin/systemctl show ceph-radosgw@{{rgw_id.stdout}} --property ActiveState | grep '\bactive\b' + when: ceph_rgw_enabled.rc == 0 + tags: step0,validation + - name: Stop RGW instance + tags: step1 + when: ceph_rgw_enabled.rc == 0 + service: + name: ceph-radosgw@{{rgw_id.stdout}} + state: stopped diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml index 376ea2c5..49a5f613 100644 --- 
a/puppet/services/cinder-api.yaml +++ b/puppet/services/cinder-api.yaml @@ -81,17 +81,17 @@ outputs: - get_attr: [CinderBase, role_data, config_settings] - get_attr: [ApacheServiceBase, role_data, config_settings] - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} cinder::keystone::authtoken::password: {get_param: CinderPassword} cinder::keystone::authtoken::project_name: 'service' cinder::api::enable_proxy_headers_parsing: true - cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL' + cinder::api::nova_catalog_info: 'compute:nova:internalURL' + cinder::api::nova_catalog_admin_info: 'compute:nova:adminURL' # TODO(emilien) move it to puppet-cinder cinder::config: DEFAULT/swift_catalog_info: value: 'object-store:swift:internalURL' - cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} tripleo::profile::base::cinder::cinder_enable_db_purge: {get_param: CinderEnableDBPurge} tripleo.cinder_api.firewall_rules: '119 cinder': @@ -147,3 +147,28 @@ outputs: cinder::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + metadata_settings: + get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: Check if cinder_api is deployed + command: systemctl is-enabled openstack-cinder-api + tags: common + ignore_errors: True + register: cinder_api_enabled + - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running" + shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b' + when: cinder_api_enabled.rc == 0 + tags: step0,validation + - name: check for cinder running under apache (post upgrade) + tags: step1 + shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder" + register: cinder_apache + ignore_errors: true + - name: Stop cinder_api service (running under httpd) + tags: step1 + service: name=httpd state=stopped + when: "cinder_apache.rc == 0" + - name: Stop and disable cinder_api service (pre-upgrade not under httpd) + tags: step1 + when: cinder_api_enabled.rc == 0 + service: name=openstack-cinder-api state=stopped enabled=no diff --git a/puppet/services/cinder-backend-dellps.yaml b/puppet/services/cinder-backend-dellps.yaml new file mode 100644 index 00000000..1f15c53e --- /dev/null +++ b/puppet/services/cinder-backend-dellps.yaml @@ -0,0 +1,85 @@ +# Copyright (c) 2017 Dell Inc. or its subsidiaries. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +heat_template_version: ocata + +description: > + Openstack Cinder Dell EMC PS Series backend + +parameters: + CinderEnableDellPsBackend: + type: boolean + default: true + CinderDellPsBackendName: + type: string + default: 'tripleo_dellps' + CinderDellPsSanIp: + type: string + CinderDellPsSanLogin: + type: string + CinderDellPsSanPassword: + type: string + hidden: true + CinderDellPsSanThinProvision: + type: boolean + default: true + CinderDellPsGroupname: + type: string + default: 'group-0' + CinderDellPsPool: + type: string + default: 'default' + CinderDellPsChapLogin: + type: string + default: '' + CinderDellPsChapPassword: + type: string + default: '' + CinderDellPsUseChap: + type: boolean + default: false + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + type: json + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + +outputs: + role_data: + description: Role data for the Cinder Dell EMC PS Series backend. + value: + service_name: cinder_backend_dellps + config_settings: + tripleo::profile::base::cinder::volume::cinder_enable_dellps_backend: {get_param: CinderEnableDellPsBackend} + cinder::backend::eqlx::volume_backend_name: {get_param: CinderDellPsBackendName} + cinder::backend::eqlx::san_ip: {get_param: CinderDellPsSanIp} + cinder::backend::eqlx::san_login: {get_param: CinderDellPsSanLogin} + cinder::backend::eqlx::san_password: {get_param: CinderDellPsSanPassword} + cinder::backend::eqlx::san_thin_provision: {get_param: CinderDellPsSanThinProvision} + cinder::backend::eqlx::eqlx_group_name: {get_param: CinderDellPsGroupname} + cinder::backend::eqlx::eqlx_pool: {get_param: CinderDellPsPool} + cinder::backend::eqlx::eqlx_use_chap: {get_param: CinderDellPsUseChap} + cinder::backend::eqlx::eqlx_chap_login: {get_param: CinderDellPsChapLogin} + cinder::backend::eqlx::eqlx_chap_password: {get_param: CinderDellPsChapPassword} + step_config: | + include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/cinder-backend-dellsc.yaml b/puppet/services/cinder-backend-dellsc.yaml new file mode 100644 index 00000000..6a6196ac --- /dev/null +++ b/puppet/services/cinder-backend-dellsc.yaml @@ -0,0 +1,85 @@ +# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
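The PS Series backend defined above only lacks defaults for the SAN address and credentials; a minimal sketch of the parameter_defaults a deployer would pair with it (the address and credentials below are placeholders, not values from this diff):

    parameter_defaults:
      CinderEnableDellPsBackend: true
      CinderDellPsSanIp: 192.0.2.20
      CinderDellPsSanLogin: <san user>
      CinderDellPsSanPassword: <san password>
      CinderDellPsGroupname: group-0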
+heat_template_version: ocata + +description: > + Openstack Cinder Dell EMC Storage Center backend + +parameters: + CinderEnableDellScBackend: + type: boolean + default: true + CinderDellScBackendName: + type: string + default: 'tripleo_dellsc' + CinderDellScSanIp: + type: string + CinderDellScSanLogin: + type: string + default: 'Admin' + CinderDellScSanPassword: + type: string + hidden: true + CinderDellScSsn: + type: number + default: 64702 + CinderDellScIscsiIpAddress: + type: string + default: '' + CinderDellScIscsiPort: + type: number + default: 3260 + CinderDellScApiPort: + type: number + default: 3033 + CinderDellScServerFolder: + type: string + default: 'dellsc_server' + CinderDellScVolumeFolder: + type: string + default: 'dellsc_volume' + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + type: json + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + +outputs: + role_data: + description: Role data for the Cinder Dell EMC Storage Center backend. + value: + service_name: cinder_backend_dellsc + config_settings: + tripleo::profile::base::cinder::volume::cinder_enable_dellsc_backend: {get_param: CinderEnableDellScBackend} + cinder::backend::dellsc_iscsi::volume_backend_name: {get_param: CinderDellScBackendName} + cinder::backend::dellsc_iscsi::san_ip: {get_param: CinderDellScSanIp} + cinder::backend::dellsc_iscsi::san_login: {get_param: CinderDellScSanLogin} + cinder::backend::dellsc_iscsi::san_password: {get_param: CinderDellScSanPassword} + cinder::backend::dellsc_iscsi::dell_sc_ssn: {get_param: CinderDellScSsn} + cinder::backend::dellsc_iscsi::iscsi_ip_address: {get_param: CinderDellScIscsiIpAddress} + cinder::backend::dellsc_iscsi::iscsi_port: {get_param: CinderDellScIscsiPort} + cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_param: CinderDellScApiPort} + cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_param: CinderDellScServerFolder} + cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_param: CinderDellScVolumeFolder} + step_config: | + include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/cinder-backend-scaleio.yaml b/puppet/services/cinder-backend-scaleio.yaml new file mode 100644 index 00000000..eb709cd5 --- /dev/null +++ b/puppet/services/cinder-backend-scaleio.yaml @@ -0,0 +1,111 @@ +# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+heat_template_version: ocata + +description: > + OpenStack Cinder Dell EMC ScaleIO backend + +parameters: + CinderEnableScaleIOBackend: + type: boolean + default: true + CinderScaleIOBackendName: + type: string + default: 'tripleo_scaleio' + CinderScaleIOSanIp: + type: string + default: '' + CinderScaleIOSanLogin: + type: string + default: '' + CinderScaleIOSanPassword: + type: string + default: '' + hidden: true + CinderScaleIORestServerPort: + type: number + default: 443 + CinderScaleIOVerifyServerCertificate: + type: boolean + default: false + CinderScaleIOServerCertificatePath: + type: string + default: '' + CinderScaleIOProtectionDomainId: + type: string + default: '' + CinderScaleIOProtectionDomainName: + type: string + default: '' + CinderScaleIOStoragePoolId: + type: string + default: '' + CinderScaleIOStoragePoolName: + type: string + default: '' + CinderScaleIOStoragePools: + type: string + default: '' + CinderScaleIORoundVolumeCapacity: + type: boolean + default: true + CinderScaleIOUnmapVolumeBeforeDeletion: + type: boolean + default: false + CinderScaleIOMaxOverSubscriptionRatio: + type: string + default: '' + CinderScaleIOSanThinProvision: + type: boolean + default: true + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + type: json + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + +outputs: + role_data: + description: Role data for the Cinder Dell EMC ScaleIO backend. + value: + service_name: cinder_backend_scaleio + config_settings: + tripleo::profile::base::cinder::volume::cinder_enable_scaleio_backend: {get_param: CinderEnableScaleIOBackend} + cinder::backend::scaleio::volume_backend_name: {get_param: CinderScaleIOBackendName} + cinder::backend::scaleio::sio_login: {get_param: CinderScaleIOSanLogin} + cinder::backend::scaleio::sio_password: {get_param: CinderScaleIOSanPassword} + cinder::backend::scaleio::sio_server_hostname: {get_param: CinderScaleIOSanIp} + cinder::backend::scaleio::sio_server_port: {get_param: CinderScaleIORestServerPort} + cinder::backend::scaleio::sio_verify_server_certificate: {get_param: CinderScaleIOVerifyServerCertificate} + cinder::backend::scaleio::sio_server_certificate_path: {get_param: CinderScaleIOServerCertificatePath} + cinder::backend::scaleio::sio_protection_domain_name: {get_param: CinderScaleIOProtectionDomainName} + cinder::backend::scaleio::sio_protection_domain_id: {get_param: CinderScaleIOProtectionDomainId} + cinder::backend::scaleio::sio_storage_pool_id: {get_param: CinderScaleIOStoragePoolId} + cinder::backend::scaleio::sio_storage_pool_name: {get_param: CinderScaleIOStoragePoolName} + cinder::backend::scaleio::sio_storage_pools: {get_param: CinderScaleIOStoragePools} + cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity} + cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion} + cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio} + cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision} + step_config: | + include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/cinder-base.yaml
b/puppet/services/cinder-base.yaml index be4b4af2..88e7edb7 100644 --- a/puppet/services/cinder-base.yaml +++ b/puppet/services/cinder-base.yaml @@ -44,6 +44,46 @@ parameters: default: guest description: The username for RabbitMQ type: string + CinderCronDbPurgeMinute: + type: string + description: > + Cron to move deleted instances to another table - Minute + default: '1' + CinderCronDbPurgeHour: + type: string + description: > + Cron to move deleted instances to another table - Hour + default: '0' + CinderCronDbPurgeMonthday: + type: string + description: > + Cron to move deleted instances to another table - Month Day + default: '*' + CinderCronDbPurgeMonth: + type: string + description: > + Cron to move deleted instances to another table - Month + default: '*' + CinderCronDbPurgeWeekday: + type: string + description: > + Cron to move deleted instances to another table - Week Day + default: '*' + CinderCronDbPurgeUser: + type: string + description: > + Cron to move deleted instances to another table - User + default: 'keystone' + CinderCronDbPurgeAge: + type: string + description: > + Cron to move deleted instances to another table - Age + default: '0' + CinderCronDbPurgeDestination: + type: string + description: > + Cron to move deleted instances to another table - Log destination + default: '/var/log/cinder/cinder-rowsflush.log' outputs: role_data: @@ -60,8 +100,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/cinder' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' cinder::debug: {get_param: Debug} cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL} cinder::rabbit_userid: {get_param: RabbitUserName} @@ -71,3 +110,12 @@ outputs: cinder::cron::db_purge::destination: '/dev/null' cinder::db::database_db_max_retries: -1 cinder::db::database_max_retries: -1 + cinder::cron::db_purge::minute: {get_param: CinderCronDbPurgeMinute} + cinder::cron::db_purge::hour: {get_param: CinderCronDbPurgeHour} + cinder::cron::db_purge::monthday: {get_param: CinderCronDbPurgeMonthday} + cinder::cron::db_purge::month: {get_param: CinderCronDbPurgeMonth} + cinder::cron::db_purge::weekday: {get_param: CinderCronDbPurgeWeekday} + cinder::cron::db_purge::user: {get_param: CinderCronDbPurgeUser} + cinder::cron::db_purge::age: {get_param: CinderCronDbPurgeAge} + cinder::cron::db_purge::destination: {get_param: CinderCronDbPurgeDestination} + cinder::glance::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} diff --git a/puppet/services/cinder-hpelefthand-iscsi.yaml b/puppet/services/cinder-hpelefthand-iscsi.yaml new file mode 100644 index 00000000..ca7d2838 --- /dev/null +++ b/puppet/services/cinder-hpelefthand-iscsi.yaml @@ -0,0 +1,56 @@ +heat_template_version: ocata + +description: > + Configure Cinder HPELeftHandISCSIDriver + +parameters: + # Config specific parameters, to be provided via parameter_defaults + CinderHPELeftHandISCSIApiUrl: + type: string + CinderHPELeftHandISCSIUserName: + type: string + CinderHPELeftHandISCSIPassword: + type: string + hidden: true + CinderHPELeftHandISCSIBackendName: + type: string + default: 'tripleo_hpelefthand' + CinderHPELeftHandISCSIChapEnabled: + type: boolean + default: false + CinderHPELeftHandClusterName: + type: string + CinderHPELeftHandDebug: + type: boolean + default: false + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. 
Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + type: json + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + +outputs: + role_data: + description: Role data for Cinder HPELeftHandISCSIDriver + value: + service_name: cinder_hpelefthand_iscsi + config_settings: + tripleo::profile::base::cinder::volume::cinder_enable_hpelefthand_backend: true + cinder::backend::hpelefthand_iscsi::hpelefthand_api_url: {get_param: CinderHPELeftHandISCSIApiUrl} + cinder::backend::hpelefthand_iscsi::hpelefthand_username: {get_param: CinderHPELeftHandISCSIUserName} + cinder::backend::hpelefthand_iscsi::hpelefthand_password: {get_param: CinderHPELeftHandISCSIPassword} + cinder::backend::hpelefthand_iscsi::volume_backend_name: {get_param: CinderHPELeftHandISCSIBackendName} + cinder::backend::hpelefthand_iscsi::hpelefthand_iscsi_chap_enabled: {get_param: CinderHPELeftHandISCSIChapEnabled} + cinder::backend::hpelefthand_iscsi::hpelefthand_clustername: {get_param: CinderHPELeftHandClusterName} + cinder::backend::hpelefthand_iscsi::hpelefthand_debug: {get_param: CinderHPELeftHandDebug} + step_config: | + include ::tripleo::profile::base::cinder::volume diff --git a/puppet/services/cinder-scheduler.yaml b/puppet/services/cinder-scheduler.yaml index 3dd3f64e..f8361f6f 100644 --- a/puppet/services/cinder-scheduler.yaml +++ b/puppet/services/cinder-scheduler.yaml @@ -51,3 +51,17 @@ outputs: - cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler step_config: | include ::tripleo::profile::base::cinder::scheduler + upgrade_tasks: + - name: Check if cinder_scheduler is deployed + command: systemctl is-enabled openstack-cinder-scheduler + tags: common + ignore_errors: True + register: cinder_scheduler_enabled + - name: "PreUpgrade step0,validation: Check service openstack-cinder-scheduler is running" + shell: /usr/bin/systemctl show 'openstack-cinder-scheduler' --property ActiveState | grep '\bactive\b' + when: cinder_scheduler_enabled.rc == 0 + tags: step0,validation + - name: Stop cinder_scheduler service + tags: step1 + when: cinder_scheduler_enabled.rc == 0 + service: name=openstack-cinder-scheduler state=stopped diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml index 66706bc4..b52955ef 100644 --- a/puppet/services/cinder-volume.yaml +++ b/puppet/services/cinder-volume.yaml @@ -20,6 +20,10 @@ parameters: default: lioadm description: The iSCSI helper to use with cinder. type: string + CinderISCSIProtocol: + default: iscsi + description: Whether to use TCP ('iscsi') or iSER RDMA ('iser') for iSCSI + type: string CinderLVMLoopDeviceSize: default: 10280 description: The size of the loopback file used by the cinder LVM driver. 
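The CinderCronDbPurge* parameters added to cinder-base above feed straight into puppet-cinder's db_purge cron settings; a minimal sketch of overriding the purge schedule from an environment file (the values are illustrative, the defaults remain as shown in the hunk):

    parameter_defaults:
      CinderCronDbPurgeHour: '3'
      CinderCronDbPurgeMinute: '0'
      CinderCronDbPurgeAge: '30'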
@@ -97,6 +101,7 @@ outputs: SERVERS: {get_param: CinderNfsServers} tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize} tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper} + tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol} tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: {get_param: CinderRbdPoolName} tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: {get_param: CephClientUserName} tripleo.cinder_volume.firewall_rules: @@ -110,3 +115,17 @@ outputs: tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: {get_param: [ServiceNetMap, CinderIscsiNetwork]} step_config: | include ::tripleo::profile::base::cinder::volume + upgrade_tasks: + - name: Check if cinder_volume is deployed + command: systemctl is-enabled openstack-cinder-volume + tags: common + ignore_errors: True + register: cinder_volume_enabled + - name: "PreUpgrade step0,validation: Check service openstack-cinder-volume is running" + shell: /usr/bin/systemctl show 'openstack-cinder-volume' --property ActiveState | grep '\bactive\b' + when: cinder_volume_enabled.rc == 0 + tags: step0,validation + - name: Stop cinder_volume service + tags: step1 + when: cinder_volume_enabled.rc == 0 + service: name=openstack-cinder-volume state=stopped diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml new file mode 100644 index 00000000..8bc9f2e3 --- /dev/null +++ b/puppet/services/congress.yaml @@ -0,0 +1,116 @@ +heat_template_version: ocata + +description: > + OpenStack Congress service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + CongressPassword: + description: The password for the congress service account. + type: string + hidden: true + Debug: + type: string + default: '' + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Role data for the Congress role. 
+ value: + service_name: congress + config_settings: + congress_password: {get_param: CongressPassword} + congress::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://congress:' + - {get_param: CongressPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/congress' + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + congress::debug: {get_param: Debug} + congress::rpc_backend: rabbit + congress::rabbit_userid: {get_param: RabbitUserName} + congress::rabbit_password: {get_param: RabbitPassword} + congress::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + congress::rabbit_port: {get_param: RabbitClientPort} + congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]} + + congress::keystone::authtoken::project_name: 'service' + congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + + congress::db::mysql::password: {get_param: CongressPassword} + congress::db::mysql::user: congress + congress::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + congress::db::mysql::dbname: congress + congress::db::mysql::allowed_hosts: + - '%' + - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + + service_config_settings: + keystone: + congress::keystone::auth::tenant: 'service' + congress::keystone::auth::password: {get_param: CongressPassword} + congress::keystone::auth::public_url: {get_param: [EndpointMap, CongressPublic, uri]} + congress::keystone::auth::internal_url: {get_param: [EndpointMap, CongressInternal, uri]} + congress::keystone::auth::admin_url: {get_param: [EndpointMap, CongressAdmin, uri]} + + step_config: | + include ::tripleo::profile::base::congress + + upgrade_tasks: + - name: Check if congress is deployed + command: systemctl is-enabled openstack-congress-server + tags: common + ignore_errors: True + register: congress_enabled + - name: "PreUpgrade step0,validation: Check service openstack-congress-server is running" + shell: /usr/bin/systemctl show 'openstack-congress-server' --property ActiveState | grep '\bactive\b' + when: congress_enabled.rc == 0 + tags: step0,validation + - name: Stop congress service + tags: step1 + when: congress_enabled.rc == 0 + service: name=openstack-congress-server state=stopped + - name: Install openstack-congress package if it was disabled + tags: step3 + yum: name=openstack-congress state=latest + when: congress_enabled.rc != 0 diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml index 8290cae7..63ec4446 100644 --- a/puppet/services/database/mongodb.yaml +++ b/puppet/services/database/mongodb.yaml @@ -66,3 +66,10 @@ outputs: mongodb::server::bind_ip: {get_param: [ServiceNetMap, MongodbNetwork]} step_config: | include ::tripleo::profile::base::database::mongodb + upgrade_tasks: + - name: Stop mongodb service + tags: step2 + service: name=mongod state=stopped + - name: Start mongodb service + tags: step4 + service: name=mongod state=started diff --git a/puppet/services/database/mysql-client.yaml b/puppet/services/database/mysql-client.yaml new file mode 100644 index 00000000..78456e28 --- /dev/null +++ b/puppet/services/database/mysql-client.yaml @@ -0,0 +1,34 @@ +heat_template_version: ocata + +description: > + Mysql client settings + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> 
network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + EnableInternalTLS: + type: boolean + default: false + +outputs: + role_data: + description: Role for setting mysql client parameters + value: + service_name: mysql_client + config_settings: + tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]} + tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS} + step_config: | + include ::tripleo::profile::base::database::mysql::client diff --git a/puppet/services/database/mysql-internal-tls-certmonger.yaml b/puppet/services/database/mysql-internal-tls-certmonger.yaml index 56d037e7..9f7eaf57 100644 --- a/puppet/services/database/mysql-internal-tls-certmonger.yaml +++ b/puppet/services/database/mysql-internal-tls-certmonger.yaml @@ -41,3 +41,7 @@ outputs: template: "mysql/%{hiera('cloud_name_NETWORK')}" params: NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]} + metadata_settings: + - service: mysql + network: {get_param: [ServiceNetMap, MysqlNetwork]} + type: vip diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml index 5eefe6bd..808f1353 100644 --- a/puppet/services/database/mysql.yaml +++ b/puppet/services/database/mysql.yaml @@ -34,6 +34,10 @@ parameters: default: true description: Whether to use Galera instead of regular MariaDB. type: boolean + NovaPassword: + description: The password for the nova db account + type: string + hidden: true resources: @@ -94,11 +98,27 @@ outputs: {get_param: [ServiceNetMap, MysqlNetwork]} step_config: | include ::tripleo::profile::base::database::mysql + metadata_settings: + get_attr: [MySQLTLS, role_data, metadata_settings] upgrade_tasks: + - name: Check for galera root password + tags: step0 + file: path=/root/.my.cnf state=file - name: Stop service tags: step2 service: name=mariadb state=stopped - name: Start service tags: step4 service: name=mariadb state=started - + - name: Setup cell_v2 (create cell0 database) + tags: step4 + mysql_db: + name: nova_cell0 + state: present + - name: Setup cell_v2 (grant access to the nova DB user) + tags: step4 + mysql_user: + str_replace: + template: "name=nova password=PASSWORD host=\"%\" priv=\"nova.*:ALL/nova_cell0.*:ALL,GRANT\" state=present" + params: + PASSWORD: {get_param: NovaPassword} diff --git a/puppet/services/disabled/glance-registry.yaml b/puppet/services/disabled/glance-registry.yaml new file mode 100644 index 00000000..7bf4a1fd --- /dev/null +++ b/puppet/services/disabled/glance-registry.yaml @@ -0,0 +1,30 @@ +heat_template_version: ocata + +description: > + OpenStack Glance Registry service, disabled since ocata + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + +outputs: + role_data: + description: Role data for the disabled Glance Registry role. 
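The str_replace wrapper in the cell_v2 grant task above is resolved by Heat before the playbook ever reaches Ansible; the rendered task would look roughly like this, with the placeholder standing in for the NovaPassword value:

    - name: Setup cell_v2 (grant access to the nova DB user)
      tags: step4
      mysql_user: name=nova password=<NovaPassword> host="%" priv="nova.*:ALL/nova_cell0.*:ALL,GRANT" state=present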
+ value: + service_name: glance_registry + upgrade_tasks: + - name: Stop and disable glance_registry service on upgrade + tags: step1 + service: name=openstack-glance-registry state=stopped enabled=no diff --git a/puppet/services/docker.yaml b/puppet/services/docker.yaml new file mode 100644 index 00000000..e7da2383 --- /dev/null +++ b/puppet/services/docker.yaml @@ -0,0 +1,43 @@ +heat_template_version: ocata + +description: > + Configures docker on the host + +parameters: + DockerNamespace: + description: namespace + default: tripleoupstream + type: string + DockerNamespaceIsRegistry: + type: boolean + default: false + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + +outputs: + role_data: + description: Role data for the docker service + value: + service_name: docker + config_settings: + tripleo::profile::base::docker::docker_namespace: {get_param: DockerNamespace} + tripleo::profile::base::docker::insecure_registry: {get_param: DockerNamespaceIsRegistry} + step_config: | + include ::tripleo::profile::base::docker + upgrade_tasks: + - name: Install docker packages on upgrade if missing + tags: step3 + yum: name=docker state=latest + diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml new file mode 100644 index 00000000..10f6d311 --- /dev/null +++ b/puppet/services/ec2-api.yaml @@ -0,0 +1,138 @@ +heat_template_version: ocata + +description: > + OpenStack EC2-API service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + Ec2ApiWorkers: + default: 0 + description: Number of workers for EC2-API service. + type: number + Ec2ApiPassword: + description: The password for the nova service and db account, used by nova-api. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + MonitoringSubscriptionEc2Api: + default: 'overcloud-ec2-api' + type: string + Ec2ApiLoggingSource: + type: json + default: + tag: openstack.ec2.api + path: /var/log/ec2api/ec2api.log + EnablePackageInstall: + default: 'false' + description: Set to true to enable package installation via Puppet + type: boolean + + +conditions: + nova_workers_zero: {equals : [{get_param: Ec2ApiWorkers}, 0]} + +outputs: + role_data: + description: Role data for the EC2-API service. 
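The nova_workers_zero condition above lets the template skip worker tuning entirely when Ec2ApiWorkers stays at its default of 0, leaving the packaged defaults in effect; overriding it is a one-line environment entry (the value below is illustrative):

    parameter_defaults:
      Ec2ApiWorkers: 4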
+ value: + service_name: ec2_api + monitoring_subscription: {get_param: MonitoringSubscriptionEc2Api} + logging_source: {get_param: Ec2ApiLoggingSource} + logging_groups: + - nova + config_settings: + map_merge: + - tripleo.ec2_api.firewall_rules: + '113 ec2_api': + dport: + - 8788 + - 13788 + ec2api::keystone::authtoken::project_name: 'service' + ec2api::keystone::authtoken::password: {get_param: Ec2ApiPassword} + ec2api::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + ec2api::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + ec2api::api::enabled: true + ec2api::package_manage: {get_param: EnablePackageInstall} + ec2api::api::ec2api_listen: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, Ec2ApiNetwork]} + ec2api::metadata::metadata_listen: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]} + ec2api::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://ec2_api:' + - {get_param: Ec2ApiPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/ec2_api' + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + - + if: + - nova_workers_zero + - {} + - ec2api::api::ec2api_workers: {get_param: Ec2ApiWorkers} + ec2api::metadata::metadata_workers: {get_param: Ec2ApiWorkers} + step_config: | + include tripleo::profile::base::nova::ec2api + service_config_settings: + keystone: + ec2api::keystone::auth::tenant: 'service' + ec2api::keystone::auth::public_url: {get_param: [EndpointMap, Ec2ApiPublic, uri]} + ec2api::keystone::auth::internal_url: {get_param: [EndpointMap, Ec2ApiInternal, uri]} + ec2api::keystone::auth::admin_url: {get_param: [EndpointMap, Ec2ApiAdmin, uri]} + ec2api::keystone::auth::password: {get_param: Ec2ApiPassword} + ec2api::keystone::auth::region: {get_param: KeystoneRegion} + mysql: + ec2api::db::mysql::password: {get_param: Ec2ApiPassword} + ec2api::db::mysql::user: ec2_api + ec2api::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + ec2api::db::mysql::dbname: ec2_api + ec2api::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + upgrade_tasks: + - name: Check if ec2-api is deployed + command: systemctl is-enabled openstack-ec2-api + tags: common + ignore_errors: True + register: ec2_api_enabled + - name: "PreUpgrade step0,validation: Check if openstack-ec2-api is running" + shell: > + /usr/bin/systemctl show 'openstack-ec2-api' --property ActiveState | + grep '\bactive\b' + when: ec2_api_enabled.rc == 0 + tags: step0,validation + - name: Stop openstack-ec2-api service + tags: step1 + when: ec2_api_enabled.rc == 0 + service: name=openstack-ec2-api state=stopped + - name: Install openstack-ec2-api package if it was disabled + tags: step3 + yum: name=openstack-ec2-api state=latest + when: ec2_api_enabled.rc != 0 + diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml new file mode 100644 index 00000000..7cdd8451 --- /dev/null +++ b/puppet/services/etcd.yaml @@ -0,0 +1,73 @@ +heat_template_version: ocata + +description: > + Etcd service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
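The '?read_default_file=...' form used for ec2api::db::database_connection above (and in the cinder, glance and gnocchi hunks of this change) typically renders to a DSN of the following shape; the host and password are placeholders and the mysql+pymysql scheme is the usual EndpointMap value rather than something fixed by this diff:

    ec2api::db::database_connection: mysql+pymysql://ec2_api:<Ec2ApiPassword>@<mysql vip>/ec2_api?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo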
+ type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + EtcdInitialClusterToken: + default: 'etcd-tripleo' + description: Initial cluster token for the etcd cluster during bootstrap. + type: string + MonitoringSubscriptionEtcd: + default: 'overcloud-etcd' + type: string + +outputs: + role_data: + description: Role data for the Etcd role. + value: + service_name: etcd + monitoring_subscription: {get_param: MonitoringSubscriptionEtcd} + config_settings: + etcd::etcd_name: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]} + # NOTE: bind IP is found in Heat replacing the network name with the local node IP + # for the given network; replacement examples (eg. for internal_api): + # internal_api -> IP + # internal_api_uri -> [IP] + # internal_api_subnet - > IP/CIDR + tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]} + tripleo::profile::base::etcd::client_port: '2379' + tripleo::profile::base::etcd::peer_port: '2380' + etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken} + etcd::manage_package: false + tripleo.etcd.firewall_rules: + '141 etcd': + dport: + - 2379 + - 2380 + step_config: | + include ::tripleo::profile::base::etcd + upgrade_tasks: + - name: Check if etcd is deployed + command: systemctl is-enabled etcd + tags: step0,validation + ignore_errors: True + register: etcd_enabled + - name: "PreUpgrade step0,validation: Check if etcd is running" + shell: > + /usr/bin/systemctl show 'etcd' --property ActiveState | + grep '\bactive\b' + when: etcd_enabled.rc == 0 + tags: step0,validation + - name: Stop etcd service + tags: step2 + service: name=etcd state=stopped diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml index 36df724b..ce389dc1 100644 --- a/puppet/services/glance-api.yaml +++ b/puppet/services/glance-api.yaml @@ -45,8 +45,23 @@ parameters: default: tag: openstack.glance.api path: /var/log/glance/api.log + EnableInternalTLS: + type: boolean + default: false + +conditions: + use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]} resources: + + TLSProxyBase: + type: OS::TripleO::Services::TLSProxyBase + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} + GlanceBase: type: ./glance-base.yaml properties: @@ -66,6 +81,7 @@ outputs: config_settings: map_merge: - get_attr: [GlanceBase, role_data, config_settings] + - get_attr: [TLSProxyBase, role_data, config_settings] - glance::api::database_connection: list_join: - '' @@ -75,17 +91,12 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/glance' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]} glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } - glance::api::registry_host: - str_replace: - template: "'REGISTRY_HOST'" - params: - REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]} - 
glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] } + glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } + glance::api::enable_v1_api: false + glance::api::enable_v2_api: true glance::api::authtoken::password: {get_param: GlancePassword} glance::api::enable_proxy_headers_parsing: true glance::api::debug: {get_param: Debug} @@ -104,8 +115,48 @@ outputs: # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - glance::api::bind_host: {get_param: [ServiceNetMap, GlanceApiNetwork]} + tripleo::profile::base::glance::api::tls_proxy_bind_ip: + get_param: [ServiceNetMap, GlanceApiNetwork] + tripleo::profile::base::glance::api::tls_proxy_fqdn: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, GlanceApiNetwork]} + tripleo::profile::base::glance::api::tls_proxy_port: + get_param: [EndpointMap, GlanceInternal, port] + # Bind to localhost if internal TLS is enabled, since we put a TLs + # proxy in front. + glance::api::bind_host: + if: + - use_tls_proxy + - 'localhost' + - {get_param: [ServiceNetMap, GlanceApiNetwork]} step_config: | include ::tripleo::profile::base::glance::api service_config_settings: get_attr: [GlanceBase, role_data, service_config_settings] + upgrade_tasks: + - name: Check if glance_api is deployed + command: systemctl is-enabled openstack-glance-api + tags: common + ignore_errors: True + register: glance_api_enabled + #(TODO) Remove all glance-registry bits in Pike. + - name: Check if glance_registry is deployed + command: systemctl is-enabled openstack-glance-registry + tags: common + ignore_errors: True + register: glance_registry_enabled + - name: "PreUpgrade step0,validation: Check service openstack-glance-api is running" + shell: /usr/bin/systemctl show 'openstack-glance-api' --property ActiveState | grep '\bactive\b' + tags: step0,validation + when: glance_api_enabled.rc == 0 + - name: Stop glance_api service + tags: step1 + when: glance_api_enabled.rc == 0 + service: name=openstack-glance-api state=stopped + - name: Stop and disable glance registry (removed for Ocata) + tags: step1 + when: glance_registry_enabled.rc == 0 + service: name=openstack-glance-registry state=stopped enabled=no diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml index d715ac02..f5548982 100644 --- a/puppet/services/glance-base.yaml +++ b/puppet/services/glance-base.yaml @@ -105,8 +105,6 @@ outputs: glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword} glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL} glance::notify::rabbitmq::notification_driver: messagingv2 - glance::registry::db::database_db_max_retries: -1 - glance::registry::db::database_max_retries: -1 tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled} tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare} tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions} diff --git a/puppet/services/glance-registry.yaml b/puppet/services/glance-registry.yaml deleted file mode 100644 index 1f7e6e3d..00000000 --- a/puppet/services/glance-registry.yaml +++ /dev/null @@ -1,102 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Glance Registry service configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. 
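Returning to the glance-api hunk above: the use_tls_proxy condition is driven purely by EnableInternalTLS, so enabling it makes glance-api bind to localhost while the TLS proxy listens on the GlanceApiNetwork address. A minimal sketch:

    parameter_defaults:
      EnableInternalTLS: true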
This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - Debug: - default: '' - description: Set to True to enable debugging on all services. - type: string - GlancePassword: - description: The password for the glance service and db account, used by the glance services. - type: string - hidden: true - GlanceWorkers: - default: '' - description: | - Number of worker processes for glance registry. If left unset (empty - string), the default value will result in the configuration being left - unset and a system-dependent default value will be chosen (e.g.: number of - processors). Please note that this will create a large number of processes - on systems with a large number of CPUs resulting in excess memory - consumption. It is recommended that a suitable non-default value be - selected on such systems. - type: string - MonitoringSubscriptionGlanceRegistry: - default: 'overcloud-glance-registry' - type: string - GlanceRegistryLoggingSource: - type: json - default: - tag: openstack.glance.registry - path: /var/log/glance/registry.log - -resources: - GlanceBase: - type: ./glance-base.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Glance Registry role. - value: - service_name: glance_registry - monitoring_subscription: {get_param: MonitoringSubscriptionGlanceRegistry} - logging_source: {get_param: GlanceRegistryLoggingSource} - logging_groups: - - glance - config_settings: - map_merge: - - get_attr: [GlanceBase, role_data, config_settings] - - - glance::registry::database_connection: - list_join: - - '' - - - {get_param: [EndpointMap, MysqlInternal, protocol]} - - '://glance:' - - {get_param: GlancePassword} - - '@' - - {get_param: [EndpointMap, MysqlInternal, host]} - - '/glance' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" - glance::registry::authtoken::password: {get_param: GlancePassword} - glance::registry::authtoken::project_name: 'service' - glance::registry::pipeline: 'keystone' - glance::registry::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - glance::registry::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } - glance::registry::debug: {get_param: Debug} - glance::registry::workers: {get_param: GlanceWorkers} - tripleo.glance_registry.firewall_rules: - '112 glance_registry': - dport: - - 9191 - # NOTE: bind IP is found in Heat replacing the network name with the - # local node IP for the given network; replacement examples - # (eg. 
for internal_api): - # internal_api -> IP - # internal_api_uri -> [IP] - # internal_api_subnet - > IP/CIDR - glance::registry::bind_host: {get_param: [ServiceNetMap, GlanceRegistryNetwork]} - step_config: | - include ::tripleo::profile::base::glance::registry - service_config_settings: - get_attr: [GlanceBase, role_data, config_settings] diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml index 3929e005..08a939a6 100644 --- a/puppet/services/gnocchi-api.yaml +++ b/puppet/services/gnocchi-api.yaml @@ -84,7 +84,7 @@ outputs: gnocchi::api::enable_proxy_headers_parsing: true gnocchi::api::service_name: 'httpd' gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword} gnocchi::keystone::authtoken::project_name: 'service' gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS} @@ -103,15 +103,9 @@ outputs: # internal_api_subnet - > IP/CIDR gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]} gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi' - gnocchi::api::host: - str_replace: - template: - "%{hiera('fqdn_$NETWORK')}" - params: - $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]} gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]} step_config: | include ::tripleo::profile::base::gnocchi::api @@ -131,3 +125,9 @@ outputs: gnocchi::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + metadata_settings: + get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: Stop gnocchi_api service (running under httpd) + tags: step1 + service: name=httpd state=stopped diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml index d92b1766..c6310056 100644 --- a/puppet/services/gnocchi-base.yaml +++ b/puppet/services/gnocchi-base.yaml @@ -67,9 +67,8 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/gnocchi' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" - gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types' + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + gnocchi::db::sync::extra_opts: '--skip-storage' gnocchi::storage::swift::swift_user: 'service:gnocchi' gnocchi::storage::swift::swift_auth_version: 2 gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword} diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml index e5f9a8e7..9d76c2e7 100644 --- a/puppet/services/gnocchi-metricd.yaml +++ b/puppet/services/gnocchi-metricd.yaml @@ -22,7 +22,7 @@ parameters: default: 'overcloud-gnocchi-metricd' type: string GnocchiMetricdWorkers: - default: '' + default: '%{::os_workers}' description: Number of workers for Gnocchi MetricD type: string @@ -46,3 +46,17 @@ outputs: - gnocchi::metricd::workers: {get_param: GnocchiMetricdWorkers} step_config: | include 
::tripleo::profile::base::gnocchi::metricd + upgrade_tasks: + - name: Check if gnocchi_metricd is deployed + command: systemctl is-enabled openstack-gnocchi-metricd + tags: common + ignore_errors: True + register: gnocchi_metricd_enabled + - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-metricd is running" + shell: /usr/bin/systemctl show 'openstack-gnocchi-metricd' --property ActiveState | grep '\bactive\b' + when: gnocchi_metricd_enabled.rc == 0 + tags: step0,validation + - name: Stop gnocchi_metricd service + tags: step1 + when: gnocchi_metricd_enabled.rc == 0 + service: name=openstack-gnocchi-metricd state=stopped diff --git a/puppet/services/gnocchi-statsd.yaml b/puppet/services/gnocchi-statsd.yaml index df438b37..bb8d3bce 100644 --- a/puppet/services/gnocchi-statsd.yaml +++ b/puppet/services/gnocchi-statsd.yaml @@ -45,3 +45,17 @@ outputs: proto: 'udp' step_config: | include ::tripleo::profile::base::gnocchi::statsd + upgrade_tasks: + - name: Check if gnocchi_statsd is deployed + command: systemctl is-enabled openstack-gnocchi-statsd + tags: common + ignore_errors: True + register: gnocchi_statsd_enabled + - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-statsd is running" + shell: /usr/bin/systemctl show 'openstack-gnocchi-statsd' --property ActiveState | grep '\bactive\b' + when: gnocchi_statsd_enabled.rc == 0 + tags: step0,validation + - name: Stop gnocchi_statsd service + tags: step1 + when: gnocchi_statsd_enabled.rc == 0 + service: name=openstack-gnocchi-statsd state=stopped diff --git a/puppet/services/haproxy-internal-tls-certmonger.yaml b/puppet/services/haproxy-internal-tls-certmonger.yaml index 77457593..ae226163 100644 --- a/puppet/services/haproxy-internal-tls-certmonger.yaml +++ b/puppet/services/haproxy-internal-tls-certmonger.yaml @@ -19,6 +19,22 @@ parameters: via parameter_defaults in the resource registry. type: json +resources: + + HAProxyNetworks: + type: OS::Heat::Value + properties: + value: + # NOTE(jaosorior) Get unique network names to create + # certificates for those. We skip the tenant network since + # we don't need a certificate for that, and the external + # network will be handled in another template. + yaql: + expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant) + data: + map: + get_param: ServiceNetMap + outputs: role_data: description: Role data for the HAProxy internal TLS via certmonger role. @@ -39,13 +55,12 @@ outputs: postsave_cmd: "" # TODO principal: "haproxy/%{hiera('cloud_name_NETWORK')}" for_each: - NETWORK: - # NOTE(jaosorior) Get unique network names to create - # certificates for those. We skip the tenant network since - # we don't need a certificate for that, and the external - # network will be handled in another template. 
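The yaql expression moved into the HAProxyNetworks value above simply collects the distinct network names from ServiceNetMap, dropping external and tenant. With an illustrative map (the network names are examples, not defaults asserted by this diff) it would evaluate as follows:

    # input (excerpt of ServiceNetMap)
    MysqlNetwork: internal_api
    GlanceApiNetwork: internal_api
    CephMonNetwork: storage
    NeutronTenantNetwork: tenant
    # resulting HAProxyNetworks value
    [internal_api, storage]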
- yaql: - expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant) - data: - map: - get_param: ServiceNetMap + NETWORK: {get_attr: [HAProxyNetworks, value]} + metadata_settings: + repeat: + template: + - service: haproxy + network: $NETWORK + type: vip + for_each: + $NETWORK: {get_attr: [HAProxyNetworks, value]} diff --git a/puppet/services/haproxy-public-tls-certmonger.yaml b/puppet/services/haproxy-public-tls-certmonger.yaml index 227697b9..6013b026 100644 --- a/puppet/services/haproxy-public-tls-certmonger.yaml +++ b/puppet/services/haproxy-public-tls-certmonger.yaml @@ -35,3 +35,7 @@ outputs: hostname: "%{hiera('cloud_name_external')}" postsave_cmd: "" # TODO principal: "haproxy/%{hiera('cloud_name_external')}" + metadata_settings: + - service: haproxy + network: external + type: vip diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml index 9049c901..bd5b9ef6 100644 --- a/puppet/services/haproxy.yaml +++ b/puppet/services/haproxy.yaml @@ -78,9 +78,26 @@ outputs: step_config: | include ::tripleo::profile::base::haproxy upgrade_tasks: + - name: Check if haproxy is deployed + command: systemctl is-enabled haproxy + tags: common + ignore_errors: True + register: haproxy_enabled + - name: "PreUpgrade step0,validation: Check service haproxy is running" + shell: /usr/bin/systemctl show 'haproxy' --property ActiveState | grep '\bactive\b' + when: haproxy_enabled.rc == 0 + tags: step0,validation - name: Stop haproxy service - tags: step1 + tags: step2 + when: haproxy_enabled.rc == 0 service: name=haproxy state=stopped - name: Start haproxy service tags: step4 # Needed at step 4 for mysql + when: haproxy_enabled.rc == 0 service: name=haproxy state=started + metadata_settings: + yaql: + expression: '[].concat(coalesce($.data.internal, []), coalesce($.data.public, []))' + data: + public: {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]} + internal: {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]} diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml index 10a7780b..483f0a45 100644 --- a/puppet/services/heat-api-cfn.yaml +++ b/puppet/services/heat-api-cfn.yaml @@ -84,3 +84,17 @@ outputs: heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]} heat::keystone::auth_cfn::password: {get_param: HeatPassword} heat::keystone::auth_cfn::region: {get_param: KeystoneRegion} + upgrade_tasks: + - name: Check if heat_api_cfn is deployed + command: systemctl is-enabled openstack-heat-api-cfn + tags: common + ignore_errors: True + register: heat_api_cfn_enabled + - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cfn is running" + shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b' + when: heat_api_cfn_enabled.rc == 0 + tags: step0,validation + - name: Stop heat_api_cfn service + tags: step1 + when: heat_api_cfn_enabled.rc == 0 + service: name=openstack-heat-api-cfn state=stopped diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml index 1178d62b..8879bcb2 100644 --- a/puppet/services/heat-api-cloudwatch.yaml +++ b/puppet/services/heat-api-cloudwatch.yaml @@ -66,3 +66,17 @@ outputs: heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]} step_config: | include ::tripleo::profile::base::heat::api_cloudwatch + upgrade_tasks: + - name: Check if heat_api_cloudwatch is deployed + command: systemctl is-enabled openstack-heat-api-cloudwatch + tags: 
common + ignore_errors: True + register: heat_api_cloudwatch_enabled + - name: "PreUpgrade step0,validation: Check service openstack-heat-api-cloudwatch is running" + shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b' + when: heat_api_cloudwatch_enabled.rc == 0 + tags: step0,validation + - name: Stop heat_api_cloudwatch service + tags: step1 + when: heat_api_cloudwatch_enabled.rc == 0 + service: name=openstack-heat-api-cloudwatch state=stopped diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml index a32521c7..2464011b 100644 --- a/puppet/services/heat-api.yaml +++ b/puppet/services/heat-api.yaml @@ -84,3 +84,17 @@ outputs: heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]} heat::keystone::auth::password: {get_param: HeatPassword} heat::keystone::auth::region: {get_param: KeystoneRegion} + upgrade_tasks: + - name: Check is heat_api is deployed + command: systemctl is-enabled openstack-heat-api + tags: common + ignore_errors: True + register: heat_api_enabled + - name: "PreUpgrade step0,validation: Check service openstack-heat-api is running" + shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b' + when: heat_api_enabled.rc == 0 + tags: step0,validation + - name: Stop heat_api service + tags: step1 + when: heat_api_enabled.rc == 0 + service: name=openstack-heat-api state=stopped diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml index 3f6dba60..e83a9edd 100644 --- a/puppet/services/heat-base.yaml +++ b/puppet/services/heat-base.yaml @@ -44,6 +44,65 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + HeatCronPurgeDeletedEnsure: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Ensure + default: 'present' + HeatCronPurgeDeletedMinute: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Minute + default: '1' + HeatCronPurgeDeletedHour: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Hour + default: '0' + HeatCronPurgeDeletedMonthday: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Month Day + default: '*' + HeatCronPurgeDeletedMonth: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Month + default: '*' + HeatCronPurgeDeletedWeekday: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Week Day + default: '*' + HeatCronPurgeDeletedMaxDelay: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Max Delay + default: '3600' + HeatCronPurgeDeletedUser: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - User + default: 'heat' + HeatCronPurgeDeletedAge: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Age + default: '30' + HeatCronPurgeDeletedAgeType: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Age type + default: 'days' + HeatCronPurgeDeletedDestination: + type: string + description: > + Cron to purge db entries marked as deleted and older than $age - Log destination + default: '/dev/null' + HeatMaxJsonBodySize: + default: 1048576 + description: Maximum raw 
byte size of the Heat API JSON request body. + type: number outputs: role_data: @@ -57,6 +116,7 @@ outputs: heat::rabbit_port: {get_param: RabbitClientPort} heat::debug: {get_param: Debug} heat::enable_proxy_headers_parsing: true + heat::rpc_response_timeout: 600 # We need this because the default heat policy.json no longer works on TripleO # https://git.openstack.org/cgit/openstack/heat/commit/?id=ac86702172ddf01f5bdc3f3cd99d2e32ad9b7024 heat::policy::policies: @@ -66,17 +126,27 @@ outputs: heat::rabbit_heartbeat_timeout_threshold: 60 heat::keystone::authtoken::project_name: 'service' heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } heat::keystone::authtoken::password: {get_param: HeatPassword} heat::keystone::domain::domain_name: 'heat_stack' heat::keystone::domain::domain_admin: 'heat_stack_domain_admin' heat::keystone::domain::domain_admin_email: 'heat_stack_domain_admin@localhost' - heat::cron::purge_deleted::age: 30 - heat::cron::purge_deleted::age_type: 'days' - heat::cron::purge_deleted::maxdelay: 3600 - heat::cron::purge_deleted::destination: '/dev/null' heat::db::database_db_max_retries: -1 heat::db::database_max_retries: -1 + heat::yaql_memory_quota: 100000 + heat::yaql_limit_iterators: 1000 + heat::cron::purge_deleted::ensure: {get_param: HeatCronPurgeDeletedEnsure} + heat::cron::purge_deleted::minute: {get_param: HeatCronPurgeDeletedMinute} + heat::cron::purge_deleted::hour: {get_param: HeatCronPurgeDeletedHour} + heat::cron::purge_deleted::monthday: {get_param: HeatCronPurgeDeletedMonthday} + heat::cron::purge_deleted::month: {get_param: HeatCronPurgeDeletedMonth} + heat::cron::purge_deleted::weekday: {get_param: HeatCronPurgeDeletedWeekday} + heat::cron::purge_deleted::maxdelay: {get_param: HeatCronPurgeDeletedMaxDelay} + heat::cron::purge_deleted::user: {get_param: HeatCronPurgeDeletedUser} + heat::cron::purge_deleted::age: {get_param: HeatCronPurgeDeletedAge} + heat::cron::purge_deleted::age_type: {get_param: HeatCronPurgeDeletedAgeType} + heat::cron::purge_deleted::destination: {get_param: HeatCronPurgeDeletedDestination} + heat::max_json_body_size: {get_param: HeatMaxJsonBodySize} service_config_settings: keystone: tripleo::profile::base::keystone::heat_admin_domain: 'heat_stack' diff --git a/puppet/services/heat-engine.yaml b/puppet/services/heat-engine.yaml index 8faccd2b..a166f3a7 100644 --- a/puppet/services/heat-engine.yaml +++ b/puppet/services/heat-engine.yaml @@ -111,8 +111,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/heat' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]} heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword} heat::engine::auth_encryption_key: @@ -137,3 +136,17 @@ outputs: keystone: # This is needed because the keystone profile handles creating the domain tripleo::profile::base::keystone::heat_admin_password: {get_param: HeatStackDomainAdminPassword} + upgrade_tasks: + - name: Check if heat_engine is deployed + command: systemctl is-enabled openstack-heat-engine + tags: common + ignore_errors: True + register: heat_engine_enabled + - name: "PreUpgrade 
step0,validation: Check service openstack-heat-engine is running" + shell: /usr/bin/systemctl show 'openstack-heat-engine' --property ActiveState | grep '\bactive\b' + when: heat_engine_enabled.rc == 0 + tags: step0,validation + - name: Stop heat_engine service + tags: step1 + when: heat_engine_enabled.rc == 0 + service: name=openstack-heat-engine state=stopped diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml index cf35d202..60b009a8 100644 --- a/puppet/services/horizon.yaml +++ b/puppet/services/horizon.yaml @@ -27,6 +27,14 @@ parameters: description: A list of IP/Hostname for the server Horizon is running on. Used for header checks. type: comma_delimited_list + HorizonPasswordValidator: + description: Regex for password validation + type: string + default: '' + HorizonPasswordValidatorHelp: + description: Help text for password validation + type: string + default: '' HorizonSecret: description: Secret key for Django type: string @@ -71,6 +79,8 @@ outputs: options: ['FollowSymLinks','MultiViews'] horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]} horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]} + horizon::password_validator: {get_param: [HorizonPasswordValidator]} + horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]} horizon::secret_key: yaql: expression: $.data.passwords.where($ != '').first() @@ -86,3 +96,20 @@ outputs: - horizon::django_debug: {get_param: Debug} step_config: | include ::tripleo::profile::base::horizon + # Ansible tasks to handle upgrade + upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + ignore_errors: True + register: httpd_enabled + - name: "PreUpgrade step0,validation: Check if httpd is running" + shell: > + /usr/bin/systemctl show 'httpd' --property ActiveState | + grep '\bactive\b' + when: httpd_enabled.rc == 0 + tags: step0,validation + - name: Stop Horizon (under httpd) + tags: step1 + when: httpd_enabled.rc == 0 + service: name=httpd state=stopped diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml index aebb37b2..7aab6f8d 100644 --- a/puppet/services/ironic-api.yaml +++ b/puppet/services/ironic-api.yaml @@ -25,6 +25,10 @@ parameters: MonitoringSubscriptionIronicApi: default: 'overcloud-ironic-api' type: string + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint resources: IronicBase: @@ -47,7 +51,7 @@ outputs: ironic::api::authtoken::project_name: 'service' ironic::api::authtoken::username: 'ironic' ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} # NOTE: bind IP is found in Heat replacing the network name with the # local node IP for the given network; replacement examples # (eg. 
for internal_api): @@ -73,6 +77,7 @@ outputs: ironic::keystone::auth::auth_name: 'ironic' ironic::keystone::auth::password: {get_param: IronicPassword } ironic::keystone::auth::tenant: 'service' + ironic::keystone::auth::region: {get_param: KeystoneRegion} mysql: ironic::db::mysql::password: {get_param: IronicPassword} ironic::db::mysql::user: ironic @@ -81,3 +86,7 @@ outputs: ironic::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + upgrade_tasks: + - name: Stop ironic_api service + tags: step1 + service: name=openstack-ironic-api state=stopped diff --git a/puppet/services/ironic-base.yaml b/puppet/services/ironic-base.yaml index ad7ef6ea..d186b047 100644 --- a/puppet/services/ironic-base.yaml +++ b/puppet/services/ironic-base.yaml @@ -60,8 +60,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/ironic' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' ironic::debug: {get_param: Debug} ironic::rabbit_userid: {get_param: RabbitUserName} ironic::rabbit_password: {get_param: RabbitPassword} diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml index 194afec7..f9547bef 100644 --- a/puppet/services/ironic-conductor.yaml +++ b/puppet/services/ironic-conductor.yaml @@ -24,6 +24,14 @@ parameters: "full" for full cleaning, "metadata" to clean only disk metadata (partition table). type: string + IronicCleaningNetwork: + default: 'provisioning' + description: Name or UUID of the *overcloud* network used for cleaning + bare metal nodes. The default value of "provisioning" can be + left during the initial deployment (when no networks are + created yet) and should be changed to an actual UUID in + a post-deployment stack update. 
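Following the IronicCleaningNetwork description above, the switch from the 'provisioning' name to a real UUID would be done in a post-deployment stack update with an environment file along these lines (the UUID is a placeholder, not a real value):

    parameter_defaults:
      # Placeholder - substitute the UUID reported by `openstack network show`
      # for the overcloud cleaning network.
      IronicCleaningNetwork: 6a5f1f0e-0000-0000-0000-000000000000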
+ type: string IronicEnabledDrivers: default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo'] description: Enabled Ironic drivers @@ -61,6 +69,7 @@ outputs: - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]} ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]} ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase} + ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork} ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers} # We need an endpoint containing a real IP, not a VIP here ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]} @@ -98,3 +107,7 @@ outputs: step_config: | include ::tripleo::profile::base::ironic::conductor + upgrade_tasks: + - name: Stop ironic_conductor service + tags: step1 + service: name=openstack-ironic-conductor state=stopped diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml index 29157959..fec455d1 100644 --- a/puppet/services/kernel.yaml +++ b/puppet/services/kernel.yaml @@ -31,6 +31,7 @@ outputs: config_settings: kernel_modules: nf_conntrack: {} + ip_conntrack_proto_sctp: {} sysctl_settings: net.ipv4.tcp_keepalive_intvl: value: 1 diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index f69e20b4..f40c8d99 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -113,6 +113,51 @@ parameters: EnableInternalTLS: type: boolean default: false + KeystoneCronTokenFlushEnsure: + type: string + description: > + Cron to purge expired tokens - Ensure + default: 'present' + KeystoneCronTokenFlushMinute: + type: string + description: > + Cron to purge expired tokens - Minute + default: '1' + KeystoneCronTokenFlushHour: + type: string + description: > + Cron to purge expired tokens - Hour + default: '0' + KeystoneCronTokenFlushMonthday: + type: string + description: > + Cron to purge expired tokens - Month Day + default: '*' + KeystoneCronTokenFlushMonth: + type: string + description: > + Cron to purge expired tokens - Month + default: '*' + KeystoneCronTokenFlushWeekday: + type: string + description: > + Cron to purge expired tokens - Week Day + default: '*' + KeystoneCronTokenFlushMaxDelay: + type: string + description: > + Cron to purge expired tokens - Max Delay + default: '0' + KeystoneCronTokenFlushDestination: + type: string + description: > + Cron to purge expired tokens - Log destination + default: '/var/log/keystone/keystone-tokenflush.log' + KeystoneCronTokenFlushUser: + type: string + description: > + Cron to purge expired tokens - User + default: 'keystone' resources: @@ -148,8 +193,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/keystone' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' keystone::admin_token: {get_param: AdminToken} keystone::admin_password: {get_param: AdminPassword} keystone::roles::admin::password: {get_param: AdminPassword} @@ -182,6 +226,7 @@ outputs: keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} keystone::endpoint::region: {get_param: KeystoneRegion} + keystone::endpoint::version: '' keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge} keystone::rabbit_heartbeat_timeout_threshold: 60 keystone::cron::token_flush::maxdelay: 3600 @@ -239,6 +284,16 @@ 
outputs: # NOTE: this applies to all 2 bind IP settings below... keystone::wsgi::apache::bind_host: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]} keystone::wsgi::apache::admin_bind_host: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]} + keystone::cron::token_flush::ensure: {get_param: KeystoneCronTokenFlushEnsure} + keystone::cron::token_flush::minute: {get_param: KeystoneCronTokenFlushMinute} + keystone::cron::token_flush::hour: {get_param: KeystoneCronTokenFlushHour} + keystone::cron::token_flush::monthday: {get_param: KeystoneCronTokenFlushMonthday} + keystone::cron::token_flush::month: {get_param: KeystoneCronTokenFlushMonth} + keystone::cron::token_flush::weekday: {get_param: KeystoneCronTokenFlushWeekday} + keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay} + keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination} + keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser} + step_config: | include ::tripleo::profile::base::keystone service_config_settings: @@ -253,11 +308,7 @@ outputs: # Ansible tasks to handle upgrade upgrade_tasks: - name: Stop keystone service (running under httpd) - tags: step2 + tags: step1 service: name=httpd state=stopped - - name: Sync keystone DB - tags: step5 - command: keystone-manage db_sync - - name: Start keystone service (running under httpd) - tags: step6 - service: name=httpd state=started + metadata_settings: + get_attr: [ApacheServiceBase, role_data, metadata_settings] diff --git a/puppet/services/logging/fluentd-client.yaml b/puppet/services/logging/fluentd-client.yaml index 769ab68f..57595b82 100644 --- a/puppet/services/logging/fluentd-client.yaml +++ b/puppet/services/logging/fluentd-client.yaml @@ -62,3 +62,23 @@ outputs: get_attr: [LoggingConfiguration, LoggingSharedKey] step_config: | include ::tripleo::profile::base::logging::fluentd + upgrade_tasks: + - name: Check if fluentd_client is deployed + command: systemctl is-enabled fluentd + tags: common + ignore_errors: True + register: fluentd_client_enabled + - name: Check status of fluentd service + shell: > + /usr/bin/systemctl show fluentd --property ActiveState | + grep '\bactive\b' + when: fluentd_client_enabled.rc == 0 + tags: step0,validation + - name: Stop fluentd service + tags: step1 + when: fluentd_client_enabled.rc == 0 + service: name=fluentd state=stopped + - name: Install fluentd package if it was disabled + tags: step3 + yum: name=fluentd state=latest + when: fluentd_client_enabled.rc != 0 diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml index b7c64823..7b78c82e 100644 --- a/puppet/services/manila-api.yaml +++ b/puppet/services/manila-api.yaml @@ -49,7 +49,7 @@ outputs: - get_attr: [ManilaBase, role_data, config_settings] - manila::keystone::authtoken::password: {get_param: ManilaPassword} manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } manila::keystone::authtoken::project_name: 'service' tripleo.manila_api.firewall_rules: '150 manila': @@ -64,6 +64,7 @@ outputs: # internal_api_subnet - > IP/CIDR manila::api::bind_host: {get_param: [ServiceNetMap, ManilaApiNetwork]} manila::api::enable_proxy_headers_parsing: true + manila::api::default_share_type: 'default' step_config: | include 
::tripleo::profile::base::manila::api service_config_settings: diff --git a/puppet/services/manila-backend-cephfs.yaml b/puppet/services/manila-backend-cephfs.yaml index 91369a99..36ef1ea9 100644 --- a/puppet/services/manila-backend-cephfs.yaml +++ b/puppet/services/manila-backend-cephfs.yaml @@ -40,6 +40,20 @@ parameters: ManilaCephFSNativeCephFSEnableSnapshots: type: boolean default: true + ManilaCephFSDataPoolName: + default: manila_data + type: string + ManilaCephFSMetadataPoolName: + default: manila_metadata + type: string + # (jprovazn) default value is set to assure this templates works with an + # external ceph too (user/key is created only when ceph is deployed by + # TripleO) + CephManilaClientKey: + default: '' + description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. + type: string + hidden: true outputs: role_data: @@ -54,4 +68,8 @@ outputs: manila::backend::cephfsnative::cephfs_auth_id: {get_param: ManilaCephFSNativeCephFSAuthId} manila::backend::cephfsnative::cephfs_cluster_name: {get_param: ManilaCephFSNativeCephFSClusterName} manila::backend::cephfsnative::cephfs_enable_snapshots: {get_param: ManilaCephFSNativeCephFSEnableSnapshots} + manila::backend::cephfsnative::ceph_client_key: {get_param: CephManilaClientKey} + ceph::profile::params::fs_data_pool: {get_param: ManilaCephFSDataPoolName} + ceph::profile::params::fs_metadata_pool: {get_param: ManilaCephFSMetadataPoolName} + ceph::profile::params::fs_name: {get_param: ManilaCephFSNativeShareBackendName} step_config: diff --git a/puppet/services/manila-base.yaml b/puppet/services/manila-base.yaml index 2a9745a2..c183bc08 100644 --- a/puppet/services/manila-base.yaml +++ b/puppet/services/manila-base.yaml @@ -67,8 +67,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/manila' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' service_config_settings: mysql: manila::db::mysql::password: {get_param: ManilaPassword} diff --git a/puppet/services/manila-share.yaml b/puppet/services/manila-share.yaml index e38fe675..6ac0d2cf 100644 --- a/puppet/services/manila-share.yaml +++ b/puppet/services/manila-share.yaml @@ -46,7 +46,7 @@ outputs: - manila::volume::cinder::cinder_admin_tenant_name: 'service' manila::keystone::authtoken::password: {get_param: ManilaPassword} manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } manila::keystone::authtoken::project_name: 'service' service_config_settings: get_attr: [ManilaBase, role_data, service_config_settings] diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml index ffa969e0..146cc306 100644 --- a/puppet/services/memcached.yaml +++ b/puppet/services/memcached.yaml @@ -18,6 +18,12 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + MemcachedMaxMemory: + default: '50%' + description: The maximum amount of memory for memcached to be configured + to use when installed. This can be either a percentage ('50%') + or a fixed value ('2048'). 
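A minimal override for the MemcachedMaxMemory parameter described above, showing both accepted forms (values are illustrative):

    parameter_defaults:
      # Cap memcached at a quarter of system memory...
      MemcachedMaxMemory: '25%'
      # ...or pin it to a fixed size in megabytes instead:
      # MemcachedMaxMemory: '2048'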
+ type: string MonitoringSubscriptionMemcached: default: 'overcloud-memcached' type: string @@ -35,8 +41,17 @@ outputs: # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR memcached::listen_ip: {get_param: [ServiceNetMap, MemcachedNetwork]} + memcached::max_memory: {get_param: MemcachedMaxMemory} tripleo.memcached.firewall_rules: '121 memcached': dport: 11211 step_config: | include ::tripleo::profile::base::memcached + service_config_settings: + collectd: + tripleo.collectd.plugins.memcached: + - memcached + collectd::plugin::memcached::instances: + local: + host: "%{hiera('memcached::listen_ip')}" + port: 11211 diff --git a/puppet/services/metrics/collectd.yaml b/puppet/services/metrics/collectd.yaml new file mode 100644 index 00000000..49b2d4c2 --- /dev/null +++ b/puppet/services/metrics/collectd.yaml @@ -0,0 +1,131 @@ +heat_template_version: ocata + +description: Collectd client service + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + DefaultPasswords: + default: {} + type: json + CollectdDefaultPlugins: + default: + - disk + - interface + - load + - memory + - processes + - tcpconns + type: comma_delimited_list + description: > + List of collectd plugins to activate on all overcloud hosts. See + the documentation for the puppet-collectd module for a list plugins + supported by the module (https://github.com/voxpupuli/puppet-collectd). + Set this key to override the default list of plugins. Use + CollectdExtraPlugins if you want to load additional plugins without + overriding the defaults. + CollectdExtraPlugins: + default: [] + type: comma_delimited_list + description: > + List of collectd plugins to activate on all overcloud hosts. See + the documentation for the puppet-collectd module for a list plugins + supported by the module (https://github.com/voxpupuli/puppet-collectd). + Set this key to load plugins in addition to those in + CollectdDefaultPlugins. + CollectdServer: + type: string + description: > + Address of remote collectd server to which we will send + metrics. + default: '' + CollectdServerPort: + type: number + default: 25826 + description: > + Port on remote collectd server to which we will send + metrics. + CollectdUsername: + type: string + description: > + Username for authenticating to the remote collectd server. The default + is to not configure any authentication. + default: '' + CollectdPassword: + type: string + hidden: true + description: > + Password for authenticating to the remote collectd server. The + default is to not configure any authentication. + default: '' + CollectdSecurityLevel: + type: string + description: > + Security level setting for remote collectd connection. + default: 'None' + constraints: + - allowed_values: + - None + - Sign + - Encrypt + +outputs: + role_data: + description: Role data for the Collectd client role. 
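To sketch how the collectd parameters above are meant to be consumed, a hedged example environment file could look like the following; the server address, credentials and extra plugin name are placeholders, and CollectdExtraPlugins is merged with CollectdDefaultPlugins by the yaql expression further down in this template:

    parameter_defaults:
      # Placeholder collectd server and credentials, not real values.
      CollectdServer: collectd0.example.com
      CollectdServerPort: 25826
      CollectdUsername: metrics
      CollectdPassword: secret
      CollectdSecurityLevel: Sign
      # Loaded in addition to the defaults (disk, interface, load, ...);
      # 'virt' is only an example plugin name.
      CollectdExtraPlugins:
        - virt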
+ value: + service_name: collectd + config_settings: + collectd::manage_repo: false + collectd::purge: true + collectd::recurse: true + collectd::purge_config: true + collectd::minimum_version: "5.7" + tripleo::profile::base::metrics::collectd::collectd_server: + get_param: CollectdServer + tripleo::profile::base::metrics::collectd::collectd_port: + get_param: CollectdServerPort + tripleo::profile::base::metrics::collectd::collectd_username: + get_param: CollectdUsername + tripleo::profile::base::metrics::collectd::collectd_password: + get_param: CollectdPassword + tripleo::profile::base::metrics::collectd::collectd_securitylevel: + get_param: CollectdSecurityLevel + tripleo.collectd.plugins.collectd: + yaql: + data: + default_plugins: {get_param: CollectdDefaultPlugins} + extra_plugins: {get_param: CollectdExtraPlugins} + expression: > + ($.data.default_plugins + $.data.extra_plugins) + .flatten().distinct() + step_config: | + include ::tripleo::profile::base::metrics::collectd + upgrade_tasks: + - name: Check if collectd is deployed + command: systemctl is-enabled collectd + tags: common + ignore_errors: True + register: collectd_enabled + - name: Check status of collectd service + shell: > + /usr/bin/systemctl show collectd --property ActiveState | + grep '\bactive\b' + when: collectd_enabled.rc == 0 + tags: step0,validation + - name: Stop collectd service + tags: step1 + when: collectd_enabled.rc == 0 + service: name=collectd state=stopped + - name: Install collectd package if it was disabled + tags: step3 + yum: name=collectd state=latest + when: collectd_enabled.rc != 0 diff --git a/puppet/services/mistral-api.yaml b/puppet/services/mistral-api.yaml index daa1dc7c..1c7d6bd3 100644 --- a/puppet/services/mistral-api.yaml +++ b/puppet/services/mistral-api.yaml @@ -50,3 +50,22 @@ outputs: get_attr: [MistralBase, role_data, service_config_settings] step_config: | include ::tripleo::profile::base::mistral::api + upgrade_tasks: + - name: Check if mistral api is deployed + command: systemctl is-enabled openstack-mistral-api + tags: common + ignore_errors: True + register: mistral_api_enabled + - name: "PreUpgrade step0,validation: Check if openstack-mistral-api is running" + shell: > + /usr/bin/systemctl show 'openstack-mistral-api' --property ActiveState | + grep '\bactive\b' + when: mistral_api_enabled.rc == 0 + tags: step0,validation + - name: Stop mistral_api service + tags: step1 + service: name=openstack-mistral-api state=stopped + - name: Install openstack-mistral-api package if it was disabled + tags: step3 + yum: name=openstack-mistral-api state=latest + when: mistral_api_enabled.rc != 0 diff --git a/puppet/services/mistral-base.yaml b/puppet/services/mistral-base.yaml index e678b14f..e1030346 100644 --- a/puppet/services/mistral-base.yaml +++ b/puppet/services/mistral-base.yaml @@ -65,8 +65,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/mistral' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' mistral::rabbit_userid: {get_param: RabbitUserName} mistral::rabbit_password: {get_param: RabbitPassword} mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL} @@ -76,7 +75,7 @@ outputs: mistral::keystone_tenant: 'service' mistral::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} mistral::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]} - mistral::identity_uri: {get_param: [EndpointMap, KeystoneAdmin, 
uri_no_suffix]} + mistral::identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} service_config_settings: keystone: mistral::keystone::auth::tenant: 'service' diff --git a/puppet/services/mistral-engine.yaml b/puppet/services/mistral-engine.yaml index 4a92b863..03a2a55c 100644 --- a/puppet/services/mistral-engine.yaml +++ b/puppet/services/mistral-engine.yaml @@ -36,3 +36,22 @@ outputs: get_attr: [MistralBase, role_data, config_settings] step_config: | include ::tripleo::profile::base::mistral::engine + upgrade_tasks: + - name: Check if mistral engine is deployed + command: systemctl is-enabled openstack-mistral-engine + tags: common + ignore_errors: True + register: mistral_engine_enabled + - name: "PreUpgrade step0,validation: Check if openstack-mistral-engine is running" + shell: > + /usr/bin/systemctl show 'openstack-mistral-engine' --property ActiveState | + grep '\bactive\b' + when: mistral_engine_enabled.rc == 0 + tags: step0,validation + - name: Stop mistral_engine service + tags: step1 + service: name=openstack-mistral-engine state=stopped + - name: Install openstack-mistral-engine package if it was disabled + tags: step3 + yum: name=openstack-mistral-engine state=latest + when: mistral_engine_enabled.rc != 0 diff --git a/puppet/services/mistral-executor.yaml b/puppet/services/mistral-executor.yaml index 6e273b92..0f6adb07 100644 --- a/puppet/services/mistral-executor.yaml +++ b/puppet/services/mistral-executor.yaml @@ -36,3 +36,22 @@ outputs: get_attr: [MistralBase, role_data, config_settings] step_config: | include ::tripleo::profile::base::mistral::executor + upgrade_tasks: + - name: Check if mistral executor is deployed + command: systemctl is-enabled openstack-mistral-executor + tags: common + ignore_errors: True + register: mistral_executor_enabled + - name: "PreUpgrade step0,validation: Check if openstack-mistral-executor is running" + shell: > + /usr/bin/systemctl show 'openstack-mistral-executor' --property ActiveState | + grep '\bactive\b' + when: mistral_executor_enabled.rc == 0 + tags: step0,validation + - name: Stop mistral_executor service + tags: step1 + service: name=openstack-mistral-executor state=stopped + - name: Install openstack-mistral-executor package if it was disabled + tags: step3 + yum: name=openstack-mistral-executor state=latest + when: mistral_executor_enabled.rc != 0 diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml index 76ba59c1..aba2b1ed 100644 --- a/puppet/services/monitoring/sensu-client.yaml +++ b/puppet/services/monitoring/sensu-client.yaml @@ -62,3 +62,23 @@ outputs: region: {get_param: KeystoneRegion} step_config: | include ::tripleo::profile::base::monitoring::sensu + upgrade_tasks: + - name: Check if sensu_client is deployed + command: systemctl is-enabled sensu-client + tags: common + ignore_errors: True + register: sensu_client_enabled + - name: Check status of sensu-client service + shell: > + /usr/bin/systemctl show sensu-client --property ActiveState | + grep '\bactive\b' + when: sensu_client_enabled.rc == 0 + tags: step0,validation + - name: Stop sensu-client service + tags: step1 + when: sensu_client_enabled.rc == 0 + service: name=sensu-client state=stopped + - name: Install sensu package if it was disabled + tags: step3 + yum: name=sensu state=latest + when: sensu_client.rc != 0 diff --git a/puppet/services/pacemaker/database/mongodb.yaml b/puppet/services/network/contrail-analytics-database.yaml index fb29f4f4..67341ed3 100644 --- 
a/puppet/services/pacemaker/database/mongodb.yaml +++ b/puppet/services/network/contrail-analytics-database.yaml @@ -1,10 +1,12 @@ heat_template_version: ocata description: > - MongoDb service deployment using puppet + Contrail Analytics Database service deployment using puppet, this YAML file + creates the interface between the HOT template + and the puppet manifest that actually installs + and configures Contrail Analytics Database. parameters: - #Parameters not used EndpointMap ServiceNetMap: default: {} description: Mapping of service_name -> network name. Typically set @@ -21,8 +23,8 @@ parameters: type: json resources: - MongoDbBase: - type: ../../database/mongodb.yaml + ContrailBase: + type: ./contrail-base.yaml properties: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} @@ -30,13 +32,12 @@ resources: outputs: role_data: - description: Service mongodb using composable services. + description: Role Contrail Analytics Database using composable services. value: - service_name: mongodb + service_name: contrail_analytics_database config_settings: map_merge: - - get_attr: [MongoDbBase, role_data, config_settings] - - tripleo::profile::pacemaker::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]} - mongodb::server::service_manage: False + - get_attr: [ContrailBase, role_data, config_settings] + - contrail::analytics::database::host_ip: {get_param: [ServiceNetMap, ContrailAnalyticsDatabaseNetwork]} step_config: | - include ::tripleo::profile::pacemaker::database::mongodb + include ::tripleo::network::contrail::analyticsdatabase diff --git a/puppet/services/network/contrail-analytics.yaml b/puppet/services/network/contrail-analytics.yaml index ad14d315..e3e0ec4b 100644 --- a/puppet/services/network/contrail-analytics.yaml +++ b/puppet/services/network/contrail-analytics.yaml @@ -21,44 +21,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. 
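Since this series repurposes several pacemaker-era templates into Contrail composable services (contrail-analytics-database above being one of them), an environment enabling them would map the new service templates in the resource registry. A rough sketch, assuming the corresponding OS::TripleO::Services::* aliases are declared for this release:

    resource_registry:
      # Hypothetical wiring; check overcloud-resource-registry-puppet.yaml
      # for the exact alias names shipped with this release.
      OS::TripleO::Services::ContrailAnalyticsDatabase: ../puppet/services/network/contrail-analytics-database.yaml
      OS::TripleO::Services::ContrailAnalytics: ../puppet/services/network/contrail-analytics.yaml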
type: json - ContrailAnalyticsHostIP: - description: host IP address of Analytics - type: string - ContrailAnalyticsRedisServerIp: - description: Redis server ip address - type: string - ContrailAnalyticsCollectorServerHttpPort: - description: Collector http port - type: number - default: 8089 - ContrailAnalyticsCollectorSandeshPort: - description: Collector sandesh port - type: number - default: 8086 - ContrailAnalyticsHttpServerPort: - description: Analytics http port - type: number - default: 8090 - ContrailAnalyticsListenAddress: - default: '0.0.0.0' - description: IP address Config API is listening on - type: string - ContrailAnalyticsListenPort: - default: 8082 - description: Port Config API is listening on - type: number - ContrailAnalyticsRedisServerPort: - description: Redis server port - type: number - default: 6379 - ContrailAnalyticsRestApiIp: - description: IP address Analytics rest interface listens on - type: string - default: '0.0.0.0' - ContrailAnalyticsRestApiPort: - description: Analytics rest port - type: number - default: 8081 resources: ContrailBase: @@ -76,15 +38,14 @@ outputs: config_settings: map_merge: - get_attr: [ContrailBase, role_data, config_settings] - - contrail::analytics::collector_http_server_port: {get_param: ContrailAnalyticsCollectorServerHttpPort} - contrail::analytics::collector_sandesh_port: {get_param: ContrailAnalyticsCollectorSandeshPort} - contrail::analytics::host_ip: {get_param: ContrailAnalyticsHostIP} - contrail::analytics::http_server_port: {get_param: ContrailAnalyticsHttpServerPort} - contrail::analytics::listen_ip_address: {get_param: ContrailAnalyticsListenAddress} - contrail::analytics::listen_port: {get_param: ContrailAnalyticsListenPort} - contrail::analytics::redis_server: {get_param: ContrailAnalyticsRedisServerIp} - contrail::analytics::redis_server_port: {get_param: ContrailAnalyticsRedisServerPort} - contrail::analytics::rest_api_ip: {get_param: ContrailAnalyticsRestApiIp} - contrail::analytics::rest_api_port: {get_param: ContrailAnalyticsRestApiPort} + - contrail::analytics::collector_http_server_port: {get_param: [EndpointMap, ContrailAnalyticsCollectorHttpInternal, port]} + contrail::analytics::collector_sandesh_port: {get_param: [EndpointMap, ContrailAnalyticsCollectorSandeshInternal, port]} + contrail::analytics::host_ip: {get_param: [ServiceNetMap, ContrailAnalyticsNetwork]} + contrail::analytics::http_server_port: {get_param: [EndpointMap, ContrailAnalyticsHttpInternal, port]} + contrail::analytics::listen_ip_address: {get_param: [ServiceNetMap, ContrailAnalyticsNetwork]} + contrail::analytics::redis_server: '127.0.0.1' + contrail::analytics::redis_server_port: {get_param: [EndpointMap, ContrailAnalyticsRedisInternal, port]} + contrail::analytics::rest_api_ip: {get_param: [ServiceNetMap, ContrailAnalyticsNetwork]} + contrail::analytics::rest_api_port: {get_param: [EndpointMap, ContrailAnalyticsApiInternal, port]} step_config: | include ::tripleo::network::contrail::analytics diff --git a/puppet/services/network/contrail-base.yaml b/puppet/services/network/contrail-base.yaml index b49b2add..bc56a3ca 100644 --- a/puppet/services/network/contrail-base.yaml +++ b/puppet/services/network/contrail-base.yaml @@ -18,47 +18,42 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. 
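The contrail-analytics refactor above replaces per-service IP and port parameters with ServiceNetMap and EndpointMap lookups. With a hypothetical EndpointMap entry the nested get_param resolves as sketched here:

    # Hypothetical EndpointMap fragment (illustrative values only):
    EndpointMap:
      ContrailAnalyticsApiInternal: {protocol: http, port: '8081', host: 192.0.2.10}
    # so that
    #   {get_param: [EndpointMap, ContrailAnalyticsApiInternal, port]}
    # resolves to '8081', matching the old ContrailAnalyticsRestApiPort default.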
type: json + ContrailAAAMode: + description: AAAmode can be no-auth, cloud-admin or rbac + type: string + default: 'rbac' + ContrailAAAModeAnalytics: + description: AAAmode for analytics can be no-auth, cloud-admin or rbac + type: string + default: 'no-auth' AdminPassword: description: Keystone admin user password type: string + hidden: true AdminTenantName: description: Keystone admin tenant name type: string + default: 'admin' AdminToken: description: Keystone admin token type: string + hidden: true AdminUser: description: Keystone admin user name type: string - AuthHost: - description: Keystone host IP address - type: string - AuthPort: - default: 35357 - description: Keystone port + default: 'admin' + AuthPortSSL: + default: 13357 + description: Keystone SSL port + type: number + AuthPortSSLPublic: + default: 13000 + description: Keystone Public SSL port type: number - AuthProtocol: - default: 'http' - description: Keystone authentication protocol - type: string - ContrailDiscoveryServerIp: - description: Discovery server ip address - type: string - ContrailKafkaBrokerList: - description: List of kafka servers - type: comma_delimited_list ContrailAuth: default: 'keystone' description: Keystone authentication method type: string - ContrailCassandraServerList: - default: [] - description: List of cassandra servers - type: comma_delimited_list - ContrailDiscoveryServerPort: - description: Discovery server port - type: number - default: 5998 ContrailInsecure: default: false description: Keystone insecure mode @@ -67,14 +62,18 @@ parameters: default: '127.0.0.1:12111' description: Memcached server type: string - ContrailMultiTenancy: - default: true - description: Turn on/off multi-tenancy - type: boolean - ContrailZkServerIp: - default: [] - description: List of zookeeper servers - type: comma_delimited_list + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number outputs: role_data: @@ -82,19 +81,23 @@ outputs: value: service_name: contrail_base config_settings: + contrail::aaa_mode: {get_param: ContrailAAAMode} + contrail::analytics_aaa_mode: {get_param: ContrailAAAModeAnalytics} contrail::admin_password: {get_param: AdminPassword} contrail::admin_tenant_name: {get_param: AdminTenantName} contrail::admin_token: {get_param: AdminToken} contrail::admin_user: {get_param: AdminUser} - contrail::auth_host: {get_param: [EndpointMap, KeystoneInternal, host] } - contrail::auth_port: {get_param: [EndpointMap, KeystoneInternal, port] } - contrail::auth_protocol: {get_param: [EndpointMap, KeystoneInternal, protocol] } - contrail::disc_server_ip: {get_param: ContrailDiscoveryServerIp} - contrail::kafka_broker_list: {get_param: ContrailKafkaBrokerList} contrail::auth: {get_param: ContrailAuth} - contrail::cassandra_server_list: {get_param: ContrailCassandraServerList} - contrail::disc_server_port: {get_param: ContrailDiscoveryServerPort} + contrail::auth_host: {get_param: [EndpointMap, KeystonePublic, host] } + contrail::auth_port: {get_param: [EndpointMap, KeystoneAdmin, port] } + contrail::auth_port_ssl: {get_param: AuthPortSSL } + contrail::auth_port_public: {get_param: [EndpointMap, KeystonePublic, port] } + contrail::auth_port_ssl_public: {get_param: AuthPortSSLPublic } + contrail::auth_protocol: {get_param: [EndpointMap, KeystoneInternal, protocol] } + 
contrail::api_port: {get_param: [EndpointMap, ContrailConfigInternal, port] } + contrail::disc_server_port: {get_param: [EndpointMap, ContrailDiscoveryInternal, port] } contrail::insecure: {get_param: ContrailInsecure} contrail::memcached_server: {get_param: ContrailMemcachedServer} - contrail::multi_tenancy: {get_param: ContrailMultiTenancy} - contrail::zk_server_ip: {get_param: ContrailZkServerIp} + contrail::rabbit_password: {get_param: RabbitPassword} + contrail::rabbit_user: {get_param: RabbitUserName} + contrail::rabbit_port: {get_param: RabbitClientPort} diff --git a/puppet/services/network/contrail-config.yaml b/puppet/services/network/contrail-config.yaml index 03774480..185b6094 100644 --- a/puppet/services/network/contrail-config.yaml +++ b/puppet/services/network/contrail-config.yaml @@ -21,29 +21,14 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - ContrailConfigIfmapServerIp: - description: Ifmap server ip address - type: string ContrailConfigIfmapUserName: description: Ifmap user name type: string + default: 'api-server' ContrailConfigIfmapUserPassword: description: Ifmap user password type: string - ContrailConfigRabbitServerIp: - description: RabbitMq server ip address - type: string - ContrailConfigRedisServerIp: - description: Redis server ip address - type: string - ContrailConfigListenAddress: - default: '0.0.0.0' - description: IP address Config API is listening on - type: string - ContrailConfigListenPort: - default: 8082 - description: Port Config API is listening on - type: number + default: 'api-server' resources: ContrailBase: @@ -62,11 +47,10 @@ outputs: map_merge: - get_attr: [ContrailBase, role_data, config_settings] - contrail::config::ifmap_password: {get_param: ContrailConfigIfmapUserPassword} - contrail::config::ifmap_server_ip: {get_param: ContrailConfigIfmapServerIp} contrail::config::ifmap_username: {get_param: ContrailConfigIfmapUserName} - contrail::config::listen_ip_address: {get_param: ContrailConfigListenAddress} - contrail::config::listen_port: {get_param: ContrailConfigListenPort} - contrail::config::rabbit_server: {get_param: ContrailConfigRabbitServerIp} - contrail::config::redis_server: {get_param: ContrailConfigRedisServerIp} + contrail::config::listen_ip_address: {get_param: [ServiceNetMap, ContrailConfigNetwork]} + contrail::config::listen_port: {get_param: [EndpointMap, ContrailConfigInternal, port] } + contrail::config::redis_server: '127.0.0.1' + contrail::config::host_ip: {get_param: [ServiceNetMap, ContrailConfigNetwork] } step_config: | include ::tripleo::network::contrail::config diff --git a/puppet/services/network/contrail-control.yaml b/puppet/services/network/contrail-control.yaml index 7c28d283..0964989b 100644 --- a/puppet/services/network/contrail-control.yaml +++ b/puppet/services/network/contrail-control.yaml @@ -21,15 +21,14 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - ContrailControlHostIP: - description: host IP address of Analytics - type: string - ContrailControlIfmapUserName: - description: Ifmap user name - type: string - ContrailControlIfmapUserPassword: - description: Ifmap user password + ContrailControlASN: + description: Autonomous System Number + type: number + default: 64512 + ContrailControlRNDCSecret: + description: sda1/256 hmac key, e.g. 
echo -n "values" | openssl dgst -sha256 -hmac key -binary | base64 type: string + hidden: true resources: ContrailBase: @@ -47,8 +46,8 @@ outputs: config_settings: map_merge: - get_attr: [ContrailBase, role_data, config_settings] - - contrail::control::host_ip: {get_param: ContrailControlHostIP} - contrail::control::ifmap_username: {get_param: ContrailControlIfmapUserName} - contrail::control::ifmap_password: {get_param: ContrailControlIfmapUserPassword} + - contrail::control::asn: {get_param: ContrailControlASN } + contrail::control::host_ip: {get_param: [ServiceNetMap, ContrailControlNetwork]} + contrail::control::rndc_secret: {get_param: ContrailControlRNDCSecret} step_config: | include ::tripleo::network::contrail::control diff --git a/puppet/services/network/contrail-database.yaml b/puppet/services/network/contrail-database.yaml index c56b90a2..b47c2c36 100644 --- a/puppet/services/network/contrail-database.yaml +++ b/puppet/services/network/contrail-database.yaml @@ -21,13 +21,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - ContrailDatabaseHostIP: - description: host IP address of Database node - type: string - ContrailDatabaseMinDisk: - description: Minimum disk size for database - type: number - default: 64 resources: ContrailBase: @@ -45,7 +38,6 @@ outputs: config_settings: map_merge: - get_attr: [ContrailBase, role_data, config_settings] - - contrail::database::host_ip: {get_param: ContrailDatabaseHostIP} - contrail::database::minimum_diskGB: {get_param: ContrailDatabaseMinDisk} + - contrail::database::host_ip: {get_param: [ServiceNetMap, ContrailDatabaseNetwork]} step_config: | - include ::tripleo::profile::contrail::database + include ::tripleo::network::contrail::database diff --git a/puppet/services/pacemaker/neutron-plugin-nuage.yaml b/puppet/services/network/contrail-heat.yaml index 0302b1c5..4dfc6579 100644 --- a/puppet/services/pacemaker/neutron-plugin-nuage.yaml +++ b/puppet/services/network/contrail-heat.yaml @@ -1,7 +1,8 @@ heat_template_version: ocata description: > - OpenStack Neutron Nuage Plugin with Pacemaker configured with Puppet + Contrail Heat plugin adds Contrail specific heat resources enabling heat + to orchestrate Contrail parameters: ServiceNetMap: @@ -20,9 +21,8 @@ parameters: type: json resources: - - NeutronPluginNuageBase: - type: ../neutron-plugin-nuage.yaml + ContrailBase: + type: ./contrail-base.yaml properties: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} @@ -30,11 +30,11 @@ resources: outputs: role_data: - description: Role data for the Neutron Nuage plugin. 
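The ContrailControlRNDCSecret parameter added above documents the openssl recipe for generating the key; passed alongside the ASN in an environment file it would look roughly like this (the secret shown is an obvious placeholder, not real key material):

    parameter_defaults:
      ContrailControlASN: 64512
      # Placeholder for the base64 output of:
      #   echo -n "values" | openssl dgst -sha256 -hmac key -binary | base64
      ContrailControlRNDCSecret: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx='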
+ description: Contrail Heat plugin value: - service_name: neutron_plugin_nuage + service_name: contrail_heat config_settings: map_merge: - - get_attr: [NeutronPluginNuageBase, role_data, config_settings] + - get_attr: [ContrailBase, role_data, config_settings] step_config: | - include ::tripleo::profile::pacemaker::neutron::plugins::nuage + include ::tripleo::network::contrail::heat diff --git a/puppet/services/pacemaker/memcached.yaml b/puppet/services/network/contrail-neutron-plugin.yaml index 7e0007ce..2f2ceb37 100644 --- a/puppet/services/pacemaker/memcached.yaml +++ b/puppet/services/network/contrail-neutron-plugin.yaml @@ -1,7 +1,7 @@ heat_template_version: ocata description: > - Mecached service with Pacemaker configured with Puppet + OpenStack Neutron Opencontrail plugin parameters: ServiceNetMap: @@ -18,11 +18,14 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + ContrailExtensions: + description: List of OpenContrail extensions to be enabled + type: comma_delimited_list + default: '' resources: - - MemcachedServiceBase: - type: ../memcached.yaml + ContrailBase: + type: ./contrail-base.yaml properties: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} @@ -30,13 +33,13 @@ resources: outputs: role_data: - description: Role data for the Memcached pacemaker role. + description: Role data for the Neutron Opencontrail plugin value: - service_name: memcached - monitoring_subscription: {get_attr: [MemcachedServiceBase, role_data, monitoring_subscription]} + service_name: contrail_neutron_plugin config_settings: map_merge: - - get_attr: [MemcachedServiceBase, role_data, config_settings] - - memcached::service_manage: false + - get_attr: [ContrailBase, role_data, config_settings] + - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions + contrail::vrouter::contrail_extensions: {get_param: ContrailExtensions} step_config: | - include ::tripleo::profile::pacemaker::memcached + include tripleo::network::contrail::neutron_plugin diff --git a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml b/puppet/services/network/contrail-provision.yaml index bc0ecac0..765be9a9 100644 --- a/puppet/services/pacemaker/neutron-plugin-plumgrid.yaml +++ b/puppet/services/network/contrail-provision.yaml @@ -1,7 +1,7 @@ heat_template_version: ocata description: > - OpenStack Neutron PLUMgrid Plugin with Pacemaker configured with Puppet + Provision Contrail services after deployment parameters: ServiceNetMap: @@ -20,9 +20,8 @@ parameters: type: json resources: - - NeutronPluginPlumgridBase: - type: ../neutron-plugin-ml2.yaml + ContrailBase: + type: ./contrail-base.yaml properties: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} @@ -30,11 +29,11 @@ resources: outputs: role_data: - description: Role data for the Neutron PLUMgrid plugin. 
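For the ContrailExtensions list added in contrail-neutron-plugin.yaml above, the value is a comma_delimited_list of extension aliases; a hedged example with hypothetical alias names would be:

    parameter_defaults:
      # Hypothetical aliases - consult the neutron_plugin_contrail package
      # for the extensions actually shipped with your Contrail release.
      ContrailExtensions: 'ipam,policy'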
+ description: Contrail provisioning role value: - service_name: neutron_plugin_plumgrid + service_name: contrail_provision config_settings: map_merge: - - get_attr: [NeutronPluginPlumgridBase, role_data, config_settings] + - get_attr: [ContrailBase, role_data, config_settings] step_config: | - include ::tripleo::profile::pacemaker::neutron::plugins::plumgrid + include ::tripleo::network::contrail::provision diff --git a/puppet/services/network/contrail-tsn.yaml b/puppet/services/network/contrail-tsn.yaml new file mode 100644 index 00000000..88adc4a5 --- /dev/null +++ b/puppet/services/network/contrail-tsn.yaml @@ -0,0 +1,64 @@ +heat_template_version: ocata + +description: > + Contrail TSN Service + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronMetadataProxySharedSecret: + description: Metadata Secret + type: string + VrouterPhysicalInterface: + default: 'eth0' + description: vRouter physical interface + type: string + VrouterGateway: + default: '192.168.24.1' + description: vRouter default gateway + type: string + VrouterNetmask: + default: '255.255.255.0' + description: vRouter netmask + type: string + +resources: + ContrailBase: + type: ./contrail-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Contrail TSN Service + value: + service_name: contrail_tsn + config_settings: + map_merge: + - get_attr: [ContrailBase, role_data, config_settings] + - contrail::vrouter::host_ip: {get_param: [ServiceNetMap, NeutronCorePluginOpencontrailNetwork]} + contrail::vrouter::physical_interface: {get_param: VrouterPhysicalInterface} + contrail::vrouter::gateway: {get_param: VrouterGateway} + contrail::vrouter::netmask: {get_param: VrouterNetmask} + contrail::vrouter::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} + contrail::vrouter::is_tsn: 'true' + tripleo.neutron_compute_plugin_opencontrail.firewall_rules: + '111 neutron_compute_plugin_opencontrail proxy': + dport: 8097 + proto: tcp + step_config: | + include ::tripleo::network::contrail::vrouter diff --git a/puppet/services/network/contrail-vrouter.yaml b/puppet/services/network/contrail-vrouter.yaml new file mode 100644 index 00000000..db9f0836 --- /dev/null +++ b/puppet/services/network/contrail-vrouter.yaml @@ -0,0 +1,64 @@ +heat_template_version: ocata + +description: > + OpenStack Neutron Compute OpenContrail plugin + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
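The new contrail-tsn service above takes its interface and addressing from plain parameters; an illustrative override (interface name and addresses are placeholders for a real deployment) could be:

    parameter_defaults:
      # Placeholders - use the NIC, gateway and netmask of the actual vRouter network.
      VrouterPhysicalInterface: 'ens3'
      VrouterGateway: '192.168.24.1'
      VrouterNetmask: '255.255.255.0'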
+ type: json + NeutronMetadataProxySharedSecret: + description: Metadata Secret + type: string + hidden: true + ContrailVrouterPhysicalInterface: + default: 'eth0' + description: vRouter physical interface + type: string + ContrailVrouterGateway: + default: '192.0.2.1' + description: vRouter default gateway + type: string + ContrailVrouterNetmask: + default: '255.255.255.0' + description: vRouter netmask + type: string + +resources: + ContrailBase: + type: ./contrail-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Neutron Compute OpenContrail plugin + value: + service_name: contrail_vrouter + config_settings: + map_merge: + - get_attr: [ContrailBase, role_data, config_settings] + - contrail::vrouter::host_ip: {get_param: [ServiceNetMap, NeutronCorePluginOpencontrailNetwork]} + contrail::vrouter::physical_interface: {get_param: ContrailVrouterPhysicalInterface} + contrail::vrouter::gateway: {get_param: ContrailVrouterGateway} + contrail::vrouter::netmask: {get_param: ContrailVrouterNetmask} + contrail::vrouter::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} + tripleo.neutron_compute_plugin_opencontrail.firewall_rules: + '111 neutron_compute_plugin_opencontrail proxy': + dport: 8097 + proto: tcp + step_config: | + include ::tripleo::network::contrail::vrouter diff --git a/puppet/services/network/contrail-webui.yaml b/puppet/services/network/contrail-webui.yaml index 72cc6fa5..3786cdd1 100644 --- a/puppet/services/network/contrail-webui.yaml +++ b/puppet/services/network/contrail-webui.yaml @@ -21,27 +21,6 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - ContrailWebUiAnalyticsVip: - description: Contrail Analytics VIP - type: string - ContrailWebUiConfigVip: - description: Contrail Config VIP - type: string - ContrailWebUiNeutronVip: - description: Neutron VIP - type: string - ContrailWebuiHttpPort: - default: 8080 - description: HTTP Port of Webui - type: number - ContrailWebuiHttpsPort: - default: 8143 - description: HTTPS Port of Webui - type: number - ContrailWebUiRedisIp: - description: Redis IP - type: string - default: '127.0.0.1' resources: ContrailBase: @@ -59,11 +38,8 @@ outputs: config_settings: map_merge: - get_attr: [ContrailBase, role_data, config_settings] - - contrail::webui::contrail_analytics_vip: {get_param: ContrailWebUiAnalyticsVip} - contrail::webui::contrail_config_vip: {get_param: ContrailWebUiConfigVip} - contrail::webui::contrail_webui_http_port: {get_param: ContrailWebuiHttpPort} - contrail::webui::contrail_webui_https_port: {get_param: ContrailWebuiHttpsPort} - contrail::webui::neutron_vip: {get_param: ContrailWebUiNeutronVip} - contrail::webui::redis_ip: {get_param: ContrailWebUiRedisIp} + - contrail::webui::http_port: {get_param: [EndpointMap, ContrailWebuiHttpInternal, port] } + contrail::webui::https_port: {get_param: [EndpointMap, ContrailWebuiHttpsInternal, port] } + contrail::webui::redis_ip: '127.0.0.1' step_config: | include ::tripleo::network::contrail::webui diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml index fa10cd94..bb191ff0 100644 --- a/puppet/services/neutron-api.yaml +++ b/puppet/services/neutron-api.yaml @@ -71,6 +71,9 @@ parameters: removed in Ocata. 
Future releases will enable L3 HA by default if it is appropriate for the deployment type. Alternate mechanisms will be available to override. + EnableInternalTLS: + type: boolean + default: false parameter_groups: - label: deprecated @@ -82,8 +85,19 @@ parameter_groups: parameters: - NeutronL3HA +conditions: + use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]} + resources: + TLSProxyBase: + type: OS::TripleO::Services::TLSProxyBase + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} + NeutronBase: type: ./neutron-base.yaml properties: @@ -103,6 +117,7 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] + - get_attr: [TLSProxyBase, role_data, config_settings] - neutron::server::database_connection: list_join: - '' @@ -112,17 +127,14 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/ovs_neutron' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} neutron::server::api_workers: {get_param: NeutronWorkers} neutron::server::rpc_workers: {get_param: NeutronWorkers} neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron::server::enable_proxy_headers_parsing: true neutron::keystone::authtoken::password: {get_param: NeutronPassword} - - neutron::server::notifications::nova_url: { get_param: [ EndpointMap, NovaInternal, uri ] } neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] } neutron::server::notifications::tenant_name: 'service' neutron::server::notifications::project_name: 'service' @@ -140,7 +152,23 @@ outputs: # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - neutron::bind_host: {get_param: [ServiceNetMap, NeutronApiNetwork]} + tripleo::profile::base::neutron::server::tls_proxy_bind_ip: + get_param: [ServiceNetMap, NeutronApiNetwork] + tripleo::profile::base::neutron::server::tls_proxy_fqdn: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, NeutronApiNetwork]} + tripleo::profile::base::neutron::server::tls_proxy_port: + get_param: [EndpointMap, NeutronInternal, port] + # Bind to localhost if internal TLS is enabled, since we put a TLS + # proxy in front. 
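The neutron-api change above makes internal TLS optional per deployment: when EnableInternalTLS is true, the template pulls in OS::TripleO::Services::TLSProxyBase, publishes tls_proxy_bind_ip/fqdn/port hiera for that proxy, and (as the next hunk shows) rebinds neutron-server itself to localhost so only the proxy listens on the internal API network. A hedged sketch of how this would typically be switched on; the registry mapping shown is an illustrative assumption, not part of this diff:

    parameter_defaults:
      # Flips the use_tls_proxy condition in neutron-api.yaml
      EnableInternalTLS: true

    resource_registry:
      # Assumed mapping: TLSProxyBase has to resolve to a concrete proxy
      # template (for example the Apache-based one shipped with the templates).
      OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml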
+ neutron::bind_host: + if: + - use_tls_proxy + - 'localhost' + - {get_param: [ServiceNetMap, NeutronApiNetwork]} tripleo::profile::base::neutron::server::l3_ha_override: {get_param: NeutronL3HA} step_config: | include tripleo::profile::base::neutron::server @@ -160,3 +188,17 @@ outputs: neutron::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + upgrade_tasks: + - name: Check if neutron_server is deployed + command: systemctl is-enabled neutron-server + tags: common + ignore_errors: True + register: neutron_server_enabled + - name: "PreUpgrade step0,validation: Check service neutron-server is running" + shell: /usr/bin/systemctl show 'neutron-server' --property ActiveState | grep '\bactive\b' + when: neutron_server_enabled.rc == 0 + tags: step0,validation + - name: Stop neutron_api service + tags: step1 + when: neutron_server_enabled.rc == 0 + service: name=neutron-server state=stopped diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml index 43657bd9..55361939 100644 --- a/puppet/services/neutron-base.yaml +++ b/puppet/services/neutron-base.yaml @@ -24,7 +24,7 @@ parameters: type: number NeutronDhcpAgentsPerNetwork: type: number - default: 3 + default: 0 description: The number of neutron dhcp agents to schedule per network NeutronCorePlugin: default: 'ml2' @@ -72,24 +72,31 @@ parameters: via parameter_defaults in the resource registry. type: json +conditions: + dhcp_agents_zero: {equals : [{get_param: NeutronDhcpAgentsPerNetwork}, 0]} + outputs: role_data: description: Role data for the Neutron base service. value: service_name: neutron_base config_settings: - neutron::rabbit_password: {get_param: RabbitPassword} - neutron::rabbit_user: {get_param: RabbitUserName} - neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} - neutron::rabbit_port: {get_param: RabbitClientPort} - neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} - neutron::core_plugin: {get_param: NeutronCorePlugin} - neutron::service_plugins: {get_param: NeutronServicePlugins} - neutron::debug: {get_param: Debug} - neutron::purge_config: {get_param: EnableConfigPurge} - neutron::allow_overlapping_ips: true - neutron::rabbit_heartbeat_timeout_threshold: 60 - neutron::host: '%{::fqdn}' - neutron::db::database_db_max_retries: -1 - neutron::db::database_max_retries: -1 - neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu} + map_merge: + - neutron::rabbit_password: {get_param: RabbitPassword} + neutron::rabbit_user: {get_param: RabbitUserName} + neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + neutron::rabbit_port: {get_param: RabbitClientPort} + neutron::core_plugin: {get_param: NeutronCorePlugin} + neutron::service_plugins: {get_param: NeutronServicePlugins} + neutron::debug: {get_param: Debug} + neutron::purge_config: {get_param: EnableConfigPurge} + neutron::allow_overlapping_ips: true + neutron::rabbit_heartbeat_timeout_threshold: 60 + neutron::host: '%{::fqdn}' + neutron::db::database_db_max_retries: -1 + neutron::db::database_max_retries: -1 + neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu} + - if: + - dhcp_agents_zero + - {} + - tripleo::profile::base::neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} diff --git a/puppet/services/neutron-compute-plugin-ovn.yaml b/puppet/services/neutron-compute-plugin-ovn.yaml index ce28b5c3..e3a4da99 100644 --- a/puppet/services/neutron-compute-plugin-ovn.yaml +++ b/puppet/services/neutron-compute-plugin-ovn.yaml @@ -18,9 +18,6 @@ parameters: via 
parameter_defaults in the resource registry. This mapping overrides those in ServiceNetMapDefaults. type: json - OVNDbHost: - description: IP address on which the OVN DB servers are listening - type: string OVNSouthboundServerPort: description: Port of the Southbound DB Server type: number @@ -29,6 +26,16 @@ parameters: description: Tunnel encapsulation type type: string default: geneve + NeutronBridgeMappings: + description: > + The OVS logical->physical bridge mappings to use. See the Neutron + documentation for details. Defaults to mapping br-ex - the external + bridge on hosts - to a physical name 'datacentre' which can be used + to create provider networks (and we use this for the default floating + network) - if changing this either use different post-install network + scripts or be sure to keep 'datacentre' as a mapping network name + type: comma_delimited_list + default: "datacentre:br-ex" outputs: @@ -37,9 +44,16 @@ outputs: value: service_name: neutron_compute_plugin_ovn config_settings: - tripleo::profile::base::neutron::agents::ovn::ovn_db_host: {get_param: OVNDbHost} ovn::southbound::port: {get_param: OVNSouthboundServerPort} - ovn::southbound::encap_type: {get_param: OVNTunnelEncapType} + ovn::controller::ovn_encap_type: {get_param: OVNTunnelEncapType} ovn::controller::ovn_encap_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]} + ovn::controller::ovn_bridge_mappings: {get_param: NeutronBridgeMappings} + tripleo.neutron_compute_plugin_ovn.firewall_rules: + '118 neutron vxlan networks': + proto: 'udp' + dport: 4789 + '119 neutron geneve networks': + proto: 'udp' + dport: 6081 step_config: | include ::tripleo::profile::base::neutron::agents::ovn diff --git a/puppet/services/neutron-dhcp.yaml b/puppet/services/neutron-dhcp.yaml index ad46c90f..fe7f9f31 100644 --- a/puppet/services/neutron-dhcp.yaml +++ b/puppet/services/neutron-dhcp.yaml @@ -39,6 +39,10 @@ parameters: default: tag: openstack.neutron.agent.dhcp path: /var/log/neutron/dhcp-agent.log + NeutronDhcpAgentDnsmasqDnsServers: + default: [] + description: List of servers to use as dnsmasq forwarders + type: comma_delimited_list resources: @@ -64,6 +68,7 @@ outputs: - neutron::agents::dhcp::enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata} neutron::agents::dhcp::enable_force_metadata: {get_param: NeutronEnableForceMetadata} neutron::agents::dhcp::enable_metadata_network: {get_param: NeutronEnableMetadataNetwork} + neutron::agents::dhcp::dnsmasq_dns_servers: {get_param: NeutronDhcpAgentDnsmasqDnsServers} tripleo.neutron_dhcp.firewall_rules: '115 neutron dhcp input': proto: 'udp' @@ -74,3 +79,17 @@ outputs: dport: 68 step_config: | include tripleo::profile::base::neutron::dhcp + upgrade_tasks: + - name: Check if neutron_dhcp_agent is deployed + command: systemctl is-enabled neutron-dhcp-agent + tags: common + ignore_errors: True + register: neutron_dhcp_agent_enabled + - name: "PreUpgrade step0,validation: Check service neutron-dhcp-agent is running" + shell: /usr/bin/systemctl show 'neutron-dhcp-agent' --property ActiveState | grep '\bactive\b' + when: neutron_dhcp_agent_enabled.rc == 0 + tags: step0,validation + - name: Stop neutron_dhcp service + tags: step1 + when: neutron_dhcp_agent_enabled.rc == 0 + service: name=neutron-dhcp-agent state=stopped diff --git a/puppet/services/neutron-l3-compute-dvr.yaml b/puppet/services/neutron-l3-compute-dvr.yaml index 06927fe0..1d6a2371 100644 --- a/puppet/services/neutron-l3-compute-dvr.yaml +++ b/puppet/services/neutron-l3-compute-dvr.yaml @@ -22,10 +22,6 @@ 
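The upgrade_tasks block added to neutron-dhcp.yaml above follows the same three-step Ansible pattern used throughout this change set (the l3, metadata and OVS agents below, and the nova services later): record whether the unit is enabled, assert it is active during step0 validation, then stop it at step1. A generic sketch of the pattern for a hypothetical unit name:

    upgrade_tasks:
      - name: Check if example-agent is deployed
        command: systemctl is-enabled example-agent
        tags: common
        ignore_errors: True
        register: example_agent_enabled
      - name: "PreUpgrade step0,validation: Check service example-agent is running"
        shell: /usr/bin/systemctl show 'example-agent' --property ActiveState | grep '\bactive\b'
        when: example_agent_enabled.rc == 0
        tags: step0,validation
      - name: Stop example-agent service
        tags: step1
        when: example_agent_enabled.rc == 0
        service: name=example-agent state=stopped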
parameters: Debug: type: string default: '' - NeutronExternalNetworkBridge: - description: Name of bridge used for external network traffic. - type: string - default: 'br-ex' MonitoringSubscriptionNeutronL3Dvr: default: 'overcloud-neutron-l3-dvr' type: string @@ -35,6 +31,19 @@ parameters: tag: openstack.neutron.agent.l3-compute path: /var/log/neutron/l3-agent.log + # DEPRECATED: the following options are deprecated and are currently maintained + # for backwards compatibility. They will be removed in the Pike cycle. + NeutronExternalNetworkBridge: + description: Name of bridge used for external network traffic. Usually L2 + agent handles port wiring into external bridge, and hence the + parameter should be unset. + type: string + default: '' + +conditions: + + external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]} + resources: NeutronBase: @@ -56,7 +65,11 @@ outputs: config_settings: map_merge: - get_attr: [NeutronBase, role_data, config_settings] - - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} - neutron::agents::l3::agent_mode : 'dvr' + - neutron::agents::l3::agent_mode : 'dvr' + - + if: + - external_network_bridge_empty + - {} + - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} step_config: | include tripleo::profile::base::neutron::l3 diff --git a/puppet/services/neutron-l3.yaml b/puppet/services/neutron-l3.yaml index f8c839d0..cd9870bd 100644 --- a/puppet/services/neutron-l3.yaml +++ b/puppet/services/neutron-l3.yaml @@ -21,10 +21,6 @@ parameters: Debug: type: string default: '' - NeutronExternalNetworkBridge: - description: Name of bridge used for external network traffic. - type: string - default: 'br-ex' NeutronL3AgentMode: description: | Agent mode for L3 agent. Must be one of legacy or dvr_snat. @@ -43,6 +39,15 @@ parameters: tag: openstack.neutron.agent.l3 path: /var/log/neutron/l3-agent.log + # DEPRECATED: the following options are deprecated and are currently maintained + # for backwards compatibility. They will be removed in the Pike cycle. + NeutronExternalNetworkBridge: + description: Name of bridge used for external network traffic. Usually L2 + agent handles port wiring into external bridge, and hence the + parameter should be unset. 
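Both L3 templates keep NeutronExternalNetworkBridge purely for backwards compatibility: the default becomes empty and an external_network_bridge_empty condition omits the neutron::agents::l3::external_network_bridge hiera key altogether unless the operator explicitly sets a bridge, leaving port wiring to the L2 agent. The same conditions / if / map_merge idiom recurs in this diff for dhcp_agents_zero, no_firewall_driver and nova_workers_zero. A minimal sketch of the idiom, with a hypothetical parameter and hiera key:

    conditions:
      optional_param_unset: {equals: [{get_param: OptionalParam}, '']}

    outputs:
      role_data:
        value:
          config_settings:
            map_merge:
              - some::base::setting: 'always present'
              - if:
                - optional_param_unset
                - {}                 # leave the key out entirely
                - some::profile::optional_setting: {get_param: OptionalParam}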
+ type: string + default: '' + conditions: external_network_bridge_empty: {equals : [{get_param: NeutronExternalNetworkBridge}, "''"]} @@ -72,10 +77,24 @@ outputs: tripleo.neutron_l3.firewall_rules: '106 neutron_l3 vrrp': proto: vrrp - - + - if: - external_network_bridge_empty - {} - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge} step_config: | include tripleo::profile::base::neutron::l3 + upgrade_tasks: + - name: Check if neutron_l3_agent is deployed + command: systemctl is-enabled neutron-l3-agent + tags: common + ignore_errors: True + register: neutron_l3_agent_enabled + - name: "PreUpgrade step0,validation: Check service neutron-l3-agent is running" + shell: /usr/bin/systemctl show 'neutron-l3-agent' --property ActiveState | grep '\bactive\b' + when: neutron_l3_agent_enabled.rc == 0 + tags: step0,validation + - name: Stop neutron_l3 service + tags: step1 + when: neutron_l3_agent_enabled.rc == 0 + service: name=neutron-l3-agent state=stopped diff --git a/puppet/services/neutron-metadata.yaml b/puppet/services/neutron-metadata.yaml index 73b767d8..32ef567c 100644 --- a/puppet/services/neutron-metadata.yaml +++ b/puppet/services/neutron-metadata.yaml @@ -70,8 +70,22 @@ outputs: - neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers} neutron::agents::metadata::auth_password: {get_param: NeutronPassword} - neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } neutron::agents::metadata::auth_tenant: 'service' neutron::agents::metadata::metadata_ip: "%{hiera('nova_metadata_vip')}" step_config: | include tripleo::profile::base::neutron::metadata + upgrade_tasks: + - name: Check if neutron_metadata_agent is deployed + command: systemctl is-enabled neutron-metadata-agent + tags: common + ignore_errors: True + register: neutron_metadata_agent_enabled + - name: "PreUpgrade step0,validation: Check service neutron-metadata-agent is running" + shell: /usr/bin/systemctl show 'neutron-metadata-agent' --property ActiveState | grep '\bactive\b' + when: neutron_metadata_agent_enabled.rc == 0 + tags: step0,validation + - name: Stop neutron_metadata service + tags: step1 + when: neutron_metadata_agent_enabled.rc == 0 + service: name=neutron-metadata-agent state=stopped diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml index 5fa04867..01471ba2 100644 --- a/puppet/services/neutron-ovs-agent.yaml +++ b/puppet/services/neutron-ovs-agent.yaml @@ -70,6 +70,9 @@ parameters: tag: openstack.neutron.agent.openvswitch path: /var/log/neutron/openvswitch-agent.log +conditions: + no_firewall_driver: {equals : [{get_param: NeutronOVSFirewallDriver}, '']} + resources: NeutronBase: @@ -104,12 +107,30 @@ outputs: # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]} - neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver} tripleo.neutron_ovs_agent.firewall_rules: '118 neutron vxlan networks': proto: 'udp' dport: 4789 '136 neutron gre networks': proto: 'gre' + - + if: + - no_firewall_driver + - {} + - neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver} step_config: | include ::tripleo::profile::base::neutron::ovs + upgrade_tasks: + - name: Check if 
neutron_ovs_agent is deployed + command: systemctl is-enabled neutron-openvswitch-agent + tags: common + ignore_errors: True + register: neutron_ovs_agent_enabled + - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running" + shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b' + when: neutron_ovs_agent_enabled.rc == 0 + tags: step0,validation + - name: Stop neutron_ovs_agent service + tags: step1 + when: neutron_ovs_agent_enabled.rc == 0 + service: name=neutron-openvswitch-agent state=stopped diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml index 5c77e35d..e25bc495 100644 --- a/puppet/services/neutron-ovs-dpdk-agent.yaml +++ b/puppet/services/neutron-ovs-dpdk-agent.yaml @@ -18,6 +18,11 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + HostCpusList: + description: List of cores to be used for host process + type: string + constraints: + - allowed_pattern: "'[0-9,-]+'" NeutronDpdkCoreList: description: List of cores to be used for DPDK Poll Mode Driver type: string @@ -68,7 +73,8 @@ outputs: - neutron::agents::ml2::ovs::enable_dpdk: true neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType} neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir} - vswitch::dpdk::core_list: {get_param: NeutronDpdkCoreList} + vswitch::dpdk::host_core_list: {get_param: HostCpusList} + vswitch::dpdk::pmd_core_list: {get_param: NeutronDpdkCoreList} vswitch::dpdk::memory_channels: {get_param: NeutronDpdkMemoryChannels} vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory} vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType} diff --git a/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml b/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml new file mode 100644 index 00000000..becd25c9 --- /dev/null +++ b/puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml @@ -0,0 +1,73 @@ +heat_template_version: ocata + +description: > + Configure hieradata for Fujitsu C-Fabric plugin configuration + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronFujitsuCfabAddress: + description: 'The address of the C-Fabric to telnet to.' + type: string + NeutronFujitsuCfabUserName: + description: 'The C-Fabric username to use.' + type: string + NeutronFujitsuCfabPassword: + description: 'The C-Fabric password to use.' + type: string + hidden: true + NeutronFujitsuCfabPhysicalNetworks: + description: 'List of <physical_network>:<vfab_id> tuples specifying physical_network names and corresponding vfab ids.' + type: comma_delimited_list + default: '' + NeutronFujitsuCfabSharePprofile: + description: '"Whether to share a C-Fabric pprofile among Neutron ports using the same VLAN ID.' + type: boolean + default: false + NeutronFujitsuCfabPprofilePrefix: + description: 'The prefix string for pprofile name.' + type: string + default: '' + NeutronFujitsuCfabSaveConfig: + description: 'Whether to save configuration.' 
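In the neutron-ovs-dpdk-agent hunk above, the single vswitch::dpdk::core_list is split in two: the new HostCpusList parameter feeds vswitch::dpdk::host_core_list (cores left to OVS/host processes) while NeutronDpdkCoreList now feeds vswitch::dpdk::pmd_core_list (cores dedicated to the poll-mode drivers). Note that HostCpusList's allowed_pattern "'[0-9,-]+'" expects the value to carry its own single quotes. A hedged parameter_defaults sketch; the core numbers are purely illustrative and must be chosen from the actual host CPU layout:

    parameter_defaults:
      # Cores reserved for host/OVS threads (note the embedded quotes)
      HostCpusList: "'0,16'"
      # Cores dedicated to the DPDK poll-mode drivers, quoted the same way
      NeutronDpdkCoreList: "'1,2,17,18'"
      # Existing DPDK knobs referenced by the same hunk
      NeutronDpdkMemoryChannels: "4"
      NeutronDpdkSocketMemory: "1024,1024"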
+ type: boolean + default: true + +resources: + + NeutronMl2Base: + type: ./neutron-plugin-ml2.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for Fujitsu Cfab ML2 Driver + value: + service_name: neutron_plugin_ml2_fujitsu_cfab + config_settings: + map_merge: + - get_attr: [NeutronMl2Base, role_data, config_settings] + - neutron::plugins::ml2::fujitsu::cfab::address: {get_param: NeutronFujitsuCfabAddress} + neutron::plugins::ml2::fujitsu::cfab::username: {get_param: NeutronFujitsuCfabUserName} + neutron::plugins::ml2::fujitsu::cfab::password: {get_param: NeutronFujitsuCfabPassword} + neutron::plugins::ml2::fujitsu::cfab::physical_networks: {get_param: NeutronFujitsuCfabPhysicalNetworks} + neutron::plugins::ml2::fujitsu::cfab::share_pprofile: {get_param: NeutronFujitsuCfabSharePprofile} + neutron::plugins::ml2::fujitsu::cfab::pprofile_prefix: {get_param: NeutronFujitsuCfabPprofilePrefix} + neutron::plugins::ml2::fujitsu::cfab::save_config: {get_param: NeutronFujitsuCfabSaveConfig} + step_config: | + include ::tripleo::profile::base::neutron::plugins::ml2 diff --git a/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml b/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml new file mode 100644 index 00000000..85971f17 --- /dev/null +++ b/puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml @@ -0,0 +1,78 @@ +heat_template_version: ocata + +description: Configure hieradata for Fujitsu fossw plugin configuration + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NeutronFujitsuFosswIps: + description: 'The List of IP address of all fos switches.' + type: comma_delimited_list + NeutronFujitsuFosswUserName: + description: 'The username of the fos switches.' + type: string + NeutronFujitsuFosswPassword: + description: 'The password of the fos switches.' + type: string + hidden: true + NeutronFujitsuFosswPort: + description: 'The port number used for SSH connection.' + type: number + default: 22 + NeutronFujitsuFosswTimeout: + description: 'The timeout os SSH connection.' + type: number + default: 30 + NeutronFujitsuFosswUdpDestPort: + description: 'The port number of VXLAN UDP destination on the fos switches.' + type: number + default: 4789 + NeutronFujitsuFosswOvsdbVlanidRangeMin: + description: 'The minimum VLAN ID in the range that is used for binding VNI and physical port.' + type: number + default: 2 + NeutronFujitsuFosswOvsdbPort: + description: 'The port number which OVSDB server on the fos switches listen.' 
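The new Fujitsu ML2 templates (C-Fabric above, the FOS switch driver continuing below) follow the usual ML2 sub-plugin shape: they extend neutron-plugin-ml2.yaml and map their parameters one-to-one onto neutron::plugins::ml2::fujitsu::* hiera. A hedged sketch of the switch credentials a C-Fabric deployment would supply; every value is a placeholder:

    parameter_defaults:
      NeutronFujitsuCfabAddress: '192.0.2.50'
      NeutronFujitsuCfabUserName: 'admin'
      NeutronFujitsuCfabPassword: 'example-password'
      # <physical_network>:<vfab_id> tuples, as described in the template
      NeutronFujitsuCfabPhysicalNetworks: 'physnet1:1,physnet2:2'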
+ type: number + default: 6640 + +resources: + + NeutronMl2Base: + type: ./neutron-plugin-ml2.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for Fujitsu Fossw ML2 Driver + value: + service_name: neutron_plugin_ml2_fujitsu_fossw + config_settings: + map_merge: + - get_attr: [NeutronMl2Base, role_data, config_settings] + - neutron::plugins::ml2::fujitsu::fossw::fossw_ips: {get_param: NeutronFujitsuFosswIps} + neutron::plugins::ml2::fujitsu::fossw::username: {get_param: NeutronFujitsuFosswUserName} + neutron::plugins::ml2::fujitsu::fossw::password: {get_param: NeutronFujitsuFosswPassword} + neutron::plugins::ml2::fujitsu::fossw::port: {get_param: NeutronFujitsuFosswPort} + neutron::plugins::ml2::fujitsu::fossw::timeout: {get_param: NeutronFujitsuFosswTimeout} + neutron::plugins::ml2::fujitsu::fossw::udp_dest_port: {get_param: NeutronFujitsuFosswUdpDestPort} + neutron::plugins::ml2::fujitsu::fossw::ovsdb_vlanid_range_min: {get_param: NeutronFujitsuFosswOvsdbVlanidRangeMin} + neutron::plugins::ml2::fujitsu::fossw::ovsdb_port: {get_param: NeutronFujitsuFosswOvsdbPort} + step_config: | + include ::tripleo::profile::base::neutron::plugins::ml2 + diff --git a/puppet/services/neutron-plugin-ml2-ovn.yaml b/puppet/services/neutron-plugin-ml2-ovn.yaml index 59346edc..4d4c3900 100644 --- a/puppet/services/neutron-plugin-ml2-ovn.yaml +++ b/puppet/services/neutron-plugin-ml2-ovn.yaml @@ -18,10 +18,14 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + OVNSouthboundServerPort: + description: Port of the OVN Southbound DB server + type: number + default: 6642 OVNDbConnectionTimeout: description: Timeout in seconds for the OVSDB connection transaction type: number - default: 60 + default: 180 OVNVifType: description: Type of VIF to be used for ports type: string @@ -43,6 +47,10 @@ parameters: description: OVN notification driver for Neutron QOS service plugin type: string default: NULL + NeutronGeneveMaxHeaderSize: + description: Geneve encapsulation header size + type: number + default: 38 resources: @@ -61,10 +69,12 @@ outputs: config_settings: map_merge: - get_attr: [NeutronMl2Base, role_data, config_settings] - - neutron::plugins::ovn::ovsdb_connection_timeout: {get_param: OVNDbConnectionTimeout} - neutron::plugins::ovn::neutron_sync_mode: {get_param: OVNNeutronSyncMode} - neutron::plugins::ovn::ovn_l3_mode: true - neutron::plugins::ovn::vif_type: {get_param: OVNVifType} + - ovn::southbound::port: {get_param: OVNSouthboundServerPort} + neutron::plugins::ml2::ovn::ovsdb_connection_timeout: {get_param: OVNDbConnectionTimeout} + neutron::plugins::ml2::ovn::neutron_sync_mode: {get_param: OVNNeutronSyncMode} + neutron::plugins::ml2::ovn::ovn_l3_mode: true + neutron::plugins::ml2::ovn::vif_type: {get_param: OVNVifType} neutron::server::qos_notification_drivers: {get_param: OVNQosDriver} + neutron::plugins::ml2::max_header_size: {get_param: NeutronGeneveMaxHeaderSize} step_config: | include ::tripleo::profile::base::neutron::plugins::ml2 diff --git a/puppet/services/neutron-plugin-ml2.yaml b/puppet/services/neutron-plugin-ml2.yaml index 407ce6ba..3abd04f3 100644 --- a/puppet/services/neutron-plugin-ml2.yaml +++ b/puppet/services/neutron-plugin-ml2.yaml @@ -60,12 +60,6 @@ parameters: default: 'vxlan' description: The tenant network type for Neutron. 
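The ml2-ovn hunk above moves the hiera namespace from neutron::plugins::ovn to neutron::plugins::ml2::ovn, raises the OVSDB connection timeout default from 60 to 180 seconds, and adds two knobs: OVNSouthboundServerPort and NeutronGeneveMaxHeaderSize (the Geneve encapsulation overhead Neutron accounts for, for example when sizing MTUs). A hedged parameter_defaults sketch shown with the template defaults:

    parameter_defaults:
      OVNSouthboundServerPort: 6642
      OVNDbConnectionTimeout: 180
      NeutronGeneveMaxHeaderSize: 38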
type: comma_delimited_list - NeutronSupportedPCIVendorDevs: - description: | - List of supported pci vendor devices in the format VendorID:ProductID. - By default Intel & Mellanox SR-IOV capable NICs are supported. - type: comma_delimited_list - default: ['15b3:1004','8086:10ca'] resources: NeutronBase: @@ -91,7 +85,9 @@ outputs: neutron::plugins::ml2::tunnel_id_ranges: {get_param: NeutronTunnelIdRanges} neutron::plugins::ml2::vni_ranges: {get_param: NeutronVniRanges} neutron::plugins::ml2::tenant_network_types: {get_param: NeutronNetworkType} - neutron::plugins::ml2::supported_pci_vendor_devs: {get_param: NeutronSupportedPCIVendorDevs} step_config: | include ::tripleo::profile::base::neutron::plugins::ml2 + service_config_settings: + horizon: + neutron::plugins::ml2::mechanism_drivers: {get_param: NeutronMechanismDrivers} diff --git a/puppet/services/neutron-plugin-opencontrail.yaml b/puppet/services/neutron-plugin-opencontrail.yaml deleted file mode 100644 index 976e5f19..00000000 --- a/puppet/services/neutron-plugin-opencontrail.yaml +++ /dev/null @@ -1,74 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron Opencontrail plugin - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - AdminPassword: - description: The password for the keystone admin account, used for monitoring, querying neutron etc. - type: string - hidden: true - AdminToken: - description: The keystone auth secret and db password. 
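neutron-plugin-ml2.yaml above also gains a service_config_settings section. Unlike config_settings, which lands on the nodes running the service itself, these keys are applied to whichever nodes run the service named by the map key, so every node hosting horizon also learns neutron::plugins::ml2::mechanism_drivers. The same mechanism is what lets the nova and octavia templates later in this diff push their keystone:: and db::mysql:: settings onto the keystone and mysql nodes. A minimal sketch of the shape, with illustrative names:

    outputs:
      role_data:
        value:
          service_name: example_service
          config_settings:
            example::setting: 'applied where example_service runs'
          service_config_settings:
            horizon:
              example::dashboard_setting: 'applied where horizon runs'
            mysql:
              example::db::mysql::user: example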
- type: string - hidden: true - ContrailApiServerIp: - description: IP address of the OpenContrail API server - type: string - ContrailApiServerPort: - description: Port of the OpenContrail API - type: string - default: 8082 - ContrailMultiTenancy: - description: Whether to enable multi tenancy - type: boolean - default: false - ContrailExtensions: - description: List of OpenContrail extensions to be enabled - type: comma_delimited_list - default: '' - -resources: - - NeutronBase: - type: ./neutron-base.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron Opencontrail plugin - value: - service_name: neutron_plugin_opencontrail - config_settings: - map_merge: - - get_attr: [NeutronBase, role_data, config_settings] - - neutron::api_extensions_path: /usr/lib/python2.7/site-packages/neutron_plugin_contrail/extensions,/usr/lib/python2.7/site-packages/neutron_lbaas/extensions - - neutron::plugins::opencontrail::api_server_ip: {get_param: ContrailApiServerIp} - neutron::plugins::opencontrail::api_server_port: {get_param: ContrailApiServerPort} - neutron::plugins::opencontrail::multi_tenancy: {get_param: ContrailMultiTenancy} - neutron::plugins::opencontrail::contrail_extensions: {get_param: ContrailExtensions} - neutron::plugins::opencontrail::keystone_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri] } - neutron::plugins::opencontrail::keystone_admin_user: admin - neutron::plugins::opencontrail::keystone_admin_tenant_name: admin - neutron::plugins::opencontrail::keystone_admin_password: {get_param: AdminPassword} - neutron::plugins::opencontrail::keystone_admin_token: {get_param: AdminToken} - step_config: | - include tripleo::profile::base::neutron::plugins::opencontrail diff --git a/puppet/services/neutron-plugin-plumgrid.yaml b/puppet/services/neutron-plugin-plumgrid.yaml index bd078074..f948dd07 100644 --- a/puppet/services/neutron-plugin-plumgrid.yaml +++ b/puppet/services/neutron-plugin-plumgrid.yaml @@ -100,9 +100,8 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/ovs_neutron' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" - neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneAdmin, host]} + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneInternal, host]} neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword} neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} neutron::plugins::plumgrid::director_server: {get_param: PLUMgridDirectorServer} diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml index d2ca841f..f27b53f2 100644 --- a/puppet/services/nova-api.yaml +++ b/puppet/services/nova-api.yaml @@ -54,18 +54,28 @@ parameters: EnableInternalTLS: type: boolean default: false + NovaDefaultFloatingPool: + default: 'public' + description: Default pool for floating IP addresses + type: string + NovaDbSyncTimeout: + default: 300 + description: Timeout for Nova db sync + type: number conditions: nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]} resources: - ApacheServiceBase: - type: ./apache.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - 
EndpointMap: {get_param: EndpointMap} - EnableInternalTLS: {get_param: EnableInternalTLS} + # Temporarily disable Nova API deployed in WSGI + # https://bugs.launchpad.net/nova/+bug/1661360 + # ApacheServiceBase: + # type: ./apache.yaml + # properties: + # ServiceNetMap: {get_param: ServiceNetMap} + # DefaultPasswords: {get_param: DefaultPasswords} + # EndpointMap: {get_param: EndpointMap} + # EnableInternalTLS: {get_param: EnableInternalTLS} NovaBase: type: ./nova-base.yaml @@ -86,7 +96,9 @@ outputs: config_settings: map_merge: - get_attr: [NovaBase, role_data, config_settings] - - get_attr: [ApacheServiceBase, role_data, config_settings] + # Temporarily disable Nova API deployed in WSGI + # https://bugs.launchpad.net/nova/+bug/1661360 + # - get_attr: [ApacheServiceBase, role_data, config_settings] - nova::cron::archive_deleted_rows::hour: '*/12' nova::cron::archive_deleted_rows::destination: '/dev/null' tripleo.nova_api.firewall_rules: @@ -100,9 +112,9 @@ outputs: nova::keystone::authtoken::project_name: 'service' nova::keystone::authtoken::password: {get_param: NovaPassword} nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} nova::api::enabled: true - nova::api::default_floating_pool: 'public' + nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool} nova::api::sync_db_api: true nova::api::enable_proxy_headers_parsing: true nova::api::api_bind_address: @@ -111,20 +123,23 @@ outputs: "%{hiera('fqdn_$NETWORK')}" params: $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]} - nova::api::service_name: 'httpd' - nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS} + # Temporarily disable Nova API deployed in WSGI + # https://bugs.launchpad.net/nova/+bug/1661360 + nova_wsgi_enabled: false + # nova::api::service_name: 'httpd' + # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS} # NOTE: bind IP is found in Heat replacing the network name with the local node IP # for the given network; replacement examples (eg. 
for internal_api): # internal_api -> IP # internal_api_uri -> [IP] # internal_api_subnet - > IP/CIDR - nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]} - nova::wsgi::apache_api::servername: - str_replace: - template: - "%{hiera('fqdn_$NETWORK')}" - params: - $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]} + # nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]} + # nova::wsgi::apache_api::servername: + # str_replace: + # template: + # "%{hiera('fqdn_$NETWORK')}" + # params: + # $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]} nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} nova::api::instance_name_template: {get_param: InstanceNameTemplate} nova_enable_db_purge: {get_param: NovaEnableDBPurge} @@ -133,7 +148,9 @@ outputs: - nova_workers_zero - {} - nova::api::osapi_compute_workers: {get_param: NovaWorkers} - nova::wsgi::apache_api::workers: {get_param: NovaWorkers} + # Temporarily disable Nova API deployed in WSGI + # https://bugs.launchpad.net/nova/+bug/1661360 + # nova::wsgi::apache_api::workers: {get_param: NovaWorkers} step_config: | include tripleo::profile::base::nova::api service_config_settings: @@ -161,3 +178,87 @@ outputs: nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]} nova::keystone::auth::password: {get_param: NovaPassword} nova::keystone::auth::region: {get_param: KeystoneRegion} + # Temporarily disable Nova API deployed in WSGI + # https://bugs.launchpad.net/nova/+bug/1661360 + # metadata_settings: + # get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: get bootstrap nodeid + tags: common + command: hiera bootstrap_nodeid + register: bootstrap_node + - name: set is_bootstrap_node fact + tags: common + set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}} + - name: Extra migration for nova tripleo/+bug/1656791 + tags: step0,pre-upgrade + when: is_bootstrap_node + command: nova-manage db online_data_migrations + - name: Stop and disable nova_api service (pre-upgrade not under httpd) + tags: step2 + service: name=openstack-nova-api state=stopped enabled=no + - name: Create puppet manifest to set transport_url in nova.conf + tags: step5 + when: is_bootstrap_node + copy: + dest: /root/nova-api_upgrade_manifest.pp + mode: 0600 + content: > + $transport_url = os_transport_url({ + 'transport' => hiera('messaging_service_name', 'rabbit'), + 'hosts' => any2array(hiera('rabbitmq_node_names', undef)), + 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ), + 'username' => hiera('nova::rabbit_userid', 'guest'), + 'password' => hiera('nova::rabbit_password'), + 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0')))) + }) + oslo::messaging::default { 'nova_config': + transport_url => $transport_url + } + - name: Run puppet apply to set tranport_url in nova.conf + tags: step5 + when: is_bootstrap_node + command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp + register: puppet_apply_nova_api_upgrade + failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2] + changed_when: puppet_apply_nova_api_upgrade.rc == 2 + - name: Setup cell_v2 (map cell0) + tags: step5 + when: is_bootstrap_node + command: nova-manage cell_v2 map_cell0 + - name: Setup cell_v2 (create default cell) + tags: step5 + when: is_bootstrap_node + # (owalsh) puppet-nova expects the cell name 'default' + # (owalsh) pass the db uri explicitly to avoid 
https://bugs.launchpad.net/tripleo/+bug/1662344 + shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection) + register: nova_api_create_cell + failed_when: nova_api_create_cell.rc not in [0,2] + changed_when: nova_api_create_cell.rc == 0 + - name: Setup cell_v2 (sync nova/cell DB) + tags: step5 + when: is_bootstrap_node + command: nova-manage db sync + async: {get_param: NovaDbSyncTimeout} + poll: 10 + - name: Setup cell_v2 (migrate hosts) + tags: step5 + when: is_bootstrap_node + command: nova-manage cell_v2 map_cell_and_hosts + - name: Setup cell_v2 (get cell uuid) + tags: step5 + when: is_bootstrap_node + shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}' + register: nova_api_cell_uuid + - name: Setup cell_v2 (migrate instances) + tags: step5 + when: is_bootstrap_node + command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}} + - name: Sync nova_api DB + tags: step5 + command: nova-manage api_db sync + when: is_bootstrap_node + - name: Online data migration for nova + tags: step5 + when: is_bootstrap_node + command: nova-manage db online_data_migrations diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml index bf8e46be..ceacb0b2 100644 --- a/puppet/services/nova-base.yaml +++ b/puppet/services/nova-base.yaml @@ -18,6 +18,10 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint NovaPassword: description: The password for the nova service and db account, used by nova-api. type: string @@ -66,6 +70,57 @@ parameters: type: string description: Nova Compute upgrade level default: '' + NovaCronArchiveDeleteRowsMinute: + type: string + description: > + Cron to move deleted instances to another table - Minute + default: '1' + NovaCronArchiveDeleteRowsHour: + type: string + description: > + Cron to move deleted instances to another table - Hour + default: '0' + NovaCronArchiveDeleteRowsMonthday: + type: string + description: > + Cron to move deleted instances to another table - Month Day + default: '*' + NovaCronArchiveDeleteRowsMonth: + type: string + description: > + Cron to move deleted instances to another table - Month + default: '*' + NovaCronArchiveDeleteRowsWeekday: + type: string + description: > + Cron to move deleted instances to another table - Week Day + default: '*' + NovaCronArchiveDeleteRowsMaxRows: + type: string + description: > + Cron to move deleted instances to another table - Max Rows + default: '100' + NovaCronArchiveDeleteRowsUser: + type: string + description: > + Cron to move deleted instances to another table - User + default: 'nova' + NovaCronArchiveDeleteRowsDestination: + type: string + description: > + Cron to move deleted instances to another table - Log destination + default: '/var/log/nova/nova-rowsflush.log' + NovaCronArchiveDeleteRowsUntilComplete: + type: boolean + description: > + Cron to move deleted instances to another table - Until complete + default: false + NovaPlacementAPIInterface: + type: string + description: > + Endpoint interface to be used for the placement API. 
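nova-base.yaml above exposes the archive_deleted_rows cron as parameters (NovaCronArchiveDeleteRows*), covering the schedule, batch size, user, log destination and the until_complete flag. A hedged parameter_defaults sketch that would, for instance, archive up to 1000 rows at 02:05 every night and keep going until the shadow tables are drained; the values are illustrative, the template defaults being minute '1', hour '0' and 100 rows:

    parameter_defaults:
      NovaCronArchiveDeleteRowsMinute: '5'
      NovaCronArchiveDeleteRowsHour: '2'
      NovaCronArchiveDeleteRowsMaxRows: '1000'
      NovaCronArchiveDeleteRowsUntilComplete: true
      NovaCronArchiveDeleteRowsDestination: '/var/log/nova/nova-rowsflush.log'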
+ default: 'internal' + conditions: compute_upgrade_level_empty: {equals : [{get_param: UpgradeLevelNovaCompute}, '']} @@ -81,6 +136,11 @@ outputs: nova::rabbit_userid: {get_param: RabbitUserName} nova::rabbit_use_ssl: {get_param: RabbitClientUseSSL} nova::rabbit_port: {get_param: RabbitClientPort} + nova::placement::project_name: 'service' + nova::placement::password: {get_param: NovaPassword} + nova::placement::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + nova::placement::os_region_name: {get_param: KeystoneRegion} + nova::placement::os_interface: {get_param: NovaPlacementAPIInterface} nova::database_connection: list_join: - '' @@ -90,8 +150,7 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/nova' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' nova::api_database_connection: list_join: - '' @@ -101,12 +160,22 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/nova_api' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + nova::placement_database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://nova_placement:' + - {get_param: NovaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/nova_placement' + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' nova::debug: {get_param: Debug} nova::purge_config: {get_param: EnableConfigPurge} nova::network::neutron::neutron_project_name: 'service' nova::network::neutron::neutron_username: 'neutron' + nova::network::neutron::neutron_region_name: {get_param: KeystoneRegion} nova::network::neutron::dhcp_domain: '' nova::network::neutron::neutron_password: {get_param: NeutronPassword} nova::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]} @@ -122,7 +191,16 @@ outputs: nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} nova::use_ipv6: {get_param: NovaIPv6} nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge} - - + nova::cron::archive_deleted_rows::minute: {get_param: NovaCronArchiveDeleteRowsMinute} + nova::cron::archive_deleted_rows::hour: {get_param: NovaCronArchiveDeleteRowsHour} + nova::cron::archive_deleted_rows::monthday: {get_param: NovaCronArchiveDeleteRowsMonthday} + nova::cron::archive_deleted_rows::month: {get_param: NovaCronArchiveDeleteRowsMonth} + nova::cron::archive_deleted_rows::weekday: {get_param: NovaCronArchiveDeleteRowsWeekday} + nova::cron::archive_deleted_rows::max_rows: {get_param: NovaCronArchiveDeleteRowsMaxRows} + nova::cron::archive_deleted_rows::user: {get_param: NovaCronArchiveDeleteRowsUser} + nova::cron::archive_deleted_rows::destination: {get_param: NovaCronArchiveDeleteRowsDestination} + nova::cron::archive_deleted_rows::until_complete: {get_param: NovaCronArchiveDeleteRowsUntilComplete} + - if: - compute_upgrade_level_empty - {} diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml index 2312b635..d208bede 100644 --- a/puppet/services/nova-compute.yaml +++ b/puppet/services/nova-compute.yaml @@ -75,6 +75,10 @@ parameters: default: tag: openstack.nova.compute path: /var/log/nova/nova-compute.log + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: auto resources: 
NovaBase: @@ -141,3 +145,24 @@ outputs: # We'll probably treat it like we do with Neutron plugins. # Until then, just include it in the default nova-compute role. include tripleo::profile::base::nova::compute::libvirt + service_config_settings: + collectd: + tripleo.collectd.plugins.nova_compute: + - virt + collectd::plugins::virt::connection: "qemu:///system" + upgrade_tasks: + - name: Stop nova-compute service + tags: step1 + service: name=openstack-nova-compute state=stopped + # If not already set by puppet (e.g a pre-ocata version), set the + # upgrade_level for compute to "auto" + - name: Set compute upgrade level to auto + tags: step3 + ini_file: + str_replace: + template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL" + params: + LEVEL: {get_param: UpgradeLevelNovaCompute} + - name: Start nova-compute service + tags: step6 + service: name=openstack-nova-compute state=started diff --git a/puppet/services/nova-conductor.yaml b/puppet/services/nova-conductor.yaml index b96bf6e6..4574cae8 100644 --- a/puppet/services/nova-conductor.yaml +++ b/puppet/services/nova-conductor.yaml @@ -30,6 +30,10 @@ parameters: default: tag: openstack.nova.scheduler path: /var/log/nova/nova-scheduler.log + UpgradeLevelNovaCompute: + type: string + description: Nova Compute upgrade level + default: auto conditions: nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]} @@ -61,3 +65,16 @@ outputs: - nova::conductor::workers: {get_param: NovaWorkers} step_config: | include tripleo::profile::base::nova::conductor + upgrade_tasks: + - name: Stop nova_conductor service + tags: step1 + service: name=openstack-nova-conductor state=stopped + # If not already set by puppet (e.g a pre-ocata version), set the + # upgrade_level for compute to "auto" + - name: Set compute upgrade level to auto + tags: step1 + ini_file: + str_replace: + template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL" + params: + LEVEL: {get_param: UpgradeLevelNovaCompute} diff --git a/puppet/services/nova-consoleauth.yaml b/puppet/services/nova-consoleauth.yaml index 79969ded..82f329bc 100644 --- a/puppet/services/nova-consoleauth.yaml +++ b/puppet/services/nova-consoleauth.yaml @@ -48,3 +48,7 @@ outputs: get_attr: [NovaBase, role_data, config_settings] step_config: | include tripleo::profile::base::nova::consoleauth + upgrade_tasks: + - name: Stop nova_consoleauth service + tags: step1 + service: name=openstack-nova-consoleauth state=stopped diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml index 306c6b6f..5eb2170a 100644 --- a/puppet/services/nova-ironic.yaml +++ b/puppet/services/nova-ironic.yaml @@ -42,10 +42,10 @@ outputs: - nova::compute::force_config_drive: true nova::compute::reserved_host_memory: '0' nova::compute::vnc_enabled: false - nova::ironic::common::admin_password: {get_param: IronicPassword} - nova::ironic::common::admin_tenant_name: 'service' - nova::ironic::common::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri]} - nova::ironic::common::admin_username: 'ironic' + nova::ironic::common::password: {get_param: IronicPassword} + nova::ironic::common::project_name: 'service' + nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]} + nova::ironic::common::username: 'ironic' nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]} nova::network::neutron::dhcp_domain: '' nova::scheduler::filter::scheduler_host_manager: 'ironic_host_manager' diff --git 
a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml index a9b2b3f9..faf1ae48 100644 --- a/puppet/services/nova-libvirt.yaml +++ b/puppet/services/nova-libvirt.yaml @@ -62,6 +62,7 @@ outputs: nova::compute::libvirt::qemu::configure_qemu: true nova::compute::libvirt::qemu::max_files: 32768 nova::compute::libvirt::qemu::max_processes: 131072 + nova::compute::libvirt::vncserver_listen: {get_param: [ServiceNetMap, NovaLibvirtNetwork]} tripleo.nova_libvirt.firewall_rules: '200 nova_libvirt': dport: diff --git a/puppet/services/nova-placement.yaml b/puppet/services/nova-placement.yaml new file mode 100644 index 00000000..b59e2fc6 --- /dev/null +++ b/puppet/services/nova-placement.yaml @@ -0,0 +1,133 @@ +heat_template_version: ocata + +description: > + OpenStack Nova Placement API service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + NovaWorkers: + default: 0 + description: Number of workers for Nova Placement API service. + type: number + NovaPassword: + description: The password for the nova service and db account, used by nova-placement. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + MonitoringSubscriptionNovaPlacement: + default: 'overcloud-nova-placement' + type: string + NovaPlacementLoggingSource: + type: json + default: + tag: openstack.nova.placement + path: /var/log/httpd/nova_placement_wsgi_error_ssl.log + EnableInternalTLS: + type: boolean + default: false + +conditions: + nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]} + +resources: + ApacheServiceBase: + type: ./apache.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + EnableInternalTLS: {get_param: EnableInternalTLS} + + NovaBase: + type: ./nova-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Nova Placement API service. + value: + service_name: nova_placement + monitoring_subscription: {get_param: MonitoringSubscriptionNovaPlacement} + logging_source: {get_param: NovaPlacementLoggingSource} + logging_groups: + - nova + config_settings: + map_merge: + - get_attr: [NovaBase, role_data, config_settings] + - get_attr: [ApacheServiceBase, role_data, config_settings] + - tripleo.nova_placement.firewall_rules: + '138 nova_placement': + dport: + - 8778 + - 13778 + nova::keystone::authtoken::project_name: 'service' + nova::keystone::authtoken::password: {get_param: NovaPassword} + nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + nova::wsgi::apache_placement::api_port: '8778' + nova::wsgi::apache_placement::ssl: {get_param: EnableInternalTLS} + # NOTE: bind IP is found in Heat replacing the network name with the local node IP + # for the given network; replacement examples (eg. 
for internal_api): + # internal_api -> IP + # internal_api_uri -> [IP] + # internal_api_subnet - > IP/CIDR + nova::wsgi::apache_placement::bind_host: {get_param: [ServiceNetMap, NovaPlacementNetwork]} + nova::wsgi::apache_placement::servername: + str_replace: + template: + "%{hiera('fqdn_$NETWORK')}" + params: + $NETWORK: {get_param: [ServiceNetMap, NovaPlacementNetwork]} + - + if: + - nova_workers_zero + - {} + - nova::wsgi::apache_placement::workers: {get_param: NovaWorkers} + step_config: | + include tripleo::profile::base::nova::placement + service_config_settings: + keystone: + nova::keystone::auth_placement::tenant: 'service' + nova::keystone::auth_placement::public_url: {get_param: [EndpointMap, NovaPlacementPublic, uri]} + nova::keystone::auth_placement::internal_url: {get_param: [EndpointMap, NovaPlacementInternal, uri]} + nova::keystone::auth_placement::admin_url: {get_param: [EndpointMap, NovaPlacementAdmin, uri]} + nova::keystone::auth_placement::password: {get_param: NovaPassword} + nova::keystone::auth_placement::region: {get_param: KeystoneRegion} + mysql: + map_merge: + - {get_attr: [NovaBase, role_data, service_config_settings, mysql]} + - nova::db::mysql_placement::password: {get_param: NovaPassword} + nova::db::mysql_placement::user: nova_placement + nova::db::mysql_placement::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + nova::db::mysql_placement::dbname: nova_placement + nova::db::mysql_placement::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" + upgrade_tasks: + - name: Stop nova_placement service (running under httpd) + tags: step1 + service: name=httpd state=stopped + # The nova placement API isn't installed in newton images, so install + # it on upgrade + - name: Install nova-placement packages on upgrade + tags: step3 + yum: name=openstack-nova-placement-api state=latest diff --git a/puppet/services/nova-scheduler.yaml b/puppet/services/nova-scheduler.yaml index 353a75ac..e4b6bb43 100644 --- a/puppet/services/nova-scheduler.yaml +++ b/puppet/services/nova-scheduler.yaml @@ -63,3 +63,7 @@ outputs: nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters} step_config: | include tripleo::profile::base::nova::scheduler + upgrade_tasks: + - name: Stop nova_scheduler service + tags: step1 + service: name=openstack-nova-scheduler state=stopped diff --git a/puppet/services/nova-vnc-proxy.yaml b/puppet/services/nova-vnc-proxy.yaml index bf244943..42335ade 100644 --- a/puppet/services/nova-vnc-proxy.yaml +++ b/puppet/services/nova-vnc-proxy.yaml @@ -64,3 +64,7 @@ outputs: - 13080 step_config: | include tripleo::profile::base::nova::vncproxy + upgrade_tasks: + - name: Stop nova_vnc_proxy service + tags: step1 + service: name=openstack-nova-consoleauth state=stopped diff --git a/puppet/services/octavia-api.yaml b/puppet/services/octavia-api.yaml new file mode 100644 index 00000000..909a3030 --- /dev/null +++ b/puppet/services/octavia-api.yaml @@ -0,0 +1,98 @@ +heat_template_version: ocata + +description: > + OpenStack Octavia API service. + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
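nova-placement.yaml above is a brand-new composable service: it runs the placement API under httpd via apache.yaml, opens 8778/13778, registers the placement keystone endpoints and a dedicated nova_placement database through service_config_settings, and its upgrade_tasks install openstack-nova-placement-api because Newton images did not ship it. Deploying it still requires mapping the service and listing it for a role; a hedged sketch, where the registry key name and the role parameter are assumptions rather than something shown in this diff:

    resource_registry:
      # Assumed key name; the path is relative to the environment file
      OS::TripleO::Services::NovaPlacement: ../puppet/services/nova-placement.yaml

    parameter_defaults:
      ControllerServices:
        - OS::TripleO::Services::NovaPlacement
        # ...plus the rest of the role's default service list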
+ type: json + OctaviaPassword: + description: The password for the Octavia's database account. + type: string + hidden: true + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + MonitoringSubscriptionOctaviaApi: + default: 'overcloud-octavia-api' + type: string + OctaviaApiLoggingSource: + type: json + default: + tag: openstack.octavia.api + path: /var/log/octavia/api.log + +resources: + + OctaviaBase: + type: ./octavia-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Octavia API service. + value: + service_name: octavia_api + monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaApi} + logging_source: {get_param: OctaviaApiLoggingSource} + logging_groups: + - octavia + config_settings: + map_merge: + - get_attr: [OctaviaBase, role_data, config_settings] + - octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } + octavia::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://octavia:' + - {get_param: OctaviaPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/octavia' + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + octavia::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + octavia::keystone::authtoken::project_name: 'service' + octavia::keystone::authtoken::password: {get_param: OctaviaPassword} + octavia::api::sync_db: true + tripleo.octavia_api.firewall_rules: + '120 octavia api': + dport: + - 9876 + - 13876 + octavia::api::host: {get_param: [ServiceNetMap, OctaviaApiNetwork]} + neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default'] + step_config: | + include tripleo::profile::base::octavia::api + service_config_settings: + keystone: + octavia::keystone::auth::tenant: 'service' + octavia::keystone::auth::public_url: {get_param: [EndpointMap, OctaviaPublic, uri]} + octavia::keystone::auth::internal_url: { get_param: [ EndpointMap, OctaviaInternal, uri ] } + octavia::keystone::auth::admin_url: { get_param: [ EndpointMap, OctaviaAdmin, uri ] } + octavia::keystone::auth::password: {get_param: OctaviaPassword} + octavia::keystone::auth::region: {get_param: KeystoneRegion} + mysql: + octavia::db::mysql::password: {get_param: OctaviaPassword} + octavia::db::mysql::user: octavia + octavia::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + octavia::db::mysql::dbname: octavia + octavia::db::mysql::allowed_hosts: + - '%' + - "%{hiera('mysql_bind_host')}" diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml new file mode 100644 index 00000000..b537a2bc --- /dev/null +++ b/puppet/services/octavia-base.yaml @@ -0,0 +1,62 @@ +heat_template_version: ocata + +description: > + OpenStack Octavia base service. Shared for all Octavia services + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
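# For reference, the octavia::db::database_connection list_join above assembles a standard
# oslo.db URL of roughly this shape (protocol, host and password below are illustrative
# placeholders, not real deployment values):
#   mysql+pymysql://octavia:SECRET@192.0.2.10/octavia?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo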
+ type: json + Debug: + type: string + default: '' + description: Set to True to enable debugging on all services. + EnableConfigPurge: + type: boolean + default: true + description: > + Remove configuration that is not generated by TripleO. Setting + to false may result in configuration remnants after updates/upgrades. + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Base role data for Octavia services + value: + service_name: octavia_base + config_settings: + octavia::debug: {get_param: Debug} + octavia::purge_config: {get_param: EnableConfigPurge} + octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName} + tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword} + tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort} + diff --git a/puppet/services/octavia-health-manager.yaml b/puppet/services/octavia-health-manager.yaml new file mode 100644 index 00000000..51d32f23 --- /dev/null +++ b/puppet/services/octavia-health-manager.yaml @@ -0,0 +1,61 @@ +heat_template_version: ocata + +description: > + OpenStack Octavia Health Manager service. + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + MonitoringSubscriptionOctaviaHealthManager: + default: 'overcloud-octavia-health-manager' + type: string + OctaviaHealthManagerLoggingSource: + type: json + default: + tag: openstack.octavia.health-manager + path: /var/log/octavia/health-manager.log + OctaviaHeartbeatKey: + type: string + description: Key to identify heartbeat messages for amphorae. + hidden: true + +resources: + + OctaviaBase: + type: ./octavia-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Octavia Health Manager service. 
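# The RabbitMQ parameters consumed by octavia-base.yaml above are the deployment-wide
# Rabbit settings shared with the other services; a purely illustrative override that
# switches the client connection to SSL:
parameter_defaults:
  RabbitClientUseSSL: true
  RabbitClientPort: 5671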
+ value: + service_name: octavia_health_manager + monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaHealthManager} + logging_source: {get_param: OctaviaHealthManagerLoggingSource} + logging_groups: + - octavia + config_settings: + map_merge: + - get_attr: [OctaviaBase, role_data, config_settings] + - octavia::health_manager::heartbeat_key: {get_param: OctaviaHeartbeatKey} + octavia::health_manager::event_streamer_driver: 'queue_event_streamer' + step_config: | + include tripleo::profile::base::octavia::health_manager + + + diff --git a/puppet/services/octavia-housekeeping.yaml b/puppet/services/octavia-housekeeping.yaml new file mode 100644 index 00000000..84c33433 --- /dev/null +++ b/puppet/services/octavia-housekeeping.yaml @@ -0,0 +1,70 @@ +heat_template_version: ocata + +description: > + OpenStack Octavia Housekeeping service. + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + OctaviaAmphoraExpiryAge: + default: 0 + description: The interval in seconds after which an unused Amphora will + be considered expired and cleaned up. If left to 0, the + configuration will not be set and the system will use + the service defaults. + type: number + MonitoringSubscriptionOctaviaHousekeeping: + default: 'overcloud-octavia-housekeeping' + type: string + OctaviaHousekeepingLoggingSource: + type: json + default: + tag: openstack.octavia.housekeeping + path: /var/log/octavia/housekeeping.log + +conditions: + amphora_expiry_is_zero: {equals: [{get_param: OctaviaAmphoraExpiryAge}, 0]} + + +resources: + + OctaviaBase: + type: ./octavia-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Octavia Housekeeping service. + value: + service_name: octavia_housekeeping + monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaHousekeeping} + logging_source: {get_param: OctaviaHousekeepingLoggingSource} + logging_groups: + - octavia + config_settings: + map_merge: + - get_attr: [OctaviaBase, role_data, config_settings] + - + if: + - amphora_expiry_is_zero + - {} + - octavia::worker::amphora_expiry_age: {get_param: OctaviaAmphoraExpiryAge} + step_config: | + include tripleo::profile::base::octavia::housekeeping + + diff --git a/puppet/services/octavia-worker.yaml b/puppet/services/octavia-worker.yaml new file mode 100644 index 00000000..9212b76b --- /dev/null +++ b/puppet/services/octavia-worker.yaml @@ -0,0 +1,102 @@ +heat_template_version: ocata + +description: > + OpenStack Octavia Worker service. + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. 
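# The amphora_expiry_is_zero condition above is the usual "fall back to the service
# default unless explicitly set" pattern: with an override such as the following
# (value illustrative), the condition is false and the expiry age reaches hieradata.
parameter_defaults:
  OctaviaAmphoraExpiryAge: 3600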
+ type: json + MonitoringSubscriptionOctaviaWorker: + default: 'overcloud-octavia-worker' + type: string + OctaviaWorkerLoggingSource: + type: json + default: + tag: openstack.octavia.worker + path: /var/log/octavia/worker.log + OctaviaAmphoraImageTag: + default: '' + description: Glance image tag for identifying the amphora image. + type: string + OctaviaAmphoraNetworkList: + default: [] + description: List of networks to attach to amphorae. + type: comma_delimited_list + OctaviaLoadBalancerTopology: + default: '' + description: Load balancer topology configuration. + type: string + OctaviaFlavorId: + default: 65 + description: Nova flavor ID to be used when creating the nova flavor for + amphora. + type: number + OctaviaFlavorProperties: + default: {} + description: Dictionary describing the nova flavor for amphora. + type: json + OctaviaManageNovaFlavor: + default: false + description: Configure the nova flavor for the amphora. + type: boolean + OctaviaSSHKeyName: + default: 'octavia-ssh-key' + description: Name of the ssh key to be configured so the amphora can + be logged into. + type: string + +conditions: + octavia_topology_unset: {equals : [{get_param: OctaviaLoadBalancerTopology}, ""]} + octavia_amphora_tag_unset: {equals: [{get_param: OctaviaAmphoraImageTag}, ""]} + +resources: + + OctaviaBase: + type: ./octavia-base.yaml + properties: + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + EndpointMap: {get_param: EndpointMap} + +outputs: + role_data: + description: Role data for the Octavia Worker service. + value: + service_name: octavia_worker + monitoring_subscription: {get_param: MonitoringSubscriptionOctaviaWorker} + logging_source: {get_param: OctaviaWorkerLoggingSource} + logging_groups: + - octavia + config_settings: + map_merge: + - get_attr: [OctaviaBase, role_data, config_settings] + - octavia::worker::amp_boot_network_list: {get_param: OctaviaAmphoraNetworkList} + octavia::worker::amp_flavor_id: {get_param: OctaviaFlavorId} + octavia::worker::nova_flavor_config: {get_param: OctaviaFlavorProperties} + octavia::worker::manage_nova_flavor: {get_param: OctaviaManageNovaFlavor} + octavia::worker::ssh_key_name: {get_param: OctaviaSSHKeyName} + - + if: + - octavia_amphora_tag_unset + - {} + - octavia::worker::amp_image_tag: {get_param: OctaviaAmphoraImageTag} + - + if: + - octavia_topology_unset + - {} + - octavia::worker::loadbalancer_topology: {get_param: OctaviaLoadBalancerTopology} + step_config: | + include tripleo::profile::base::octavia::worker + diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml index 89842115..ceb56a81 100644 --- a/puppet/services/opendaylight-api.yaml +++ b/puppet/services/opendaylight-api.yaml @@ -17,10 +17,10 @@ parameters: type: string description: The password for the opendaylight server.
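# OctaviaAmphoraImageTag and OctaviaLoadBalancerTopology above are guarded by conditions,
# so they only reach hieradata when an operator sets them; a sketch of a typical override
# (the tag, topology value and flavor property keys are examples and assumptions, not
# values shipped by this patch):
parameter_defaults:
  OctaviaAmphoraImageTag: amphora-image
  OctaviaLoadBalancerTopology: SINGLE
  OctaviaManageNovaFlavor: true
  OctaviaFlavorProperties:
    ram: '1024'
    disk: '3'
    vcpus: '1'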
hidden: true - OpenDaylightEnableL3: - description: Knob to enable/disable ODL L3 + OpenDaylightConnectionProtocol: + description: L7 protocol used for REST access type: string - default: 'no' + default: 'http' OpenDaylightEnableDHCP: description: Knob to enable/disable ODL DHCP Server type: boolean @@ -56,10 +56,10 @@ outputs: opendaylight::odl_rest_port: {get_param: OpenDaylightPort} opendaylight::username: {get_param: OpenDaylightUsername} opendaylight::password: {get_param: OpenDaylightPassword} - opendaylight::enable_l3: {get_param: OpenDaylightEnableL3} opendaylight::extra_features: {get_param: OpenDaylightFeatures} opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP} opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]} + opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol} tripleo.opendaylight_api.firewall_rules: '137 opendaylight api': dport: @@ -68,3 +68,26 @@ outputs: - 6653 step_config: | include tripleo::profile::base::neutron::opendaylight + upgrade_tasks: + - name: Check if opendaylight is deployed + command: systemctl is-enabled opendaylight + tags: common + ignore_errors: True + register: opendaylight_enabled + - name: "PreUpgrade step0,validation: Check service opendaylight is running" + shell: /usr/bin/systemctl show 'opendaylight' --property ActiveState | grep '\bactive\b' + when: opendaylight_enabled.rc == 0 + tags: step0,validation + - name: Stop opendaylight service + tags: step1 + when: opendaylight_enabled.rc == 0 + service: name=opendaylight state=stopped + - name: Remove ODL snapshots, data, journal directories + file: + state: absent + path: /opt/opendaylight/{{item}} + tags: step2 + with_items: + - snapshots + - data + - journal diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml index cfec3c48..3db0848e 100644 --- a/puppet/services/opendaylight-ovs.yaml +++ b/puppet/services/opendaylight-ovs.yaml @@ -73,3 +73,17 @@ outputs: proto: 'gre' step_config: | include tripleo::profile::base::neutron::plugins::ovs::opendaylight + upgrade_tasks: + - name: Check if openvswitch is deployed + command: systemctl is-enabled openvswitch + tags: common + ignore_errors: True + register: openvswitch_enabled + - name: "PreUpgrade step0,validation: Check service openvswitch is running" + shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b' + when: openvswitch_enabled.rc == 0 + tags: step0,validation + - name: Stop openvswitch service + tags: step1 + when: openvswitch_enabled.rc == 0 + service: name=openvswitch state=stopped diff --git a/puppet/services/ovn-dbs.yaml b/puppet/services/ovn-dbs.yaml index 302628d4..7f81afde 100644 --- a/puppet/services/ovn-dbs.yaml +++ b/puppet/services/ovn-dbs.yaml @@ -1,4 +1,4 @@ -heat_template_version: 2016-04-08 +heat_template_version: ocata description: > OVN databases configured with puppet diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml index 9adf1bdb..5be58c18 100644 --- a/puppet/services/pacemaker.yaml +++ b/puppet/services/pacemaker.yaml @@ -29,11 +29,22 @@ parameters: default: false description: Whether to enable fencing in Pacemaker or not. type: boolean + PacemakerRemoteAuthkey: + type: string + description: The authkey for the pacemaker remote service. + hidden: true + default: '' PcsdPassword: type: string description: The password for the 'pcsd' user for pacemaker.
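# The OpenDaylight and Open vSwitch upgrade_tasks above share a recurring
# probe/validate/stop pattern; a generic sketch of that pattern, with
# example-service as a placeholder unit name:
- name: Check if example-service is deployed
  command: systemctl is-enabled example-service
  tags: common
  ignore_errors: True
  register: example_service_enabled
- name: "PreUpgrade step0,validation: Check service example-service is running"
  shell: /usr/bin/systemctl show 'example-service' --property ActiveState | grep '\bactive\b'
  when: example_service_enabled.rc == 0
  tags: step0,validation
- name: Stop example-service service
  tags: step1
  when: example_service_enabled.rc == 0
  service: name=example-service state=stopped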
hidden: true default: '' + CorosyncSettleTries: + type: number + description: Number of tries for cluster settling. This has the + same default as the pacemaker puppet module. Override + to a smaller value when in need to replace a controller node. + default: 360 FencingConfig: default: {} description: | @@ -76,6 +87,10 @@ parameters: \[(?<pid>[^ ]*)\] (?<host>[^ ]*) (?<message>.*)$/ + PacemakerResources: + type: comma_delimited_list + description: List of resources managed by pacemaker + default: ['rabbitmq','haproxy'] outputs: role_data: @@ -92,6 +107,7 @@ outputs: pacemaker::resource_defaults::defaults: resource-stickiness: { value: INFINITY } corosync_token_timeout: 10000 + pacemaker::corosync::settle_tries: {get_param: CorosyncSettleTries} tripleo.pacemaker.firewall_rules: '130 pacemaker tcp': proto: 'tcp' @@ -112,5 +128,20 @@ outputs: passwords: - {get_param: PcsdPassword} - {get_param: [DefaultPasswords, pcsd_password]} + tripleo::profile::base::pacemaker::remote_authkey: {get_param: PacemakerRemoteAuthkey} step_config: | include ::tripleo::profile::base::pacemaker + upgrade_tasks: + - name: Check pacemaker cluster running before upgrade + tags: step0,validation + pacemaker_cluster: state=online check_and_fail=true + - name: Stop pacemaker cluster + tags: step2 + pacemaker_cluster: state=offline + - name: Start pacemaker cluster + tags: step4 + pacemaker_cluster: state=online + - name: Check pacemaker resource + tags: step4 + pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500 + with_items: {get_param: PacemakerResources} diff --git a/puppet/services/pacemaker/ceilometer-agent-central.yaml b/puppet/services/pacemaker/ceilometer-agent-central.yaml deleted file mode 100644 index 87bbf0c5..00000000 --- a/puppet/services/pacemaker/ceilometer-agent-central.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Ceilometer Central Agent service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - MonitoringSubscriptionCeilometerCentral: - default: 'overcloud-ceilometer-agent-central' - type: string - -resources: - CeilometerServiceBase: - type: ../ceilometer-agent-central.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Ceilometer Central Agent pacemaker role. 
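# CorosyncSettleTries added above is a plain parameter, so a controller-replacement
# environment can lower it as its description suggests; the value here is illustrative.
parameter_defaults:
  CorosyncSettleTries: 5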
- value: - service_name: ceilometer_agent_central - monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCentral} - config_settings: - map_merge: - - get_attr: [CeilometerServiceBase, role_data, config_settings] - - ceilometer::agent::central::manage_service: false - ceilometer::agent::central::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::ceilometer::agent::central diff --git a/puppet/services/pacemaker/ceilometer-agent-notification.yaml b/puppet/services/pacemaker/ceilometer-agent-notification.yaml deleted file mode 100644 index a013cf4f..00000000 --- a/puppet/services/pacemaker/ceilometer-agent-notification.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Ceilometer Notification Agent service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - MonitoringSubscriptionCeilometerNotification: - default: 'overcloud-ceilometer-agent-notification' - type: string - -resources: - CeilometerServiceBase: - type: ../ceilometer-agent-notification.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Ceilometer Notification Agent pacemaker role. - value: - service_name: ceilometer_agent_notification - monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerNotification} - config_settings: - map_merge: - - get_attr: [CeilometerServiceBase, role_data, config_settings] - - ceilometer::agent::notification::manage_service: false - ceilometer::agent::notification::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::ceilometer::agent::notification diff --git a/puppet/services/pacemaker/ceilometer-collector.yaml b/puppet/services/pacemaker/ceilometer-collector.yaml deleted file mode 100644 index 0ddd72c3..00000000 --- a/puppet/services/pacemaker/ceilometer-collector.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Ceilometer Collector service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - MonitoringSubscriptionCeilometerCollector: - default: 'overcloud-ceilometer-collector' - type: string - -resources: - CeilometerServiceBase: - type: ../ceilometer-collector.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Ceilometer Collector pacemaker role. 
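# Every pacemaker wrapper removed in this change followed the same shape seen in the
# ceilometer files above: wrap the base template, turn off systemd management of the
# service, and hand control to a pacemaker profile. A generic sketch of that (now
# removed) shape, with ExampleServiceBase and the profile name as placeholders:
outputs:
  role_data:
    value:
      service_name: example_service
      config_settings:
        map_merge:
          - get_attr: [ExampleServiceBase, role_data, config_settings]
          - example::manage_service: false
            example::enabled: false
      step_config: |
        include ::tripleo::profile::pacemaker::example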
- value: - service_name: ceilometer_collector - monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCollector} - config_settings: - map_merge: - - get_attr: [CeilometerServiceBase, role_data, config_settings] - - ceilometer::collector::manage_service: false - ceilometer::collector::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::ceilometer::collector diff --git a/puppet/services/pacemaker/gnocchi-api.yaml b/puppet/services/pacemaker/ceph-rbdmirror.yaml index 87e525ad..7686028d 100644 --- a/puppet/services/pacemaker/gnocchi-api.yaml +++ b/puppet/services/pacemaker/ceph-rbdmirror.yaml @@ -1,7 +1,7 @@ heat_template_version: ocata description: > - Gnocchi service configured with Puppet + Ceph RBD mirror service. parameters: ServiceNetMap: @@ -18,13 +18,13 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json - MonitoringSubscriptionGnocchiApi: - default: 'overcloud-gnocchi-api' + CephClientUserName: + default: openstack type: string resources: - GnocchiServiceBase: - type: ../gnocchi-api.yaml + CephBase: + type: ../ceph-base.yaml properties: ServiceNetMap: {get_param: ServiceNetMap} DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} @@ -32,14 +32,16 @@ resources: outputs: role_data: - description: Role data for the Gnocchi role. + description: Role data for the Ceph RBD mirror service. value: - service_name: gnocchi_api - monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiApi} + service_name: ceph_rbdmirror config_settings: map_merge: - - get_attr: [GnocchiServiceBase, role_data, config_settings] - - gnocchi::metricd::manage_service: false - gnocchi::metricd::enabled: false + - get_attr: [CephBase, role_data, config_settings] + - tripleo::profile::pacemaker::ceph::rbdmirror::client_name: {get_param: CephClientUserName} + tripleo.ceph_rbdmirror.firewall_rules: + '113 ceph_rbdmirror': + dport: + - '6800-7300' step_config: | - include ::tripleo::profile::pacemaker::gnocchi::api + include ::tripleo::profile::pacemaker::ceph::rbdmirror
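# The renamed ceph-rbdmirror template above is consumed like any other composable
# service; a minimal sketch, assuming an OS::TripleO::Services::CephRbdMirror registry
# name, that also overrides the cephx client user:
resource_registry:
  OS::TripleO::Services::CephRbdMirror: puppet/services/pacemaker/ceph-rbdmirror.yaml
parameter_defaults:
  CephClientUserName: openstack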
\ No newline at end of file diff --git a/puppet/services/pacemaker/cinder-api.yaml b/puppet/services/pacemaker/cinder-api.yaml deleted file mode 100644 index 472b45f8..00000000 --- a/puppet/services/pacemaker/cinder-api.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Cinder API service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - CinderApiBase: - type: ../cinder-api.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Cinder API role. - value: - service_name: cinder_api - monitoring_subscription: {get_attr: [CinderApiBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [CinderApiBase, role_data, logging_source]} - logging_groups: {get_attr: [CinderApiBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [CinderApiBase, role_data, config_settings] - - cinder::api::manage_service: false - cinder::api::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::cinder::api diff --git a/puppet/services/pacemaker/cinder-scheduler.yaml b/puppet/services/pacemaker/cinder-scheduler.yaml deleted file mode 100644 index 19785fab..00000000 --- a/puppet/services/pacemaker/cinder-scheduler.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Cinder Scheduler service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - CinderSchedulerBase: - type: ../cinder-scheduler.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Cinder Scheduler role. 
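# With wrappers such as pacemaker/cinder-api.yaml gone, the HA environment points these
# services back at the plain systemd-managed templates; a hedged sketch of the kind of
# mapping involved (the exact registry entries live in the environment files, not here):
resource_registry:
  OS::TripleO::Services::CinderApi: puppet/services/cinder-api.yaml
  OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml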
- value: - service_name: cinder_scheduler - monitoring_subscription: {get_attr: [CinderSchedulerBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [CinderSchedulerBase, role_data, logging_source]} - logging_groups: {get_attr: [CinderSchedulerBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [CinderSchedulerBase, role_data, config_settings] - - cinder::scheduler::manage_service: false - cinder::scheduler::enabled: false - step_config: - include ::tripleo::profile::pacemaker::cinder::scheduler diff --git a/puppet/services/pacemaker/database/mysql.yaml b/puppet/services/pacemaker/database/mysql.yaml index af95dbd1..93bf5967 100644 --- a/puppet/services/pacemaker/database/mysql.yaml +++ b/puppet/services/pacemaker/database/mysql.yaml @@ -53,3 +53,9 @@ outputs: get_param: [ServiceNetMap, MysqlNetwork] step_config: | include ::tripleo::profile::pacemaker::database::mysql + metadata_settings: + get_attr: [MysqlBase, role_data, metadata_settings] + upgrade_tasks: + - name: Check for galera root password + tags: step0 + file: path=/root/.my.cnf state=file diff --git a/puppet/services/pacemaker/glance-api.yaml b/puppet/services/pacemaker/glance-api.yaml deleted file mode 100644 index 00213cba..00000000 --- a/puppet/services/pacemaker/glance-api.yaml +++ /dev/null @@ -1,74 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Glance API service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - GlanceFilePcmkDevice: - default: '' - description: > - An exported storage device that should be mounted by Pacemaker - as Glance storage. Effective when GlanceFilePcmkManage is true. - type: string - GlanceFilePcmkFstype: - default: 'nfs' - description: > - Filesystem type for Pacemaker mount used as Glance storage. - Effective when GlanceFilePcmkManage is true. - type: string - GlanceFilePcmkManage: - default: false - description: > - Whether to make Glance file backend a mount managed by Pacemaker. - Effective when GlanceBackend is 'file'. - type: boolean - GlanceFilePcmkOptions: - default: '' - description: > - Mount options for Pacemaker mount used as Glance storage. - Effective when GlanceFilePcmkManage is true. - type: string - -resources: - - GlanceApiBase: - type: ../glance-api.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Glance role. 
- value: - service_name: glance_api - monitoring_subscription: {get_attr: [GlanceApiBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [GlanceApiBase, role_data, logging_source]} - logging_groups: {get_attr: [GlanceApiBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [GlanceApiBase, role_data, config_settings] - - glance_file_pcmk_device: {get_param: GlanceFilePcmkDevice} - glance_file_pcmk_fstype: {get_param: GlanceFilePcmkFstype} - glance_file_pcmk_manage: {get_param: GlanceFilePcmkManage} - glance_file_pcmk_options: {get_param: GlanceFilePcmkOptions} - glance_file_pcmk_directory: '/var/lib/glance/images' - glance::api::manage_service: false - glance::api::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::glance diff --git a/puppet/services/pacemaker/glance-registry.yaml b/puppet/services/pacemaker/glance-registry.yaml deleted file mode 100644 index eaf0266c..00000000 --- a/puppet/services/pacemaker/glance-registry.yaml +++ /dev/null @@ -1,47 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Glance Registry service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - GlanceRegistryBase: - type: ../glance-registry.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Glance role. - value: - service_name: glance_registry - monitoring_subscription: {get_attr: [GlanceRegistryBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [GlanceRegistryBase, role_data, logging_source]} - logging_groups: {get_attr: [GlanceRegistryBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [GlanceRegistryBase, role_data, config_settings] - - glance::registry::manage_service: false - glance::registry::enabled: false - # No puppet manifests since glance-registry is included in - # ::tripleo::profile::pacemaker::glance which is maintained alongside of - # pacemaker/glance-api.yaml. - step_config: diff --git a/puppet/services/pacemaker/gnocchi-metricd.yaml b/puppet/services/pacemaker/gnocchi-metricd.yaml deleted file mode 100644 index f83c3cd6..00000000 --- a/puppet/services/pacemaker/gnocchi-metricd.yaml +++ /dev/null @@ -1,47 +0,0 @@ -heat_template_version: ocata - -description: > - Gnocchi service configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. 
- type: json - MonitoringSubscriptionGnocchiMetricd: - default: 'overcloud-gnocchi-metricd' - type: string - -resources: - GnocchiServiceBase: - type: ../gnocchi-metricd.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Gnocchi role. - value: - service_name: gnocchi_metricd - monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiMetricd} - config_settings: - map_merge: - - get_attr: [GnocchiServiceBase, role_data, config_settings] - - gnocchi::metricd::manage_service: false - gnocchi::metricd::enabled: false - tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]} - - step_config: | - include ::tripleo::profile::pacemaker::gnocchi::metricd diff --git a/puppet/services/pacemaker/gnocchi-statsd.yaml b/puppet/services/pacemaker/gnocchi-statsd.yaml deleted file mode 100644 index 8c224497..00000000 --- a/puppet/services/pacemaker/gnocchi-statsd.yaml +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: ocata - -description: > - Gnocchi service configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - MonitoringSubscriptionGnocchiStatsd: - default: 'overcloud-gnocchi-statsd' - type: string - -resources: - GnocchiServiceBase: - type: ../gnocchi-statsd.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Gnocchi role. - value: - service_name: gnocchi_statsd - monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiStatsd} - config_settings: - map_merge: - - get_attr: [GnocchiServiceBase, role_data, config_settings] - - gnocchi::statsd::manage_service: false - gnocchi::statsd::enabled: false - tripleo::profile::pacemaker::gnocchi::gnocchi_indexer_backend: {get_attr: [GnocchiServiceBase, aux_parameters, gnocchi_indexer_backend]} - step_config: | - include ::tripleo::profile::pacemaker::gnocchi::statsd diff --git a/puppet/services/pacemaker/haproxy.yaml b/puppet/services/pacemaker/haproxy.yaml index 50da4119..598deaef 100644 --- a/puppet/services/pacemaker/haproxy.yaml +++ b/puppet/services/pacemaker/haproxy.yaml @@ -40,3 +40,5 @@ outputs: tripleo::haproxy::mysql_clustercheck: true step_config: | include ::tripleo::profile::pacemaker::haproxy + metadata_settings: + get_attr: [LoadbalancerServiceBase, role_data, metadata_settings] diff --git a/puppet/services/pacemaker/heat-api-cfn.yaml b/puppet/services/pacemaker/heat-api-cfn.yaml deleted file mode 100644 index da856d5a..00000000 --- a/puppet/services/pacemaker/heat-api-cfn.yaml +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: ocata - -description: > - Openstack Heat CloudFormation API service configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. 
This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - HeatApiCfnBase: - type: ../heat-api-cfn.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Heat CloudFormation API role. - value: - service_name: heat_api_cfn - monitoring_subscription: {get_attr: [HeatApiCfnBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [HeatApiCfnBase, role_data, logging_source]} - logging_groups: {get_attr: [HeatApiCfnBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [HeatApiCfnBase, role_data, config_settings] - - heat::api_cfn::manage_service: false - heat::api_cfn::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::heat::api_cfn diff --git a/puppet/services/pacemaker/heat-api-cloudwatch.yaml b/puppet/services/pacemaker/heat-api-cloudwatch.yaml deleted file mode 100644 index a1a66cf6..00000000 --- a/puppet/services/pacemaker/heat-api-cloudwatch.yaml +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: ocata - -description: > - Openstack Heat CloudWatch API service configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - HeatApiCloudwatchBase: - type: ../heat-api-cloudwatch.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Heat Cloudwatch API role. - value: - service_name: heat_api_cloudwatch - monitoring_subscription: {get_attr: [HeatApiCloudwatchBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [HeatApiCloudwatchBase, role_data, logging_source]} - logging_groups: {get_attr: [HeatApiCloudwatchBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [HeatApiCloudwatchBase, role_data, config_settings] - - heat::api_cloudwatch::manage_service: false - heat::api_cloudwatch::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::heat::api_cloudwatch diff --git a/puppet/services/pacemaker/heat-api.yaml b/puppet/services/pacemaker/heat-api.yaml deleted file mode 100644 index c9a14373..00000000 --- a/puppet/services/pacemaker/heat-api.yaml +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: ocata - -description: > - Openstack Heat API service configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. 
- type: json - -resources: - HeatApiBase: - type: ../heat-api.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Heat API role. - value: - service_name: heat_api - monitoring_subscription: {get_attr: [HeatApiBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [HeatApiBase, role_data, logging_source]} - logging_groups: {get_attr: [HeatApiBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [HeatApiBase, role_data, config_settings] - - heat::api::manage_service: false - heat::api::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::heat::api diff --git a/puppet/services/pacemaker/heat-engine.yaml b/puppet/services/pacemaker/heat-engine.yaml deleted file mode 100644 index 42eb0045..00000000 --- a/puppet/services/pacemaker/heat-engine.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - Openstack Heat Engine service configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - HeatEngineBase: - type: ../heat-engine.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - - -outputs: - role_data: - description: Role data for the Heat engine role. - value: - service_name: heat_engine - monitoring_subscription: {get_attr: [HeatEngineBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [HeatEngineBase, role_data, logging_source]} - logging_groups: {get_attr: [HeatEngineBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [HeatEngineBase, role_data, config_settings] - - heat::engine::manage_service: false - heat::engine::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::heat::engine diff --git a/puppet/services/pacemaker/horizon.yaml b/puppet/services/pacemaker/horizon.yaml deleted file mode 100644 index 04b2d4c1..00000000 --- a/puppet/services/pacemaker/horizon.yaml +++ /dev/null @@ -1,41 +0,0 @@ -heat_template_version: ocata - -description: > - Horizon service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - HorizonBase: - type: ../horizon.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Horizon role. 
- value: - service_name: horizon - monitoring_subscription: {get_attr: [HorizonBase, role_data, monitoring_subscription]} - config_settings: - get_attr: [HorizonBase, role_data, config_settings] - step_config: | - include ::tripleo::profile::base::horizon - include ::tripleo::profile::pacemaker::apache diff --git a/puppet/services/pacemaker/keystone.yaml b/puppet/services/pacemaker/keystone.yaml deleted file mode 100644 index 97015484..00000000 --- a/puppet/services/pacemaker/keystone.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Keystone service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - KeystoneServiceBase: - type: ../keystone.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Keystone pacemaker role. - value: - service_name: keystone - monitoring_subscription: {get_attr: [KeystoneServiceBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [KeystoneServiceBase, role_data, logging_source]} - logging_groups: {get_attr: [KeystoneServiceBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [KeystoneServiceBase, role_data, config_settings] - - keystone::manage_service: false - keystone::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::keystone diff --git a/puppet/services/pacemaker/neutron-dhcp.yaml b/puppet/services/pacemaker/neutron-dhcp.yaml deleted file mode 100644 index 84a4f8ad..00000000 --- a/puppet/services/pacemaker/neutron-dhcp.yaml +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron DHCP service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NeutronDhcpBase: - type: ../neutron-dhcp.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron DHCP role. 
- value: - service_name: neutron_dhcp - monitoring_subscription: {get_attr: [NeutronDhcpBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NeutronDhcpBase, role_data, logging_source]} - logging_groups: {get_attr: [NeutronDhcpBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [NeutronDhcpBase, role_data, config_settings] - - tripleo::profile::pacemaker::neutron::enable_dhcp: True - neutron::agents::dhcp::enabled: false - neutron::agents::dhcp::manage_service: false - step_config: | - include ::tripleo::profile::pacemaker::neutron::dhcp diff --git a/puppet/services/pacemaker/neutron-l3.yaml b/puppet/services/pacemaker/neutron-l3.yaml deleted file mode 100644 index 6a99cf98..00000000 --- a/puppet/services/pacemaker/neutron-l3.yaml +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron L3 service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NeutronL3Base: - type: ../neutron-l3.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron L3 role. - value: - service_name: neutron_l3 - monitoring_subscription: {get_attr: [NeutronL3Base, role_data, monitoring_subscription]} - logging_source: {get_attr: [NeutronL3Base, role_data, logging_source]} - logging_groups: {get_attr: [NeutronL3Base, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [NeutronL3Base, role_data, config_settings] - - tripleo::profile::pacemaker::neutron::enable_l3: True - neutron::agents::l3::enabled: false - neutron::agents::l3::manage_service: false - step_config: | - include ::tripleo::profile::pacemaker::neutron::l3 diff --git a/puppet/services/pacemaker/neutron-metadata.yaml b/puppet/services/pacemaker/neutron-metadata.yaml deleted file mode 100644 index 2c3dd374..00000000 --- a/puppet/services/pacemaker/neutron-metadata.yaml +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron Metadata service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NeutronMetadataBase: - type: ../neutron-metadata.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron Metadata role. 
- value: - service_name: neutron_metadata - monitoring_subscription: {get_attr: [NeutronMetadataBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NeutronMetadataBase, role_data, logging_source]} - logging_groups: {get_attr: [NeutronMetadataBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [NeutronMetadataBase, role_data, config_settings] - - tripleo::profile::pacemaker::neutron::enable_metadata: True - step_config: | - include ::tripleo::profile::pacemaker::neutron::metadata diff --git a/puppet/services/pacemaker/neutron-midonet.yaml b/puppet/services/pacemaker/neutron-midonet.yaml deleted file mode 100644 index b7f77697..00000000 --- a/puppet/services/pacemaker/neutron-midonet.yaml +++ /dev/null @@ -1,41 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron Midonet with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NeutronMidonetBase: - type: ../neutron-midonet.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron Midonet plugin. - value: - service_name: neutron_midonet - monitoring_subscription: {get_attr: [NeutronMidonetBase, role_data, monitoring_subscription]} - config_settings: - map_merge: - - get_attr: [NeutronMidonetBase, role_data, config_settings] - step_config: | - include ::tripleo::profile::pacemaker::neutron::plugins::midonet diff --git a/puppet/services/pacemaker/neutron-ovs-agent.yaml b/puppet/services/pacemaker/neutron-ovs-agent.yaml deleted file mode 100644 index 67d5e274..00000000 --- a/puppet/services/pacemaker/neutron-ovs-agent.yaml +++ /dev/null @@ -1,42 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron OVS agent with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NeutronOvsBase: - type: ../neutron-ovs-agent.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron OVS agent service. 
- value: - service_name: neutron_ovs_agent - monitoring_subscription: {get_attr: [NeutronOvsBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NeutronOvsBase, role_data, logging_source]} - logging_groups: {get_attr: [NeutronOvsBase, role_data, logging_groups]} - config_settings: - get_attr: [NeutronOvsBase, role_data, config_settings] - step_config: | - include ::tripleo::profile::pacemaker::neutron::ovs diff --git a/puppet/services/pacemaker/neutron-plugin-ml2.yaml b/puppet/services/pacemaker/neutron-plugin-ml2.yaml deleted file mode 100644 index 3b2bba04..00000000 --- a/puppet/services/pacemaker/neutron-plugin-ml2.yaml +++ /dev/null @@ -1,42 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron ML2 Plugin with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NeutronMl2Base: - type: ../neutron-plugin-ml2.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron ML2 plugin. - value: - service_name: neutron_plugin_ml2 - config_settings: - map_merge: - - get_attr: [NeutronMl2Base, role_data, config_settings] - - neutron::agents::ml2::ovs::enabled: false - neutron::agents::ml2::ovs::manage_service: false - step_config: | - include ::tripleo::profile::pacemaker::neutron::plugins::ml2 diff --git a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml b/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml deleted file mode 100644 index d3b7e76e..00000000 --- a/puppet/services/pacemaker/neutron-plugin-opencontrail.yaml +++ /dev/null @@ -1,40 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron OpenContrail Plugin with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NeutronPluginOpenContrail: - type: ../neutron-plugin-nuage.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron OpenContrail plugin. 
- value: - service_name: neutron_plugin_opencontrail - config_settings: - map_merge: - - get_attr: [NeutronPluginOpenContrail, role_data, config_settings] - step_config: | - include ::tripleo::profile::pacemaker::neutron::plugins::opencontrail diff --git a/puppet/services/pacemaker/neutron-server.yaml b/puppet/services/pacemaker/neutron-server.yaml deleted file mode 100644 index 015cd9fb..00000000 --- a/puppet/services/pacemaker/neutron-server.yaml +++ /dev/null @@ -1,48 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Neutron Server with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - NeutronL3HA: - default: true - description: Whether to enable HA for virtual routers - type: boolean - -resources: - - NeutronServerBase: - type: ../neutron-server.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Neutron Server. - value: - service_name: neutron_server - monitoring_subscription: {get_attr: [NeutronServerBase, role_data, monitoring_subscription]} - config_settings: - map_merge: - - get_attr: [NeutronServerBase, role_data, config_settings] - - neutron::server::enabled: false - neutron::server::manage_service: false - neutron::server::l3_ha: {get_param: NeutronL3HA} - step_config: | - include ::tripleo::profile::pacemaker::neutron::server diff --git a/puppet/services/pacemaker/nova-api.yaml b/puppet/services/pacemaker/nova-api.yaml deleted file mode 100644 index c9411900..00000000 --- a/puppet/services/pacemaker/nova-api.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Nova API service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NovaApiBase: - type: ../nova-api.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Nova API role. 
- value: - service_name: nova_api - monitoring_subscription: {get_attr: [NovaApiBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NovaApiBase, role_data, logging_source]} - logging_groups: {get_attr: [NovaApiBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [NovaApiBase, role_data, config_settings] - - nova::api::manage_service: false - nova::api::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::nova::api diff --git a/puppet/services/pacemaker/nova-conductor.yaml b/puppet/services/pacemaker/nova-conductor.yaml deleted file mode 100644 index 31b4c406..00000000 --- a/puppet/services/pacemaker/nova-conductor.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Nova Conductor service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NovaConductorBase: - type: ../nova-conductor.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Nova Conductor role. - value: - service_name: nova_conductor - monitoring_subscription: {get_attr: [NovaConductorBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NovaConductorBase, role_data, logging_source]} - logging_groups: {get_attr: [NovaConductorBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [NovaConductorBase, role_data, config_settings] - - nova::conductor::manage_service: false - nova::conductor::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::nova::conductor diff --git a/puppet/services/pacemaker/nova-consoleauth.yaml b/puppet/services/pacemaker/nova-consoleauth.yaml deleted file mode 100644 index 8c4be5a8..00000000 --- a/puppet/services/pacemaker/nova-consoleauth.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Nova Consoleauth service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NovaConsoleauthBase: - type: ../nova-consoleauth.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Nova Consoleauth role. 
- value: - service_name: nova_consoleauth - monitoring_subscription: {get_attr: [NovaConsoleauthBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NovaConsoleauthBase, role_data, logging_source]} - logging_groups: {get_attr: [NovaConsoleauthBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [NovaConsoleauthBase, role_data, config_settings] - - nova::consoleauth::manage_service: false - nova::consoleauth::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::nova::consoleauth diff --git a/puppet/services/pacemaker/nova-scheduler.yaml b/puppet/services/pacemaker/nova-scheduler.yaml deleted file mode 100644 index afcf5d5c..00000000 --- a/puppet/services/pacemaker/nova-scheduler.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Nova Scheduler service with Pacemaker configured with Puppet. - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - NovaSchedulerBase: - type: ../nova-scheduler.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Nova Scheduler role. - value: - service_name: nova_scheduler - monitoring_subscription: {get_attr: [NovaSchedulerBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [NovaSchedulerBase, role_data, logging_source]} - logging_groups: {get_attr: [NovaSchedulerBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [NovaSchedulerBase, role_data, config_settings] - - nova::scheduler::manage_service: false - nova::scheduler::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::nova::scheduler diff --git a/puppet/services/pacemaker/rabbitmq.yaml b/puppet/services/pacemaker/rabbitmq.yaml index 03c2c83f..b018df35 100644 --- a/puppet/services/pacemaker/rabbitmq.yaml +++ b/puppet/services/pacemaker/rabbitmq.yaml @@ -39,3 +39,32 @@ outputs: - rabbitmq::service_manage: false step_config: | include ::tripleo::profile::pacemaker::rabbitmq + upgrade_tasks: + - name: get bootstrap nodeid + tags: common + command: hiera bootstrap_nodeid + register: bootstrap_node + - name: set is_bootstrap_node fact + tags: common + set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}} + - name: get rabbitmq policy + tags: common + shell: pcs resource show rabbitmq | grep -q -E "Attributes:.*\"ha-mode\":\"all\"" + register: rabbit_ha_mode + when: is_bootstrap_node + ignore_errors: true + - name: set migrate_rabbit_ha_mode fact + tags: common + set_fact: migrate_rabbit_ha_mode={{rabbit_ha_mode.rc == 0}} + when: is_bootstrap_node + - name: Fixup for rabbitmq ha-queues LP#1668600 + tags: step0,pre-upgrade + shell: | + nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1)) + nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2))) + if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then + echo "ERROR: The nr. 
of HA queues during the rabbit upgrade is out of range: $nr_queues" + exit 1 + fi + pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600 + when: is_bootstrap_node and migrate_rabbit_ha_mode diff --git a/puppet/services/pacemaker/sahara-api.yaml b/puppet/services/pacemaker/sahara-api.yaml deleted file mode 100644 index 4ff93f9a..00000000 --- a/puppet/services/pacemaker/sahara-api.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Sahara API service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - SaharaApiBase: - type: ../sahara-api.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Sahara API role. - value: - service_name: sahara_api - monitoring_subscription: {get_attr: [SaharaApiBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [SaharaApiBase, role_data, logging_source]} - logging_groups: {get_attr: [SaharaApiBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [SaharaApiBase, role_data, config_settings] - - sahara::service::api::manage_service: false - sahara::service::api::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::sahara::api diff --git a/puppet/services/pacemaker/sahara-engine.yaml b/puppet/services/pacemaker/sahara-engine.yaml deleted file mode 100644 index 18f2ffe1..00000000 --- a/puppet/services/pacemaker/sahara-engine.yaml +++ /dev/null @@ -1,45 +0,0 @@ -heat_template_version: ocata - -description: > - OpenStack Sahara Engine service with Pacemaker configured with Puppet - -parameters: - ServiceNetMap: - default: {} - description: Mapping of service_name -> network name. Typically set - via parameter_defaults in the resource registry. This - mapping overrides those in ServiceNetMapDefaults. - type: json - DefaultPasswords: - default: {} - type: json - EndpointMap: - default: {} - description: Mapping of service endpoint -> protocol. Typically set - via parameter_defaults in the resource registry. - type: json - -resources: - - SaharaEngineBase: - type: ../sahara-engine.yaml - properties: - ServiceNetMap: {get_param: ServiceNetMap} - DefaultPasswords: {get_param: DefaultPasswords} - EndpointMap: {get_param: EndpointMap} - -outputs: - role_data: - description: Role data for the Sahara Engine role. 
- value: - service_name: sahara_engine - monitoring_subscription: {get_attr: [SaharaEngineBase, role_data, monitoring_subscription]} - logging_source: {get_attr: [SaharaEngineBase, role_data, logging_source]} - logging_groups: {get_attr: [SaharaEngineBase, role_data, logging_groups]} - config_settings: - map_merge: - - get_attr: [SaharaEngineBase, role_data, config_settings] - - sahara::service::engine::manage_service: false - sahara::service::engine::enabled: false - step_config: | - include ::tripleo::profile::pacemaker::sahara::engine diff --git a/puppet/services/pacemaker_remote.yaml b/puppet/services/pacemaker_remote.yaml new file mode 100644 index 00000000..daee43e6 --- /dev/null +++ b/puppet/services/pacemaker_remote.yaml @@ -0,0 +1,57 @@ +heat_template_version: ocata + +description: > + Pacemaker remote service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + PacemakerRemoteAuthkey: + type: string + description: The authkey for the pacemaker remote service. + hidden: true + default: '' + MonitoringSubscriptionPacemakerRemote: + default: 'overcloud-pacemaker_remote' + type: string + PacemakerRemoteLoggingSource: + type: json + default: + tag: system.pacemaker_remote + path: /var/log/pacemaker.log + format: >- + /^(?<time>[^ ]*\s*[^ ]* [^ ]*) + \[(?<pid>[^ ]*)\] + (?<host>[^ ]*) + (?<message>.*)$/ + +outputs: + role_data: + description: Role data for the Pacemaker remote role. 
+ value: + service_name: pacemaker_remote + monitoring_subscription: {get_param: MonitoringSubscriptionPacemakerRemote} + logging_groups: + - haclient + logging_source: {get_param: PacemakerRemoteLoggingSource} + config_settings: + tripleo.pacemaker_remote.firewall_rules: + '130 pacemaker_remote tcp': + proto: 'tcp' + dport: + - 3121 + tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey} + step_config: | + include ::tripleo::profile::base::pacemaker_remote diff --git a/puppet/services/panko-api.yaml b/puppet/services/panko-api.yaml index 06284fb2..eed98257 100644 --- a/puppet/services/panko-api.yaml +++ b/puppet/services/panko-api.yaml @@ -82,3 +82,24 @@ outputs: get_attr: [PankoBase, role_data, service_config_settings] step_config: | include tripleo::profile::base::panko::api + metadata_settings: + get_attr: [ApacheServiceBase, role_data, metadata_settings] + upgrade_tasks: + - name: Check if httpd is deployed + command: systemctl is-enabled httpd + tags: common + ignore_errors: True + register: httpd_enabled + - name: "PreUpgrade step0,validation: Check if httpd is running" + shell: > + /usr/bin/systemctl show 'httpd' --property ActiveState | + grep '\bactive\b' + when: httpd_enabled.rc == 0 + tags: step0,validation + - name: Stop panko-api service (running under httpd) + tags: step1 + service: name=httpd state=stopped + when: httpd_enabled.rc == 0 + - name: Install openstack-panko-api package if it was not installed + tags: step3 + yum: name=openstack-panko-api state=latest diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml index 6e25d796..998e64ee 100644 --- a/puppet/services/panko-base.yaml +++ b/puppet/services/panko-base.yaml @@ -46,14 +46,13 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/panko' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' panko::debug: {get_param: Debug} panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } panko::keystone::authtoken::project_name: 'service' panko::keystone::authtoken::password: {get_param: PankoPassword} panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } + panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] } panko::auth::auth_password: {get_param: PankoPassword} panko::auth::auth_region: 'regionOne' panko::auth::auth_tenant_name: 'service' diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml index 5526a6f2..2c4ccbc9 100644 --- a/puppet/services/rabbitmq.yaml +++ b/puppet/services/rabbitmq.yaml @@ -78,6 +78,7 @@ outputs: NODE_IP_ADDRESS: '' RABBITMQ_NODENAME: "rabbit@%{::hostname}" RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"' + 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}" rabbitmq_kernel_variables: inet_dist_listen_min: '25672' inet_dist_listen_max: '25672' @@ -107,6 +108,6 @@ outputs: tags: step2 service: name=rabbitmq-server state=stopped - name: Start rabbitmq service - tags: step6 + tags: step4 service: name=rabbitmq-server state=started diff --git a/puppet/services/sahara-api.yaml b/puppet/services/sahara-api.yaml index 
9e494385..96b3d6e3 100644 --- a/puppet/services/sahara-api.yaml +++ b/puppet/services/sahara-api.yaml @@ -90,3 +90,7 @@ outputs: sahara::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + upgrade_tasks: + - name: Stop sahara_api service + tags: step1 + service: name=openstack-sahara-api state=stopped diff --git a/puppet/services/sahara-base.yaml b/puppet/services/sahara-base.yaml index b4307053..d5131f61 100644 --- a/puppet/services/sahara-base.yaml +++ b/puppet/services/sahara-base.yaml @@ -64,19 +64,20 @@ outputs: - '@' - {get_param: [EndpointMap, MysqlInternal, host]} - '/sahara' - - '?bind_address=' - - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}" + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' sahara::rabbit_password: {get_param: RabbitPassword} sahara::rabbit_user: {get_param: RabbitUserName} sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL} sahara::rabbit_port: {get_param: RabbitClientPort} sahara::debug: {get_param: Debug} + # Remove admin_password when https://review.openstack.org/442619 is merged. sahara::admin_password: {get_param: SaharaPassword} - sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] } - sahara::identity_uri: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] } sahara::use_neutron: true sahara::plugins: {get_param: SaharaPlugins} sahara::rpc_backend: rabbit - sahara::admin_tenant_name: 'service' sahara::db::database_db_max_retries: -1 sahara::db::database_max_retries: -1 + sahara::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + sahara::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} + sahara::keystone::authtoken::password: {get_param: SaharaPassword} + sahara::keystone::authtoken::project_name: 'service' diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml index a1521c28..c0b6b3e6 100644 --- a/puppet/services/sahara-engine.yaml +++ b/puppet/services/sahara-engine.yaml @@ -49,3 +49,7 @@ outputs: - get_attr: [SaharaBase, role_data, config_settings] step_config: | include ::tripleo::profile::base::sahara::engine + upgrade_tasks: + - name: Stop sahara_engine service + tags: step1 + service: name=openstack-sahara-engine state=stopped diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml index 90268c78..a2286d16 100644 --- a/puppet/services/services.yaml +++ b/puppet/services/services.yaml @@ -52,11 +52,7 @@ outputs: description: Combined Role data for this set of services. 
value: service_names: - # Filter any null/None service_names which may be present due to mapping - # of services to OS::Heat::None - yaql: - expression: list($.data.s_names.where($ != null)) - data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}} + {get_attr: [ServiceChain, role_data, service_name]} monitoring_subscriptions: yaql: expression: list($.data.role_data.where($ != null).select($.get('monitoring_subscription')).where($ != null)) @@ -112,10 +108,15 @@ outputs: yaql: expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {}) data: {role_data: {get_attr: [ServiceChain, role_data]}} - step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]} + step_config: {get_attr: [ServiceChain, role_data, step_config]} upgrade_tasks: yaql: # Note we use distinct() here to filter any identical tasks, e.g yum update for all services expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct() data: {get_attr: [ServiceChain, role_data]} + upgrade_batch_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g yum update for all services + expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]} diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml index be9d143e..80c29f95 100644 --- a/puppet/services/snmp.yaml +++ b/puppet/services/snmp.yaml @@ -43,3 +43,7 @@ outputs: proto: 'udp' step_config: | include ::tripleo::profile::base::snmp + upgrade_tasks: + - name: Stop snmp service + tags: step1 + service: name=snmpd state=stopped diff --git a/puppet/services/neutron-compute-plugin-opencontrail.yaml b/puppet/services/sshd.yaml index bbe4a051..12998c33 100644 --- a/puppet/services/neutron-compute-plugin-opencontrail.yaml +++ b/puppet/services/sshd.yaml @@ -1,7 +1,7 @@ heat_template_version: ocata description: > - OpenStack Neutron Compute OpenContrail plugin + Configure sshd_config parameters: ServiceNetMap: @@ -18,12 +18,17 @@ parameters: description: Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry. type: json + BannerText: + default: '' + description: Configures Banner text in sshd_config + type: string outputs: role_data: - description: Role data for the Neutron Compute OpenContrail plugin + description: Role data for the ssh value: - service_name: neutron_compute_plugin_opencontrail + service_name: sshd config_settings: + tripleo::profile::base::sshd::bannertext: {get_param: BannerText} step_config: | - include ::tripleo::profile::base::neutron::opencontrail::vrouter + include ::tripleo::profile::base::sshd diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml index da6021bc..0c3cc1ec 100644 --- a/puppet/services/swift-proxy.yaml +++ b/puppet/services/swift-proxy.yaml @@ -57,6 +57,12 @@ parameters: default: 5672 description: Set rabbit subscriber port, change this if using SSL type: number + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. 
+ type: string conditions: @@ -81,7 +87,7 @@ outputs: - get_attr: [SwiftBase, role_data, config_settings] - swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} - swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} swift::proxy::authtoken::password: {get_param: SwiftPassword} swift::proxy::authtoken::project_name: 'service' swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout} @@ -91,6 +97,7 @@ outputs: swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]} swift::proxy::ceilometer::nonblocking_notify: true tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort} + tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL} tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled} tripleo.swift_proxy.firewall_rules: '122 swift proxy': @@ -119,8 +126,8 @@ outputs: - 'keystone' - 'staticweb' - 'copy' - - 'container-quotas' - - 'account-quotas' + - 'container_quotas' + - 'account_quotas' - 'slo' - 'dlo' - 'versioned_writes' @@ -131,6 +138,7 @@ outputs: - '' - 'proxy-logging' - 'proxy-server' + swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL} swift::proxy::account_autocreate: true # NOTE: bind IP is found in Heat replacing the network name with the # local node IP for the given network; replacement examples @@ -157,3 +165,7 @@ outputs: - admin - swiftoperator - ResellerAdmin + upgrade_tasks: + - name: Stop swift_proxy service + tags: step1 + service: name=openstack-swift-proxy state=stopped diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml index a7ba7bad..2e3c818f 100644 --- a/puppet/services/swift-ringbuilder.yaml +++ b/puppet/services/swift-ringbuilder.yaml @@ -43,6 +43,16 @@ parameters: description: 'Use a local directory for Swift storage services when building rings' type: boolean +conditions: + swift_use_local_dir: + and: + - equals: + - get_param: SwiftUseLocalDir + - true + - equals: + - get_param: SwiftRawDisks + - {} + outputs: role_data: description: Role data for Swift Ringbuilder configuration. @@ -59,7 +69,7 @@ outputs: expression: $.data.raw_disk_lists.flatten() data: raw_disk_lists: - - {if: [{get_param: SwiftUseLocalDir}, [':%PORT%/d1'], []]} + - {if: [swift_use_local_dir, [':%PORT%/d1'], []]} - repeat: template: ':%PORT%/DEVICE' for_each: diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml index 00ae9c35..261aadeb 100644 --- a/puppet/services/swift-storage.yaml +++ b/puppet/services/swift-storage.yaml @@ -56,6 +56,17 @@ resources: DefaultPasswords: {get_param: DefaultPasswords} EndpointMap: {get_param: EndpointMap} +conditions: + swift_mount_check: + or: + - equals: + - get_param: SwiftMountCheck + - true + - not: + equals: + - get_param: SwiftRawDisks + - {} + outputs: role_data: description: Role data for the Swift Proxy role. 
@@ -65,7 +76,7 @@ outputs: config_settings: map_merge: - get_attr: [SwiftBase, role_data, config_settings] - - swift::storage::all::mount_check: {get_param: SwiftMountCheck} + - swift::storage::all::mount_check: {if: [swift_mount_check, true, false]} tripleo::profile::base::swift::storage::enable_swift_storage: {get_param: ControllerEnableSwiftStorage} tripleo.swift_storage.firewall_rules: '123 swift storage': @@ -90,3 +101,20 @@ outputs: swift::storage::all::storage_local_net_ip: {get_param: [ServiceNetMap, SwiftStorageNetwork]} step_config: | include ::tripleo::profile::base::swift::storage + upgrade_tasks: + - name: Stop swift storage services + tags: step1 + service: name={{ item }} state=stopped + with_items: + - openstack-swift-account-auditor + - openstack-swift-account-reaper + - openstack-swift-account-replicator + - openstack-swift-account + - openstack-swift-container-auditor + - openstack-swift-container-replicator + - openstack-swift-container-updater + - openstack-swift-container + - openstack-swift-object-auditor + - openstack-swift-object-replicator + - openstack-swift-object-updater + - openstack-swift-object diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml new file mode 100644 index 00000000..6f92066e --- /dev/null +++ b/puppet/services/tacker.yaml @@ -0,0 +1,116 @@ +heat_template_version: ocata + +description: > + OpenStack Tacker service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + TackerPassword: + description: The password for the tacker service account. + type: string + hidden: true + Debug: + type: string + default: '' + KeystoneRegion: + type: string + default: 'regionOne' + description: Keystone region for endpoint + RabbitPassword: + description: The password for RabbitMQ + type: string + hidden: true + RabbitUserName: + default: guest + description: The username for RabbitMQ + type: string + RabbitClientUseSSL: + default: false + description: > + Rabbit client subscriber parameter to specify + an SSL connection to the RabbitMQ host. + type: string + RabbitClientPort: + default: 5672 + description: Set rabbit subscriber port, change this if using SSL + type: number + +outputs: + role_data: + description: Role data for the Tacker role. 
+ value: + service_name: tacker + config_settings: + tacker_password: {get_param: TackerPassword} + tacker::db::database_connection: + list_join: + - '' + - - {get_param: [EndpointMap, MysqlInternal, protocol]} + - '://tacker:' + - {get_param: TackerPassword} + - '@' + - {get_param: [EndpointMap, MysqlInternal, host]} + - '/tacker' + - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo' + + tacker::debug: {get_param: Debug} + tacker::rpc_backend: rabbit + tacker::rabbit_userid: {get_param: RabbitUserName} + tacker::rabbit_password: {get_param: RabbitPassword} + tacker::rabbit_use_ssl: {get_param: RabbitClientUseSSL} + tacker::rabbit_port: {get_param: RabbitClientPort} + tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]} + + tacker::keystone::authtoken::project_name: 'service' + tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} + + tacker::db::mysql::password: {get_param: TackerPassword} + tacker::db::mysql::user: tacker + tacker::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + tacker::db::mysql::dbname: tacker + tacker::db::mysql::allowed_hosts: + - '%' + - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]} + + service_config_settings: + keystone: + tacker::keystone::auth::tenant: 'service' + tacker::keystone::auth::password: {get_param: TackerPassword} + tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]} + tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]} + tacker::keystone::auth::admin_url: {get_param: [EndpointMap, TackerAdmin, uri]} + + step_config: | + include ::tripleo::profile::base::tacker + upgrade_tasks: + - name: Check if tacker is deployed + command: systemctl is-enabled openstack-tacker-server + tags: common + ignore_errors: True + register: tacker_enabled + - name: "PreUpgrade step0,validation: Check service openstack-tacker-server is running" + shell: /usr/bin/systemctl show 'openstack-tacker-server' --property ActiveState | grep '\bactive\b' + when: tacker_enabled.rc == 0 + tags: step0,validation + - name: Stop tacker service + tags: step1 + when: tacker_enabled.rc == 0 + service: name=openstack-tacker-server state=stopped + - name: Install openstack-tacker package if it was disabled + tags: step3 + yum: name=openstack-tacker state=latest + when: tacker_enabled.rc != 0 diff --git a/puppet/services/time/ntp.yaml b/puppet/services/time/ntp.yaml index 7c3a19a9..b14d7bcc 100644 --- a/puppet/services/time/ntp.yaml +++ b/puppet/services/time/ntp.yaml @@ -22,8 +22,10 @@ parameters: via parameter_defaults in the resource registry. type: json NtpServer: - default: [] - description: NTP servers + default: ['pool.ntp.org'] + description: NTP servers list. Defaulted to pool.ntp.org in order to + have a sane default for Pacemaker deployments when + not configuring this parameter by default. 
type: comma_delimited_list outputs: @@ -38,4 +40,4 @@ outputs: dport: 123 proto: udp step_config: | - include ::ntp + include ::tripleo::profile::base::time::ntp diff --git a/puppet/services/tripleo-packages.yaml b/puppet/services/tripleo-packages.yaml index da6e3083..737be829 100644 --- a/puppet/services/tripleo-packages.yaml +++ b/puppet/services/tripleo-packages.yaml @@ -33,6 +33,14 @@ outputs: step_config: | include ::tripleo::packages upgrade_tasks: + - name: Check yum for rpm-python present + tags: step0 + yum: "name=rpm-python state=present" + register: rpm_python_check + - name: Fail when rpm-python wasn't present + fail: msg="rpm-python package was not present before this run! Check environment before re-running" + when: rpm_python_check.changed != false + tags: step0 - name: Update all packages tags: step3 yum: name=* state=latest diff --git a/puppet/services/vpp.yaml b/puppet/services/vpp.yaml new file mode 100644 index 00000000..59866d39 --- /dev/null +++ b/puppet/services/vpp.yaml @@ -0,0 +1,47 @@ +heat_template_version: ocata + +description: > + Vpp service configured with Puppet + +parameters: + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + VppCpuMainCore: + default: '' + description: VPP main thread core pinning. + type: string + VppCpuCorelistWorkers: + default: '' + description: List of cores for VPP worker thread pinning + type: string + MonitoringSubscriptionVpp: + default: 'overcloud-vpp' + type: string + +outputs: + role_data: + description: Role data for the Vpp role. 
+ value: + service_name: vpp + monitoring_subscription: {get_param: MonitoringSubscriptionVpp} + config_settings: + fdio::vpp_cpu_main_core: {get_param: VppCpuMainCore} + fdio::vpp_cpu_corelist_workers: {get_param: VppCpuCorelistWorkers} + step_config: | + include ::tripleo::profile::base::vpp + upgrade_tasks: + - name: Stop vpp service + tags: step2 + service: name=vpp state=stopped diff --git a/puppet/services/zaqar.yaml b/puppet/services/zaqar.yaml index 0224ac13..a320f694 100644 --- a/puppet/services/zaqar.yaml +++ b/puppet/services/zaqar.yaml @@ -40,7 +40,7 @@ outputs: config_settings: zaqar::keystone::authtoken::password: {get_param: ZaqarPassword} zaqar::keystone::authtoken::project_name: 'service' - zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]} + zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]} zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]} zaqar::debug: {get_param: Debug} zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]} @@ -64,3 +64,23 @@ outputs: step_config: | include ::tripleo::profile::base::zaqar + upgrade_tasks: + - name: Check if zaqar is deployed + command: systemctl is-enabled openstack-zaqar + tags: common + ignore_errors: True + register: zaqar_enabled + - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running" + shell: > + /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState | + grep '\bactive\b' + when: zaqar_enabled.rc == 0 + tags: step0,validation + - name: Stop zaqar service + tags: step1 + when: zaqar_enabled.rc == 0 + service: name=openstack-zaqar state=stopped + - name: Install openstack-zaqar package if it was disabled + tags: step3 + yum: name=openstack-zaqar state=latest + when: zaqar_enabled.rc != 0 diff --git a/puppet/upgrade_config.yaml b/puppet/upgrade_config.yaml index 499160e5..2cfd43f4 100644 --- a/puppet/upgrade_config.yaml +++ b/puppet/upgrade_config.yaml @@ -11,6 +11,11 @@ parameters: type: string description: Step number of the upgrade + SkipUpgradeConfigTags: + type: comma_delimited_list + description: Ansible tags to skip during upgrade, e.g validation skips pre-upgrade validations + default: [] + resources: AnsibleConfig: @@ -30,11 +35,16 @@ resources: properties: group: ansible options: + skip_tags: + list_join: + - "," + - {get_param: SkipUpgradeConfigTags} tags: str_replace: - template: "stepSTEP" + template: "common,stepSTEP" params: STEP: {get_param: step} + modulepath: /usr/share/ansible-modules inputs: - name: role config: {get_attr: [AnsibleConfig, value]} diff --git a/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml new file mode 100644 index 00000000..9343d99e --- /dev/null +++ b/releasenotes/notes/6.0.0-b52a14a71fc62788.yaml @@ -0,0 +1,127 @@ +--- +prelude: > + 6.0.0 is the final release for Ocata. + It's the first release where release notes are added. +features: + - Fujitsu Neutron plugin for FOS support. Users can deploy + Neutron with this plugin by using + environments/neutron-ml2-fujitsu-fossw.yaml environment file. + - Expose InstanceDiscoveryMethod parameter to configure Ceilometer + method used to discover instances running on compute node. + Default value to 'libvirt_metadata'. Allowed values are 'naive', + 'libvirt_metadata' and 'workload_partitioning'. + - Make ServiceNetMap support custom network names. 
+ Note that operators will still be expected to pass any ServiceNetMap + overrides with the "new" network name, e.g. whatever NetName specifies, + otherwise environment files could get very confusing. + - Nova Placement API support. As this new service is required, it is deployed + by default in WSGI with Apache, like other API services. + - Cinder pass-through iSER backend support. + - etcd composable services, used by networking-vpp ML2 driver as the + messaging mechanism. + - Allow configuring cron parameters for Cinder, Heat, Keystone and Nova + crontabs. + - Export NovaDefaultFloatingPool parameter to configure the default pool + of floating IP addresses available. Defaults to 'public' for backward + compatibility. + - Bump Heat Templates to 'ocata' version, to match Heat requirements. + - Configure OVS agent firewall driver only if NeutronOVSFirewallDriver + is set. + - Expose RbdDefaultFeatures parameter to configure the default features + enabled when creating a block device image. + Only applies to format '2' images. Set to '1' for Jewel clients using + older Ceph servers. + - Cinder HPELeftHandISCSIDriver backend support. + - Pacemaker no longer manages Ceilometer, Cinder API, + Cinder Scheduler, MongoDB, Glance, Gnocchi, Heat, Apache, Memcached, + Neutron, Nova and Sahara. + - Ceph MDS service support. The service can be enabled with the + environments/services/ceph-mds.yaml environment file. + - Expose HeatConvergenceEngine and HeatMaxResourcesPerStack parameters + to configure Heat. + - Add pre-network hook and example showing config-then-reboot. + - Expose LibvirtEnabledPerfEvents parameter in Nova Compute service. + Defaults to an empty array. + This is a list of performance events that can be used for monitoring. + - Increase libvirt/qemu.conf max_files to 32768 and max_processes to + 131072. + - Split OVN northd and ml2 plugin, so we can deploy OVNDBs and Northd + services on different nodes. + - Add hook to generate metadata from service profiles. + This is useful for nova vendordata plugins that can parse said metadata. + - Expose EventPipelinePublishers to Ceilometer and set the default to + 'notifier://?topic=alarm.all'. + - Add Panko service support. This service is not enabled by default. Use + environments/services/enable-panko.yaml to include it in your deployment. + - Add EC2-API composable service support. + - Allow dnsmasq_dns_servers to be configured for Neutron DHCP Agent with a + new parameter (NeutronDhcpAgentDnsmasqDnsServers, defaults to []). + - Add support for Ceph RBD mirroring daemon managed by Pacemaker. + - Add deployed server bootstrap for RHEL. + - Configure VNC Server listen address on internal_api network by default. + - Support for Cinder Dell EMC PS Series. + - Support for Cinder Dell EMC Storage Center. + - Support for Octavia composable services for LBaaS with Neutron. + - Support for Collectd composable services for performance monitoring. + - Support for Tacker composable service for VNF management. + - Add the plan-environment.yaml file which will facilitate deployment plan + import and export. +upgrade: + - Update OpenDaylight deployment to use networking-odl v2 as a mechanism + driver. + - Update Contrail composable services. +deprecations: + - Glance Registry service has been removed and Glance API v2 is now deployed + by default. Glance API v1 is not supported anymore in TripleO. + - Remove CeilometerStoreEvents parameter, which has been removed + in Ceilometer. + - Ceilometer API service is deprecated and will be removed in a future + release.
If you would like to disable it, use + the environments/services/disable-ceilometer-api.yaml environment file. + - Removes deprecated OpenDaylight L2 only deployments. + Deploying ODL without L3 DVR is no longer supported. +security: + - Enable management of 'DISALLOW_IFRAME_EMBED' in Horizon configuration to + prevent the dashboard from being embedded within an iframe and exposed to Cross-Frame + Scripting (XFS) vulnerability on legacy browsers. + - Enable management of 'ENFORCE_PASSWORD_CHECK' in Horizon's configuration to + display an Admin Password field on the Change Password form to verify that + it is indeed the admin logged-in who wants to change the password. + - Enable management of 'DISABLE_PASSWORD_REVEAL' in Horizon, to remove the + password reveal option. + - Enable the 'SECURE_PROXY_SSL_HEADER' option in Horizon's configuration to take + the X-Forwarded-Proto header into account when forming URLs. + - Enable management of ENFORCE_PASSWORD_CHECK value. By setting + 'ENFORCE_PASSWORD_CHECK' to 'True' within Horizon's local_settings.py, it + displays an ‘Admin Password’ field on the “Change Password” form to verify + that it is the admin logged-in that wants to perform the password change. + - Enable management of Horizon's password validation. Enables injection of an + operator's own password validation regex via a heat template. + - Enable management of '/etc/issue Banner' whereby an operator can populate + their own Banner warning text to be displayed upon terminal login. + - Enable management of the auditd system. '/etc/audit/audit.rules' can now be + populated by means of a heat template. +fixes: + - Fixes `bug 1645898 + <https://bugs.launchpad.net/tripleo/+bug/1645898>`__ so epmd is bound to + the right address, where RabbitMQ is listening too. + - Fixes `bug 1652184 + <https://bugs.launchpad.net/tripleo/+bug/1652184>`__ so swap partitions + can be handled from an environment file thanks to AllNodesExtraConfig. + - Add retry to RHEL registration, useful when having network outages during + registration. + - Fixes `bug 1651476 + <https://bugs.launchpad.net/tripleo/+bug/1651476>`__ so firewall rules + are created for the OpenDaylight API service. + - Fixes `bug 1643487 + <https://bugs.launchpad.net/tripleo/+bug/1643487>`__ to prevent the source + address from binding to a VIP for database connections. + - Fixes `bug 1649836 + <https://bugs.launchpad.net/tripleo/+bug/1649836>`__ to configure + DPDK options to isolate PMD cores and ovs process cores. + - Fixes `bug 1662344 + <https://bugs.launchpad.net/tripleo/+bug/1662344>`__ by no longer + setting bind_address on the nova db uri. + This reverts the changes in https://review.openstack.org/414629 for nova as + they are incompatible with cell_v2. + This is a temporary fix for HA while a long-term solution is developed. diff --git a/releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml b/releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml new file mode 100644 index 00000000..78fdbb59 --- /dev/null +++ b/releasenotes/notes/add-default-ntp-server-696b8568e09be497.yaml @@ -0,0 +1,6 @@ +--- +issues: + - We add a default NTP server to the Overcloud + for all Pacemaker and non-Pacemaker deployments, + also useful for keeping clock drift under control for + Keystone and Ceph.
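The default above is only a fallback; operators can still point the overcloud at their own time sources. A minimal sketch of such an override follows, assuming a hypothetical environment file name (custom-ntp.yaml) and illustrative server addresses; only the NtpServer parameter itself comes from this change:

# custom-ntp.yaml -- hypothetical file name, server addresses are examples only
# Overrides the NtpServer comma_delimited_list parameter defined in
# puppet/services/time/ntp.yaml (default: ['pool.ntp.org']).
parameter_defaults:
  NtpServer:
    - 0.pool.ntp.org
    - 1.pool.ntp.org

Such a file would be passed to the overcloud deploy command with -e custom-ntp.yaml like any other environment file.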
diff --git a/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml new file mode 100644 index 00000000..ec22942a --- /dev/null +++ b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + NeutronDhcpAgents had a default value of 3 that, even though unused in + practice, was a bad default value. Changing the default value to a + sentinel value and making the hiera setting conditional allows deploy-time + logic in puppet to provide a default value based on the number of dhcp + agents being deployed. diff --git a/releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml b/releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml new file mode 100644 index 00000000..e560fe95 --- /dev/null +++ b/releasenotes/notes/composable-ha-37e2d7e1f57f5c10.yaml @@ -0,0 +1,12 @@ +--- +features: + - With the composable HA work landed it is now possible + to split pacemaker-managed services like galera, rabbit, + redis, haproxy and any A/P resource off to dedicated + nodes. These services can be split off to separate nodes + either via the normal Pacemaker service (which has a limit + of 16 nodes) or via the newer PacemakerRemote + service (but not both on the same node). Note that until + https://bugzilla.redhat.com/show_bug.cgi?id=1417936 is fixed, + PacemakerRemote should only be used for Cinder A/P resources + and Manila A/P resources. diff --git a/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml b/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml new file mode 100644 index 00000000..55062b04 --- /dev/null +++ b/releasenotes/notes/composable-upgrades-d9ec7c634365e8e0.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + Composable service plugins now support two additional sections, + upgrade_tasks and upgrade_batch_tasks. These can be used by service + template authors to define the required behavior on upgrade as ansible + tasks, for both upgrades that require downtime, and rolling upgrades. + See puppet/services/README.rst for more details. +upgrade: + - | + Please refer to tripleo-docs for full details on the upgrade workflow + required for Newton to Ocata upgrades, as it's possible some steps are + different from previous releases: + http://docs.openstack.org/developer/tripleo-docs/post_deployment/upgrade.html diff --git a/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml b/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml new file mode 100644 index 00000000..d05b268c --- /dev/null +++ b/releasenotes/notes/deployed-servers-fd47f18204cea105.yaml @@ -0,0 +1,8 @@ +--- +features: + - It is now possible to deploy with tripleo-heat-templates using servers that + are already provisioned with an operating system, and not necessarily + provisioned with Nova and Ironic. This feature is enabled by making use of + the environments/deployed-server-environment.yaml environment file.
For + more information, see + http://docs.openstack.org/developer/tripleo-docs/advanced_deployment/deployed_server.html diff --git a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml new file mode 100644 index 00000000..edcc1250 --- /dev/null +++ b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - The environments/puppet-pacemaker.yaml file is now deprecated and the HA + deployment is now the default. In order to get the non-HA deployment use + environments/nonha-arch.yaml explicitly. diff --git a/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml b/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml new file mode 100644 index 00000000..72601f9e --- /dev/null +++ b/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + New parameter "IronicCleaningNetwork" can be used to override the name + or UUID of the **overcloud** network Ironic uses for cleaning. +fixes: + - | + A default value is now provided for Ironic ``cleaning_network`` + configuration option. Not providing it on start up was deprecated since + Newton, and will result in a failure in the near future. diff --git a/releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml b/releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml new file mode 100644 index 00000000..1f41073b --- /dev/null +++ b/releasenotes/notes/keystone_internal-53cc7b24ebdd9df4.yaml @@ -0,0 +1,9 @@ +--- +other: + - | + Use Keystone internal endpoint instead of admin for services. + The admin endpoint is listening on the ctlplane network by default; + services should ideally be using the internal api network for this kind + of traffic, as the ctlplane network is mostly for provisioning. On the + other hand, the admin endpoint shouldn't be as relevant with services + switching to keystone v3. diff --git a/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml new file mode 100644 index 00000000..59f1fb99 --- /dev/null +++ b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml @@ -0,0 +1,11 @@ +--- +prelude: > + Support for Manila/CephFS with TripleO managed Ceph cluster +features: + - | + It is now possible to configure Manila with CephFS to use a + TripleO managed Ceph cluster. When using the Heat environment + file at environments/manila-cephfsnative-config.yaml Manila + will be configured to use the TripleO managed Ceph cluster + if CephMDS is deployed as well, which can be done using the + file environments/services/ceph-mds.yaml
\ No newline at end of file diff --git a/releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml b/releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml new file mode 100644 index 00000000..c14cefa0 --- /dev/null +++ b/releasenotes/notes/memcached-max-memory-ef6834d17953fca6.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Memcached max memory configuration is now exposed via MemcachedMaxMemory. +upgrade: + - | + Reduce the default memory configuration for memcached from 95% to 50%. diff --git a/releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml b/releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml new file mode 100644 index 00000000..bd8d3562 --- /dev/null +++ b/releasenotes/notes/octavia-service-integration-03bd3eb6cfe1efaf.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added initial support for deploying the Octavia services in the overcloud. diff --git a/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml b/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml new file mode 100644 index 00000000..1949e4fe --- /dev/null +++ b/releasenotes/notes/puppet-auditd-6504295e8c6c7a3b.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Adds the ability to manage auditd.service and enter audit.rules via tripleo + heat templates. This in turn enforces an audit log of system events, such + as system time changes, modifications to Discretionary Access Controls, + and failed login attempts. + + diff --git a/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml new file mode 100644 index 00000000..c744e0f7 --- /dev/null +++ b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml @@ -0,0 +1,4 @@ +--- +features: + - Sahara is now deployed with keystone_authtoken parameters and moves + forward with the Keystone v3 version. diff --git a/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml new file mode 100644 index 00000000..e9974a20 --- /dev/null +++ b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Swift rings created or updated on the overcloud nodes will now be + stored on the undercloud at the end of each deployment. They will be + retrieved before any deployment update, and by doing this the Swift + rings will be in a consistent state across the cluster all the time. + This makes it possible to add, remove or replace nodes without + manual operator interaction. diff --git a/releasenotes/notes/vpp-84d35e51ff62a58c.yaml b/releasenotes/notes/vpp-84d35e51ff62a58c.yaml new file mode 100644 index 00000000..b78df17d --- /dev/null +++ b/releasenotes/notes/vpp-84d35e51ff62a58c.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add the ability to deploy VPP. Vector Packet Processing (VPP) is a high + performance packet processing stack that runs in user space in Linux. + VPP is used as an alternative to the kernel networking stack for + an accelerated network data path. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/releasenotes/source/_static/.placeholder diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py new file mode 100644 index 00000000..8da995b0 --- /dev/null +++ b/releasenotes/source/conf.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'oslosphinx', + 'reno.sphinxext', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'tripleo-heat-templates Release Notes' +copyright = u'2017, TripleO Developers' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = '6.0.0.0b3' +# The short X.Y version. +version = '6.0.0' + +# The full version, including alpha/beta/rc tags. + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'tripleo-heat-templatesReleaseNotesdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'tripleo-heat-templatesReleaseNotes.tex', u'tripleo-heat-templates Release Notes Documentation', + u'2016, TripleO Developers', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. 
+#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'tripleo-heat-templatesreleasenotes', u'tripleo-heat-templates Release Notes Documentation', + [u'2016, TripleO Developers'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'tripleo-heat-templatesReleaseNotes', u'tripleo-heat-templates Release Notes Documentation', + u'2016, TripleO Developers', 'tripleo-heat-templatesReleaseNotes', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + +# -- Options for Internationalization output ------------------------------ +locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst new file mode 100644 index 00000000..43c77709 --- /dev/null +++ b/releasenotes/source/index.rst @@ -0,0 +1,19 @@ +================================================ +Welcome to tripleo-heat-templates Release Notes! +================================================ + +Contents +======== + +.. toctree:: + :maxdepth: 2 + + unreleased + ocata + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst new file mode 100644 index 00000000..ebe62f42 --- /dev/null +++ b/releasenotes/source/ocata.rst @@ -0,0 +1,6 @@ +=================================== + Ocata Series Release Notes +=================================== + +.. release-notes:: + :branch: origin/stable/ocata diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst new file mode 100644 index 00000000..2334dd5c --- /dev/null +++ b/releasenotes/source/unreleased.rst @@ -0,0 +1,5 @@ +============================== + Current Series Release Notes +============================== + + .. release-notes:: diff --git a/requirements.txt b/requirements.txt index 9c4a708a..057aa287 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,6 @@ -pbr>=0.5.21,<1.0 -Jinja2>=2.8 # BSD License (3 clause) +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+pbr>=1.8 # Apache-2.0 +Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause) +six>=1.9.0 # MIT diff --git a/roles_data.yaml b/roles_data.yaml index e29885d5..1fddf72f 100644 --- a/roles_data.yaml +++ b/roles_data.yaml @@ -17,31 +17,41 @@ # disable_constraints: (boolean) optional, whether to disable Nova and Glance # constraints for each role specified in the templates. # +# disable_upgrade_deployment: (boolean) optional, whether to run the +# ansible upgrade steps for all services that are deployed on the role. If set +# to True, the operator will drive the upgrade for this role's nodes. +# +# upgrade_batch_size: (number): batch size for upgrades where tasks are +# specified by services to run in batches vs all nodes at once. +# This defaults to 1, but larger batches may be specified here. +# # ServicesDefault: (list) optional default list of services to be deployed # on the role, defaults to an empty list. Sets the default for the # {{role.name}}Services parameter in overcloud.yaml -- name: Controller +- name: Controller # the 'primary' role goes first CountDefault: 1 ServicesDefault: - OS::TripleO::Services::CACerts + - OS::TripleO::Services::CephMds - OS::TripleO::Services::CephMon - OS::TripleO::Services::CephExternal + - OS::TripleO::Services::CephRbdMirror - OS::TripleO::Services::CephRgw - OS::TripleO::Services::CinderApi - OS::TripleO::Services::CinderBackup - OS::TripleO::Services::CinderScheduler - OS::TripleO::Services::CinderVolume - - OS::TripleO::Services::Core + - OS::TripleO::Services::Congress - OS::TripleO::Services::Kernel - OS::TripleO::Services::Keystone - OS::TripleO::Services::GlanceApi - - OS::TripleO::Services::GlanceRegistry - OS::TripleO::Services::HeatApi - OS::TripleO::Services::HeatApiCfn - OS::TripleO::Services::HeatApiCloudwatch - OS::TripleO::Services::HeatEngine - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient - OS::TripleO::Services::NeutronDhcpAgent - OS::TripleO::Services::NeutronL3Agent - OS::TripleO::Services::NeutronMetadataAgent @@ -57,15 +67,18 @@ - OS::TripleO::Services::NovaConductor - OS::TripleO::Services::MongoDb - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement - OS::TripleO::Services::NovaMetadata - OS::TripleO::Services::NovaScheduler - OS::TripleO::Services::NovaConsoleauth - OS::TripleO::Services::NovaVncProxy + - OS::TripleO::Services::Ec2Api - OS::TripleO::Services::Ntp - OS::TripleO::Services::SwiftProxy - OS::TripleO::Services::SwiftStorage - OS::TripleO::Services::SwiftRingBuilder - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::Timezone - OS::TripleO::Services::CeilometerApi - OS::TripleO::Services::CeilometerCollector @@ -97,14 +110,28 @@ - OS::TripleO::Services::OpenDaylightOvs - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::Collectd - OS::TripleO::Services::BarbicanApi - OS::TripleO::Services::PankoApi + - OS::TripleO::Services::Tacker - OS::TripleO::Services::Zaqar - OS::TripleO::Services::OVNDBs + - OS::TripleO::Services::NeutronML2FujitsuCfab + - OS::TripleO::Services::NeutronML2FujitsuFossw + - OS::TripleO::Services::CinderHPELeftHandISCSI + - OS::TripleO::Services::Etcd + - OS::TripleO::Services::AuditD + - OS::TripleO::Services::OctaviaApi + - OS::TripleO::Services::OctaviaHealthManager + - OS::TripleO::Services::OctaviaHousekeeping + - OS::TripleO::Services::OctaviaWorker + - OS::TripleO::Services::Vpp + - OS::TripleO::Services::Docker - name: Compute 
CountDefault: 1 HostnameFormatDefault: '%stackname%-novacompute-%index%' + disable_upgrade_deployment: True ServicesDefault: - OS::TripleO::Services::CACerts - OS::TripleO::Services::CephClient @@ -112,6 +139,7 @@ - OS::TripleO::Services::Timezone - OS::TripleO::Services::Ntp - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::NovaCompute - OS::TripleO::Services::NovaLibvirt - OS::TripleO::Services::Kernel @@ -126,6 +154,10 @@ - OS::TripleO::Services::OpenDaylightOvs - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + - OS::TripleO::Services::Collectd + - OS::TripleO::Services::Vpp + - OS::TripleO::Services::MySQLClient - name: BlockStorage ServicesDefault: @@ -135,12 +167,17 @@ - OS::TripleO::Services::Ntp - OS::TripleO::Services::Timezone - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::TripleoPackages - OS::TripleO::Services::TripleoFirewall - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + - OS::TripleO::Services::Collectd + - OS::TripleO::Services::MySQLClient - name: ObjectStorage + disable_upgrade_deployment: True ServicesDefault: - OS::TripleO::Services::CACerts - OS::TripleO::Services::Kernel @@ -148,11 +185,15 @@ - OS::TripleO::Services::SwiftStorage - OS::TripleO::Services::SwiftRingBuilder - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::Timezone - OS::TripleO::Services::TripleoPackages - OS::TripleO::Services::TripleoFirewall - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + - OS::TripleO::Services::Collectd + - OS::TripleO::Services::MySQLClient - name: CephStorage ServicesDefault: @@ -161,8 +202,12 @@ - OS::TripleO::Services::Kernel - OS::TripleO::Services::Ntp - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Sshd - OS::TripleO::Services::Timezone - OS::TripleO::Services::TripleoPackages - OS::TripleO::Services::TripleoFirewall - OS::TripleO::Services::SensuClient - OS::TripleO::Services::FluentdClient + - OS::TripleO::Services::AuditD + - OS::TripleO::Services::Collectd + - OS::TripleO::Services::MySQLClient diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml new file mode 100644 index 00000000..5070ef38 --- /dev/null +++ b/roles_data_undercloud.yaml @@ -0,0 +1,36 @@ +- name: Undercloud # the 'primary' role goes first + CountDefault: 1 + disable_constraints: True + ServicesDefault: + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MongoDb + - OS::TripleO::Services::Keystone + - OS::TripleO::Services::Apache + - OS::TripleO::Services::RabbitMQ + - OS::TripleO::Services::GlanceApi + - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Memcached + - OS::TripleO::Services::HeatApi + - OS::TripleO::Services::HeatApiCfn + - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::MistralEngine + - OS::TripleO::Services::MistralApi + - OS::TripleO::Services::MistralExecutor + - OS::TripleO::Services::IronicApi + - OS::TripleO::Services::IronicConductor + - OS::TripleO::Services::IronicPxe + - OS::TripleO::Services::NovaIronic + - 
OS::TripleO::Services::Zaqar + - OS::TripleO::Services::NeutronServer + - OS::TripleO::Services::NeutronApi + - OS::TripleO::Services::NeutronCorePlugin + - OS::TripleO::Services::NeutronOvsAgent + - OS::TripleO::Services::NeutronDhcpAgent @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,6 +16,14 @@ # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + setuptools.setup( - setup_requires=['pbr'], + setup_requires=['pbr>=1.8'], pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt index c3726e8b..1c9e3b42 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1 +1,9 @@ -pyyaml +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +PyYAML>=3.10.0 # MIT +Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause) +six>=1.9.0 # MIT +sphinx>=1.5.1 # BSD +oslosphinx>=4.7.0 # Apache-2.0 +reno>=1.8.0 # Apache-2.0 diff --git a/tools/process-templates.py b/tools/process-templates.py index a15b00e2..1c8c4ba6 100755 --- a/tools/process-templates.py +++ b/tools/process-templates.py @@ -14,9 +14,13 @@ import argparse import jinja2 import os +import shutil +import six import sys import yaml +__tht_root_dir = os.path.dirname(os.path.dirname(__file__)) + def parse_opts(argv): parser = argparse.ArgumentParser( @@ -32,6 +36,9 @@ def parse_opts(argv): action='store_true', help="""Enable safe mode (do not overwrite files).""", default=False) + parser.add_argument('-o', '--output-dir', metavar='OUTPUT_DIR', + help="""Output dir for all the templates""", + default='') opts = parser.parse_args(argv[1:]) return opts @@ -46,9 +53,14 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None, print('ERROR: path already exists for file: %s' % outfile_name) sys.exit(1) + # Search for templates relative to the current template path first + template_base = os.path.dirname(yaml_f) + j2_loader = jinja2.loaders.FileSystemLoader([template_base, __tht_root_dir]) + try: # Render the j2 template - template = jinja2.Environment().from_string(j2_template) + template = jinja2.Environment(loader=j2_loader).from_string( + j2_template) r_template = template.render(**j2_data) except jinja2.exceptions.TemplateError as ex: error_msg = ("Error rendering template %s : %s" @@ -59,7 +71,7 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None, out_f.write(r_template) -def process_templates(template_path, role_data_path, overwrite): +def process_templates(template_path, role_data_path, output_dir, overwrite): with open(role_data_path) as role_data_file: role_data = yaml.safe_load(role_data_file) @@ -68,6 +80,11 @@ def process_templates(template_path, role_data_path, overwrite): with open(j2_excludes_path) as role_data_file: j2_excludes = yaml.safe_load(role_data_file) + if output_dir and not os.path.isdir(output_dir): + if os.path.exists(output_dir): + raise RuntimeError('Output dir %s is not a directory' % output_dir) + os.mkdir(output_dir) + role_names = [r.get('name') for r in role_data] r_map = {} for r 
in role_data: @@ -77,6 +94,29 @@ def process_templates(template_path, role_data_path, overwrite): if os.path.isdir(template_path): for subdir, dirs, files in os.walk(template_path): + + # NOTE(flaper87): Ignore hidden dirs as we don't + # generate templates for those. + # Note the slice assignment for `dirs` is necessary + # because we need to modify the *elements* in the + # dirs list rather than the reference to the list. + # This way we'll make sure os.walk will iterate over + # the shrunk list. os.walk doesn't have an API for + # filtering dirs at this point. + dirs[:] = [d for d in dirs if not d[0] == '.'] + files = [f for f in files if not f[0] == '.'] + + # NOTE(flaper87): We could have used shutil.copytree + # but it requires the dst dir to not be present. This + # approach is safer as it doesn't require us to delete + # the output_dir in advance and it allows for running + # the command multiple times with the same output_dir. + out_dir = subdir + if output_dir: + out_dir = os.path.join(output_dir, subdir) + if not os.path.exists(out_dir): + os.mkdir(out_dir) + for f in files: file_path = os.path.join(subdir, f) # We do two templating passes here: @@ -100,7 +140,7 @@ def process_templates(template_path, role_data_path, overwrite): [role.lower(), os.path.basename(f).replace('.role.j2.yaml', '.yaml')]) - out_f_path = os.path.join(subdir, out_f) + out_f_path = os.path.join(out_dir, out_f) if not (out_f_path in excl_templates): _j2_render_to_file(template_data, j2_data, out_f_path, overwrite) @@ -111,9 +151,12 @@ def process_templates(template_path, role_data_path, overwrite): with open(file_path) as j2_template: template_data = j2_template.read() j2_data = {'roles': role_data} - out_f = file_path.replace('.j2.yaml', '.yaml') - _j2_render_to_file(template_data, j2_data, out_f, + out_f = os.path.basename(f).replace('.j2.yaml', '.yaml') + out_f_path = os.path.join(out_dir, out_f) + _j2_render_to_file(template_data, j2_data, out_f_path, overwrite) + elif output_dir: + shutil.copy(os.path.join(subdir, f), out_dir) else: print('Unexpected argument %s' % template_path) @@ -122,4 +165,4 @@ opts = parse_opts(sys.argv) role_data_path = os.path.join(opts.base_path, opts.roles_data) -process_templates(opts.base_path, role_data_path, (not opts.safe)) +process_templates(opts.base_path, role_data_path, opts.output_dir, (not opts.safe)) diff --git a/tools/releasenotes_tox.sh b/tools/releasenotes_tox.sh new file mode 100755 index 00000000..4fecfd92 --- /dev/null +++ b/tools/releasenotes_tox.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +rm -rf releasenotes/build + +sphinx-build -a -E -W \ + -d releasenotes/build/doctrees \ + -b html \ + releasenotes/source releasenotes/build/html +BUILD_RESULT=$? + +UNCOMMITTED_NOTES=$(git status --porcelain | \ + awk '$1 == "M" && $2 ~ /releasenotes\/notes/ {print $2}') + +if [ "${UNCOMMITTED_NOTES}" ] +then + cat <<EOF + +REMINDER: The following changes to release notes have not been committed: + +${UNCOMMITTED_NOTES} + +While that may be intentional, keep in mind that release notes are built from +committed changes, not the working directory. + +EOF +fi + +exit ${BUILD_RESULT} diff --git a/tools/tox_install.sh b/tools/tox_install.sh new file mode 100755 index 00000000..e61b63a8 --- /dev/null +++ b/tools/tox_install.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Client constraint file contains this client version pin that is in conflict # with installing the client from source.
We should remove the version pin in + the constraints file before applying it for from-source installation. + +CONSTRAINTS_FILE="$1" +shift 1 + +set -e + +# NOTE(tonyb): Place this in the tox environment's log dir so it will get +# published to logs.openstack.org for easy debugging. +localfile="$VIRTUAL_ENV/log/upper-constraints.txt" + +if [[ "$CONSTRAINTS_FILE" != http* ]]; then + CONSTRAINTS_FILE="file://$CONSTRAINTS_FILE" +fi +# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep +curl "$CONSTRAINTS_FILE" --insecure --progress-bar --output "$localfile" + +pip install -c"$localfile" openstack-requirements + +# This is the main purpose of the script: Allow local installation of +# the current repo. It is listed in constraints file and thus any +# install will be constrained and we need to unconstrain it. +edit-constraints "$localfile" -- "$CLIENT_NAME" + +pip install -c"$localfile" -U "$@" +exit $? diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py index fd1f47de..32987cb2 100755 --- a/tools/yaml-validate.py +++ b/tools/yaml-validate.py @@ -19,11 +19,56 @@ import yaml required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords'] +envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml', + 'tls-endpoints-public-ip.yaml', + 'tls-everywhere-endpoints-dns.yaml'] +ENDPOINT_MAP_FILE = 'endpoint_map.yaml' + def exit_usage(): print('Usage %s <yaml file or directory>' % sys.argv[0]) sys.exit(1) + +def get_base_endpoint_map(filename): + try: + tpl = yaml.load(open(filename).read()) + return tpl['parameters']['EndpointMap']['default'] + except Exception: + print(traceback.format_exc()) + return None + + +def get_endpoint_map_from_env(filename): + try: + tpl = yaml.load(open(filename).read()) + return { + 'file': filename, + 'map': tpl['parameter_defaults']['EndpointMap'] + } + except Exception: + print(traceback.format_exc()) + return None + + +def validate_endpoint_map(base_map, env_map): + return sorted(base_map.keys()) == sorted(env_map.keys()) + + +def validate_hci_compute_services_default(env_filename, env_tpl): + env_services_list = env_tpl['parameter_defaults']['ComputeServices'] + env_services_list.remove('OS::TripleO::Services::CephOSD') + roles_filename = os.path.join(os.path.dirname(env_filename), + '../roles_data.yaml') + roles_tpl = yaml.load(open(roles_filename).read()) + for role in roles_tpl: + if role['name'] == 'Compute': + roles_services_list = role['ServicesDefault'] + if sorted(env_services_list) != sorted(roles_services_list): + print('ERROR: ComputeServices in %s is different ' + 'from ServicesDefault in roles_data.yaml' % env_filename) + return 1 + return 0 + def validate_mysql_connection(settings): no_op = lambda *args: False error_status = [0] @@ -32,7 +77,8 @@ def validate_mysql_connection(settings): return items == ['EndpointMap', 'MysqlInternal', 'protocol'] def client_bind_address(item): - return 'bind_address' in item + return 'read_default_file' in item and \ + 'read_default_group' in item def validate_mysql_uri(key, items): # Only consider a connection if it targets mysql @@ -101,10 +147,20 @@ def validate(filename): try: tpl = yaml.load(open(filename).read()) + # The template alias version should be used instead of a date; this validation + # will be applied to all templates, not just those in the services folder.
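+ # For example, 'heat_template_version: ocata' satisfies this check, while a date such as '2016-10-14' fails the isalpha() test below.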
+ if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha(): + print('ERROR: heat_template_version needs to be the release alias not a date: %s' + % filename) + return 1 + if (filename.startswith('./puppet/services/') and filename != './puppet/services/services.yaml'): retval = validate_service(filename, tpl) + if filename.endswith('hyperconverged-ceph.yaml'): + retval = validate_hci_compute_services_default(filename, tpl) + except Exception: print(traceback.format_exc()) return 1 @@ -128,6 +184,8 @@ if len(sys.argv) < 2: path_args = sys.argv[1:] exit_val = 0 failed_files = [] +base_endpoint_map = None +env_endpoint_maps = list() for base_path in path_args: if os.path.isdir(base_path): @@ -139,6 +197,12 @@ for base_path in path_args: if failed: failed_files.append(file_path) exit_val |= failed + if f == ENDPOINT_MAP_FILE: + base_endpoint_map = get_base_endpoint_map(file_path) + if f in envs_containing_endpoint_map: + env_endpoint_map = get_endpoint_map_from_env(file_path) + if env_endpoint_map: + env_endpoint_maps.append(env_endpoint_map) elif os.path.isfile(base_path) and base_path.endswith('.yaml'): failed = validate(base_path) if failed: @@ -148,6 +212,30 @@ for base_path in path_args: print('Unexpected argument %s' % base_path) exit_usage() +if base_endpoint_map and \ + len(env_endpoint_maps) == len(envs_containing_endpoint_map): + for env_endpoint_map in env_endpoint_maps: + matches = validate_endpoint_map(base_endpoint_map, + env_endpoint_map['map']) + if not matches: + print("ERROR: %s needs to be updated to match changes in base " + "endpoint map" % env_endpoint_map['file']) + failed_files.append(env_endpoint_map['file']) + exit_val |= 1 + else: + print("%s matches base endpoint map" % env_endpoint_map['file']) +else: + print("ERROR: Can't validate endpoint maps since a file is missing. " + "If you meant to delete one of these files you should update this " + "tool as well.") + if not base_endpoint_map: + failed_files.append(ENDPOINT_MAP_FILE) + if len(env_endpoint_maps) != len(envs_containing_endpoint_map): + matched_files = set(os.path.basename(matched_env_file['file']) + for matched_env_file in env_endpoint_maps) + failed_files.extend(set(envs_containing_endpoint_map) - matched_files) + exit_val |= 1 + if failed_files: print('Validation failed on:') for f in failed_files: @@ -13,7 +13,11 @@ commands = {posargs} [testenv:pep8] commands = python ./tools/process-templates.py + python ./network/endpoints/build_endpoint_map.py --check python ./tools/yaml-validate.py . [testenv:templates] commands = python ./tools/process-templates.py + +[testenv:releasenotes] +commands = bash -c tools/releasenotes_tox.sh |
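The endpoint-map validation added to tools/yaml-validate.py above reduces to a key-set comparison: the default EndpointMap in endpoint_map.yaml must list exactly the same endpoints as the EndpointMap parameter_defaults shipped in each of the three TLS environment files, and in the gate it runs through the pep8 tox environment shown above, which invokes tools/yaml-validate.py. The following is a minimal standalone sketch of that comparison, not the validator's exact code; the file paths are assumptions for illustration.

    # Sketch of the endpoint-map consistency check; the paths below are assumed.
    import sys
    import yaml

    BASE_MAP = 'network/endpoints/endpoint_map.yaml'           # assumed location
    TLS_ENVS = ['environments/tls-endpoints-public-dns.yaml',  # assumed locations
                'environments/tls-endpoints-public-ip.yaml',
                'environments/tls-everywhere-endpoints-dns.yaml']

    def load(path):
        with open(path) as f:
            return yaml.safe_load(f)

    # Keys of the default EndpointMap parameter in the base template.
    base_keys = sorted(load(BASE_MAP)['parameters']['EndpointMap']['default'])

    exit_val = 0
    for env in TLS_ENVS:
        # Keys of the EndpointMap override shipped with each TLS environment.
        env_keys = sorted(load(env)['parameter_defaults']['EndpointMap'])
        if env_keys != base_keys:
            print('ERROR: %s needs to be updated to match the base endpoint map' % env)
            exit_val = 1
    sys.exit(exit_val)

As in validate_endpoint_map(), only the key sets are compared, not the values.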