45 files changed, 1201 insertions(+), 53 deletions(-)
diff --git a/ci/environments/multinode-containers.yaml b/ci/environments/multinode-containers.yaml new file mode 100644 index 00000000..89970419 --- /dev/null +++ b/ci/environments/multinode-containers.yaml @@ -0,0 +1,70 @@ +# NOTE: This is an environment specific for containers upgrade +# CI. Mainly we deploy non-pacemakerized overcloud, as at the time +# being containerization of services managed by pacemaker is not +# complete, so we deploy and upgrade the non-HA services for now. + +resource_registry: + OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml + + # NOTE: This is needed because of upgrades from Ocata to Pike. We + # deploy the initial environment with Ocata templates, and + # overcloud-resource-registry.yaml there doesn't have this Docker + # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can + # remove this. + OS::TripleO::Services::Docker: OS::Heat::None + +parameter_defaults: + ControllerServices: + - OS::TripleO::Services::CephMon + - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::CinderApi + - OS::TripleO::Services::CinderScheduler + - OS::TripleO::Services::CinderVolume + - OS::TripleO::Services::Docker + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Keystone + - OS::TripleO::Services::GlanceApi + - OS::TripleO::Services::HeatApi + - OS::TripleO::Services::HeatApiCfn + - OS::TripleO::Services::HeatApiCloudwatch + - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::MySQL + - OS::TripleO::Services::MySQLClient + - OS::TripleO::Services::NeutronDhcpAgent + - OS::TripleO::Services::NeutronL3Agent + - OS::TripleO::Services::NeutronMetadataAgent + - OS::TripleO::Services::NeutronServer + - OS::TripleO::Services::NeutronCorePlugin + - OS::TripleO::Services::NeutronOvsAgent + - OS::TripleO::Services::RabbitMQ + - OS::TripleO::Services::HAproxy + - OS::TripleO::Services::Keepalived + - OS::TripleO::Services::Memcached + - OS::TripleO::Services::Pacemaker + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaPlacement + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::SwiftProxy + - OS::TripleO::Services::SwiftStorage + - OS::TripleO::Services::SwiftRingBuilder + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::TripleoPackages + - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + - OS::TripleO::Services::Sshd + ControllerExtraConfig: + nova::compute::libvirt::services::libvirt_virt_type: qemu + nova::compute::libvirt::libvirt_virt_type: qemu + # Required for Centos 7.3 and Qemu 2.6.0 + nova::compute::libvirt::libvirt_cpu_mode: 'none' + #NOTE(gfidente): not great but we need this to deploy on ext4 + #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ + ceph::profile::params::osd_max_object_name_len: 256 + ceph::profile::params::osd_max_object_namespace_len: 64 + SwiftCeilometerPipelineEnabled: False + Debug: True diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml index 8a02d8fd..f802e4e6 100644 --- a/docker/services/aodh-api.yaml +++ b/docker/services/aodh-api.yaml @@ -100,13 +100,14 @@ outputs: net: host privileged: false detach: false + user: root volumes: list_concat: - {get_attr: [ContainersCommon, volumes]} - - 
/var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro - /var/log/containers/aodh:/var/log/aodh - command: /usr/bin/aodh-dbsync + command: "/usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync" step_4: aodh_api: image: *aodh_image diff --git a/docker/services/containers-common.yaml b/docker/services/containers-common.yaml index a4ebe549..a9912a1f 100644 --- a/docker/services/containers-common.yaml +++ b/docker/services/containers-common.yaml @@ -9,6 +9,8 @@ outputs: value: - /etc/hosts:/etc/hosts:ro - /etc/localtime:/etc/localtime:ro + # required for bootstrap_host_exec + - /etc/puppet:/etc/puppet:ro # OpenSSL trusted CAs - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml index 07710c7a..df8186da 100644 --- a/docker/services/glance-api.yaml +++ b/docker/services/glance-api.yaml @@ -100,6 +100,7 @@ outputs: net: host privileged: false detach: false + user: root volumes: &glance_volumes list_concat: - {get_attr: [ContainersCommon, volumes]} @@ -110,6 +111,7 @@ outputs: environment: - KOLLA_BOOTSTRAP=True - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + command: "/usr/bin/bootstrap_host_exec glance_api su glance -s /bin/bash -c '/usr/local/bin/kolla_start'" step_4: map_merge: - glance_api: diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml index 9b474731..e59d6095 100644 --- a/docker/services/gnocchi-api.yaml +++ b/docker/services/gnocchi-api.yaml @@ -100,13 +100,14 @@ outputs: net: host detach: false privileged: false + user: root volumes: list_concat: - {get_attr: [ContainersCommon, volumes]} - - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro - /var/log/containers/gnocchi:/var/log/gnocchi - command: ["/usr/bin/gnocchi-upgrade", "--skip-storage"] + command: "/usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c '/usr/bin/gnocchi-upgrade --skip-storage'" step_4: gnocchi_api: image: *gnocchi_image diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml index 8c554a50..0adad538 100644 --- a/docker/services/heat-engine.yaml +++ b/docker/services/heat-engine.yaml @@ -94,13 +94,14 @@ outputs: net: host privileged: false detach: false + user: root volumes: list_concat: - {get_attr: [ContainersCommon, volumes]} - - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro - /var/log/containers/heat:/var/log/heat - command: ['heat-manage', 'db_sync'] + command: "/usr/bin/bootstrap_host_exec heat_engine su heat -s /bin/bash -c 'heat-manage db_sync'" step_4: heat_engine: image: *heat_engine_image diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml index 300aa0bd..c8978aa2 100644 --- a/docker/services/ironic-api.yaml +++ b/docker/services/ironic-api.yaml @@ -100,13 +100,14 @@ outputs: net: host privileged: false detach: false + user: root volumes: list_concat: - {get_attr: [ContainersCommon, volumes]} - - /var/lib/config-data/ironic/etc/:/etc/:ro - /var/log/containers/ironic:/var/log/ironic - command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf'] + command: "/usr/bin/bootstrap_host_exec ironic_api su ironic -s /bin/bash -c 'ironic-dbsync --config-file /etc/ironic/ironic.conf'" step_4: ironic_api: start_order: 10 diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml index ca643749..772859ee 100644 --- a/docker/services/keystone.yaml +++ b/docker/services/keystone.yaml @@ -131,8 +131,9 @@ outputs: 
environment: - KOLLA_BOOTSTRAP=True - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + command: ['/usr/bin/bootstrap_host_exec', 'keystone', '/usr/local/bin/kolla_start'] keystone: - start_order: 1 + start_order: 2 image: *keystone_image net: host privileged: false @@ -141,10 +142,10 @@ outputs: environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS keystone_bootstrap: - start_order: 2 + start_order: 3 action: exec command: - [ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ] + [ 'keystone', '/usr/bin/bootstrap_host_exec', 'keystone' ,'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ] docker_puppet_tasks: # Keystone endpoint creation occurs only on single node step_3: diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml index 3b256fdd..5586d41b 100644 --- a/docker/services/mistral-api.yaml +++ b/docker/services/mistral-api.yaml @@ -100,19 +100,21 @@ outputs: net: host privileged: false detach: false + user: root volumes: list_concat: - {get_attr: [ContainersCommon, volumes]} - - /var/lib/config-data/mistral/etc/:/etc/:ro - /var/log/containers/mistral:/var/log/mistral - command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head'] + command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'" mistral_db_populate: start_order: 2 image: *mistral_image net: host privileged: false detach: false + user: root volumes: list_concat: - {get_attr: [ContainersCommon, volumes]} @@ -121,7 +123,7 @@ outputs: - /var/log/containers/mistral:/var/log/mistral # NOTE: dprince this requires that we install openstack-tripleo-common into # the Mistral API image so that we get tripleo* actions - command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate'] + command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf populate'" step_4: mistral_api: start_order: 15 diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml index 9517e4b1..7ce47a14 100644 --- a/docker/services/neutron-api.yaml +++ b/docker/services/neutron-api.yaml @@ -110,8 +110,6 @@ outputs: net: host privileged: false detach: false - # FIXME: we should make config file permissions right - # and run as neutron user user: root volumes: list_concat: @@ -120,7 +118,10 @@ outputs: - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro - /var/log/containers/neutron:/var/log/neutron - command: ['neutron-db-manage', 'upgrade', 'heads'] + command: ['/usr/bin/bootstrap_host_exec', 'neutron_api', 'neutron-db-manage', 'upgrade', 'heads'] + # FIXME: we should make config file permissions right + # and run as neutron user + #command: "/usr/bin/bootstrap_host_exec neutron_api su neutron -s /bin/bash -c 'neutron-db-manage upgrade heads'" step_4: map_merge: - neutron_api: diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml index d571b21b..2375dada 100644 --- a/docker/services/nova-api.yaml +++ b/docker/services/nova-api.yaml @@ -103,6 +103,7 @@ outputs: image: *nova_api_image net: host detach: false + user: root volumes: &nova_api_volumes list_concat: - {get_attr: [ContainersCommon, volumes]} @@ -110,7 +111,7 @@ outputs: - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro - 
/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro - /var/log/containers/nova:/var/log/nova - command: ['/usr/bin/nova-manage', 'api_db', 'sync'] + command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage api_db sync'" # FIXME: we probably want to wait on the 'cell_v2 update' in order for this # to be capable of upgrading a baremetal setup. This is to ensure the name # of the cell is 'default' @@ -119,11 +120,9 @@ outputs: image: *nova_api_image net: host detach: false + user: root volumes: *nova_api_volumes - command: - - '/usr/bin/nova-manage' - - 'cell_v2' - - 'map_cell0' + command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 map_cell0'" nova_api_create_default_cell: start_order: 3 image: *nova_api_image @@ -134,18 +133,16 @@ outputs: # this idempotent (if the resource already exists a conflict # is raised) exit_codes: [0,2] - command: - - '/usr/bin/nova-manage' - - 'cell_v2' - - 'create_cell' - - '--name="default"' + user: root + command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 create_cell --name=default'" nova_db_sync: start_order: 4 image: *nova_api_image net: host detach: false volumes: *nova_api_volumes - command: ['/usr/bin/nova-manage', 'db', 'sync'] + user: root + command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage db sync'" step_4: nova_api: start_order: 2 @@ -164,10 +161,8 @@ outputs: net: host detach: false volumes: *nova_api_volumes - command: - - '/usr/bin/nova-manage' - - 'cell_v2' - - 'discover_hosts' + user: root + command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 discover_hosts'" host_prep_tasks: - name: create persistent logs directory file: diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml index ebf0da7d..9779d676 100644 --- a/docker/services/nova-libvirt.yaml +++ b/docker/services/nova-libvirt.yaml @@ -18,6 +18,10 @@ parameters: description: image default: 'centos-binary-nova-compute:latest' type: string + EnablePackageInstall: + default: 'false' + description: Set to true to enable package installation + type: boolean ServiceNetMap: default: {} description: Mapping of service_name -> network name. Typically set @@ -112,6 +116,7 @@ outputs: - /var/run/libvirt:/var/run/libvirt - /var/lib/libvirt:/var/lib/libvirt - /etc/libvirt/qemu:/etc/libvirt/qemu + - /var/log/libvirt/qemu:/var/log/libvirt/qemu:ro - /var/log/containers/nova:/var/log/nova environment: - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS @@ -124,6 +129,22 @@ outputs: - /etc/libvirt/qemu - /var/lib/libvirt - /var/log/containers/nova + - name: set enable_package_install fact + set_fact: + enable_package_install: {get_param: EnablePackageInstall} + # We use virtlogd on host, so when using Deployed Server + # feature, we need to ensure libvirt is installed. 
+ - name: install libvirt-daemon + package: + name: libvirt-daemon + state: present + when: enable_package_install + - name: start virtlogd socket + service: + name: virtlogd.socket + state: started + enabled: yes + when: enable_package_install upgrade_tasks: - name: Stop and disable libvirtd service tags: step2 diff --git a/docker/services/pacemaker/clustercheck.yaml b/docker/services/pacemaker/clustercheck.yaml new file mode 100644 index 00000000..bad2acf6 --- /dev/null +++ b/docker/services/pacemaker/clustercheck.yaml @@ -0,0 +1,103 @@ +heat_template_version: pike + +description: > + MySQL HA clustercheck service deployment using puppet + This service is used by HAProxy in a HA scenario to report whether + the local galera node is synced + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerClustercheckImage: + description: image + default: 'centos-binary-mariadb:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + RoleName: + default: '' + description: Role name on which the service is applied + type: string + RoleParameters: + default: {} + description: Parameters specific to the role + type: json + +resources: + + ContainersCommon: + type: ../containers-common.yaml + + MysqlPuppetBase: + type: ../../../puppet/services/pacemaker/database/mysql.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + RoleName: {get_param: RoleName} + RoleParameters: {get_param: RoleParameters} + +outputs: + role_data: + description: Containerized service clustercheck using composable services. 
+ value: + service_name: clustercheck + config_settings: {get_attr: [MysqlPuppetBase, role_data, config_settings]} + step_config: "include ::tripleo::profile::pacemaker::clustercheck" + # BEGIN DOCKER SETTINGS # + puppet_config: + config_volume: clustercheck + puppet_tags: file # set this even though file is the default + step_config: "include ::tripleo::profile::pacemaker::clustercheck" + config_image: &clustercheck_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckImage} ] + kolla_config: + /var/lib/kolla/config_files/clustercheck.json: + command: /usr/sbin/xinetd -dontfork + config_files: + - dest: /etc/xinetd.conf + source: /var/lib/kolla/config_files/src/etc/xinetd.conf + owner: mysql + perm: '0644' + - dest: /etc/xinetd.d/galera-monitor + source: /var/lib/kolla/config_files/src/etc/xinetd.d/galera-monitor + owner: mysql + perm: '0644' + - dest: /etc/sysconfig/clustercheck + source: /var/lib/kolla/config_files/src/etc/sysconfig/clustercheck + owner: mysql + perm: '0600' + docker_config: + step_2: + clustercheck: + start_order: 1 + image: *clustercheck_image + restart: always + net: host + volumes: + list_concat: + - {get_attr: [ContainersCommon, volumes]} + - + - /var/lib/kolla/config_files/clustercheck.json:/var/lib/kolla/config_files/config.json + - /var/lib/config-data/clustercheck/:/var/lib/kolla/config_files/src:ro + - /var/lib/mysql:/var/lib/mysql + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + host_prep_tasks: + upgrade_tasks: diff --git a/docker/services/pacemaker/database/mysql.yaml b/docker/services/pacemaker/database/mysql.yaml new file mode 100644 index 00000000..d64845f2 --- /dev/null +++ b/docker/services/pacemaker/database/mysql.yaml @@ -0,0 +1,180 @@ +heat_template_version: pike + +description: > + MySQL service deployment with pacemaker bundle + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerMysqlImage: + description: image + default: 'centos-binary-mariadb:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + MysqlRootPassword: + type: string + hidden: true + default: '' + RoleName: + default: '' + description: Role name on which the service is applied + type: string + RoleParameters: + default: {} + description: Parameters specific to the role + type: json + +resources: + + ContainersCommon: + type: ../../containers-common.yaml + + MysqlPuppetBase: + type: ../../../../puppet/services/pacemaker/database/mysql.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + RoleName: {get_param: RoleName} + RoleParameters: {get_param: RoleParameters} + +outputs: + role_data: + description: Containerized service MySQL using composable services. 
+ value: + service_name: {get_attr: [MysqlPuppetBase, role_data, service_name]} + config_settings: + map_merge: + - {get_attr: [MysqlPuppetBase, role_data, config_settings]} + - tripleo::profile::pacemaker::database::mysql_bundle::mysql_docker_image: &mysql_image + list_join: + - '/' + - - {get_param: DockerNamespace} + - {get_param: DockerMysqlImage} + step_config: "" + # BEGIN DOCKER SETTINGS # + puppet_config: + config_volume: mysql + puppet_tags: file # set this even though file is the default + step_config: + list_join: + - "\n" + - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }" + - "exec {'wait-for-settle': command => '/bin/true' }" + - "include ::tripleo::profile::pacemaker::database::mysql_bundle" + config_image: *mysql_image + kolla_config: + /var/lib/kolla/config_files/mysql.json: + command: /usr/sbin/pacemaker_remoted + config_files: + - dest: /etc/libqb/force-filesystem-sockets + source: /dev/null + owner: root + perm: '0644' + - dest: /etc/my.cnf + source: /var/lib/kolla/config_files/src/etc/my.cnf + owner: mysql + perm: '0644' + - dest: /etc/my.cnf.d/galera.cnf + source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf + owner: mysql + perm: '0644' + - dest: /etc/sysconfig/clustercheck + source: /var/lib/kolla/config_files/src/etc/sysconfig/clustercheck + owner: root + perm: '0600' + docker_config: + step_1: + mysql_data_ownership: + start_order: 0 + detach: false + image: *mysql_image + net: host + user: root + # Kolla does only non-recursive chown + command: ['chown', '-R', 'mysql:', '/var/lib/mysql'] + volumes: + - /var/lib/mysql:/var/lib/mysql + mysql_bootstrap: + start_order: 1 + detach: false + image: *mysql_image + net: host + # Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done + command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start'] + volumes: &mysql_volumes + list_concat: + - {get_attr: [ContainersCommon, volumes]} + - + - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json + - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro + - /var/lib/mysql:/var/lib/mysql + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + - KOLLA_BOOTSTRAP=True + # NOTE(mandre) skip wsrep cluster status check + - KOLLA_KUBERNETES=True + - + list_join: + - '=' + - - 'DB_ROOT_PASSWORD' + - + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: MysqlRootPassword} + - {get_param: [DefaultPasswords, mysql_root_password]} + step_2: + mysql_init_bundle: + start_order: 1 + detach: false + net: host + user: root + command: + - '/bin/bash' + - '-c' + - str_replace: + template: + list_join: + - '; ' + - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json" + - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'" + params: + TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation,galera_ready,mysql_database,mysql_grant,mysql_user' + CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::mysql_bundle' + image: *mysql_image + volumes: + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /etc/puppet:/tmp/puppet-etc:ro + - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro + - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro + - 
/dev/shm:/dev/shm:rw + - /var/lib/config-data/mysql/etc/my.cnf:/etc/my.cnf:ro + - /var/lib/config-data/mysql/etc/my.cnf.d:/etc/my.cnf.d:ro + - /var/lib/mysql:/var/lib/mysql:rw + host_prep_tasks: + - name: create /var/lib/mysql + file: + path: /var/lib/mysql + state: directory + upgrade_tasks: + - name: Stop and disable mysql service + tags: step2 + service: name=mariadb state=stopped enabled=no diff --git a/docker/services/pacemaker/database/redis.yaml b/docker/services/pacemaker/database/redis.yaml new file mode 100644 index 00000000..ef27f7e9 --- /dev/null +++ b/docker/services/pacemaker/database/redis.yaml @@ -0,0 +1,140 @@ +heat_template_version: pike + +description: > + OpenStack containerized Redis services + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerRedisImage: + description: image + default: 'centos-binary-redis:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + RoleName: + default: '' + description: Role name on which the service is applied + type: string + RoleParameters: + default: {} + description: Parameters specific to the role + type: json + +resources: + + RedisBase: + type: ../../../../puppet/services/database/redis.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + RoleName: {get_param: RoleName} + RoleParameters: {get_param: RoleParameters} + +outputs: + role_data: + description: Role data for the Redis API role. 
+ value: + service_name: {get_attr: [RedisBase, role_data, service_name]} + config_settings: + map_merge: + - {get_attr: [RedisBase, role_data, config_settings]} + - redis::service_manage: false + redis::notify_service: false + redis::managed_by_cluster_manager: true + tripleo::profile::pacemaker::database::redis_bundle::redis_docker_image: &redis_image + list_join: + - '/' + - - {get_param: DockerNamespace} + - {get_param: DockerRedisImage} + + step_config: "" + service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: 'redis' + # NOTE: we need the exec tag to copy /etc/redis.conf.puppet to + # /etc/redis.conf + # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763 + puppet_tags: 'exec' + step_config: + get_attr: [RedisBase, role_data, step_config] + config_image: *redis_image + kolla_config: + /var/lib/kolla/config_files/redis.json: + command: /usr/sbin/pacemaker_remoted + config_files: + - dest: /etc/libqb/force-filesystem-sockets + source: /dev/null + owner: root + perm: '0644' + permissions: + - path: /var/run/redis + owner: redis:redis + recurse: true + - path: /var/lib/redis + owner: redis:redis + recurse: true + - path: /var/log/redis + owner: redis:redis + recurse: true + docker_config: + step_2: + redis_init_bundle: + start_order: 2 + detach: false + net: host + user: root + config_volume: 'redis_init_bundle' + command: + - '/bin/bash' + - '-c' + - str_replace: + template: + list_join: + - '; ' + - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json" + - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'" + params: + TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation' + CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle' + image: *redis_image + volumes: + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /etc/puppet:/tmp/puppet-etc:ro + - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro + - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro + - /dev/shm:/dev/shm:rw + host_prep_tasks: + - name: create /var/run/redis + file: + path: /var/run/redis + state: directory + - name: create /var/log/redis + file: + path: /var/log/redis + state: directory + - name: create /var/lib/redis + file: + path: /var/lib/redis + state: directory + upgrade_tasks: + - name: Stop and disable redis service + tags: step2 + service: name=redis state=stopped enabled=no diff --git a/docker/services/pacemaker/haproxy.yaml b/docker/services/pacemaker/haproxy.yaml new file mode 100644 index 00000000..ae19652e --- /dev/null +++ b/docker/services/pacemaker/haproxy.yaml @@ -0,0 +1,116 @@ +heat_template_version: pike + +description: > + OpenStack containerized HAproxy service for pacemaker + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerHAProxyImage: + description: image + default: 'centos-binary-haproxy:latest' + type: string + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. 
+ type: json + DefaultPasswords: + default: {} + type: json + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + RoleName: + default: '' + description: Role name on which the service is applied + type: string + RoleParameters: + default: {} + description: Parameters specific to the role + type: json + +resources: + + HAProxyBase: + type: ../../../puppet/services/pacemaker/haproxy.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + RoleName: {get_param: RoleName} + RoleParameters: {get_param: RoleParameters} + +outputs: + role_data: + description: Role data for the HAproxy role. + value: + service_name: {get_attr: [HAProxyBase, role_data, service_name]} + config_settings: + map_merge: + - get_attr: [HAProxyBase, role_data, config_settings] + - tripleo::haproxy::haproxy_daemon: false + haproxy_docker: true + tripleo::profile::pacemaker::haproxy_bundle::haproxy_docker_image: &haproxy_image + list_join: + - '/' + - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ] + step_config: + list_join: + - "\n" + - - &noop_pcmk "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }" + - 'include ::tripleo::profile::pacemaker::haproxy_bundle' + service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: haproxy + puppet_tags: haproxy_config + step_config: + list_join: + - "\n" + - - "exec {'wait-for-settle': command => '/bin/true' }" + - &noop_firewall "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}" + - *noop_pcmk + - 'include ::tripleo::profile::pacemaker::haproxy_bundle' + config_image: *haproxy_image + kolla_config: + /var/lib/kolla/config_files/haproxy.json: + command: haproxy -f /etc/haproxy/haproxy.cfg + docker_config: + step_2: + haproxy_init_bundle: + start_order: 3 + detach: false + net: host + user: root + command: + - '/bin/bash' + - '-c' + - str_replace: + template: + list_join: + - '; ' + - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json" + - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'" + params: + TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation' + CONFIG: + list_join: + - ';' + - - *noop_firewall + - 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::haproxy_bundle' + image: *haproxy_image + volumes: + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /etc/puppet:/tmp/puppet-etc:ro + - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro + - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro + - /dev/shm:/dev/shm:rw + metadata_settings: + get_attr: [HAProxyBase, role_data, metadata_settings] diff --git a/docker/services/pacemaker/rabbitmq.yaml b/docker/services/pacemaker/rabbitmq.yaml new file mode 100644 index 00000000..7f6ac701 --- /dev/null +++ b/docker/services/pacemaker/rabbitmq.yaml @@ -0,0 +1,159 @@ 
+heat_template_version: pike + +description: > + OpenStack containerized Rabbitmq service + +parameters: + DockerNamespace: + description: namespace + default: 'tripleoupstream' + type: string + DockerRabbitmqImage: + description: image + default: 'centos-binary-rabbitmq:latest' + type: string + EndpointMap: + default: {} + description: Mapping of service endpoint -> protocol. Typically set + via parameter_defaults in the resource registry. + type: json + ServiceNetMap: + default: {} + description: Mapping of service_name -> network name. Typically set + via parameter_defaults in the resource registry. This + mapping overrides those in ServiceNetMapDefaults. + type: json + DefaultPasswords: + default: {} + type: json + RabbitCookie: + type: string + default: '' + hidden: true + RoleName: + default: '' + description: Role name on which the service is applied + type: string + RoleParameters: + default: {} + description: Parameters specific to the role + type: json + +resources: + + RabbitmqBase: + type: ../../../puppet/services/rabbitmq.yaml + properties: + EndpointMap: {get_param: EndpointMap} + ServiceNetMap: {get_param: ServiceNetMap} + DefaultPasswords: {get_param: DefaultPasswords} + RoleName: {get_param: RoleName} + RoleParameters: {get_param: RoleParameters} + +outputs: + role_data: + description: Role data for the Rabbitmq API role. + value: + service_name: {get_attr: [RabbitmqBase, role_data, service_name]} + config_settings: + map_merge: + - {get_attr: [RabbitmqBase, role_data, config_settings]} + - rabbitmq::service_manage: false + tripleo::profile::pacemaker::rabbitmq_bundle::rabbitmq_docker_image: &rabbitmq_image + list_join: + - '/' + - - {get_param: DockerNamespace} + - {get_param: DockerRabbitmqImage} + step_config: &step_config + get_attr: [RabbitmqBase, role_data, step_config] + service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]} + # BEGIN DOCKER SETTINGS + puppet_config: + config_volume: rabbitmq + puppet_tags: file + step_config: *step_config + config_image: *rabbitmq_image + kolla_config: + /var/lib/kolla/config_files/rabbitmq.json: + command: /usr/sbin/pacemaker_remoted + config_files: + - dest: /etc/libqb/force-filesystem-sockets + source: /dev/null + owner: root + perm: '0644' + permissions: + - path: /var/lib/rabbitmq + owner: rabbitmq:rabbitmq + recurse: true + - path: /var/log/rabbitmq + owner: rabbitmq:rabbitmq + recurse: true + # When using pacemaker we don't launch the container, instead that is done by pacemaker + # itself. 
+ docker_config: + step_1: + rabbitmq_bootstrap: + start_order: 0 + image: *rabbitmq_image + net: host + privileged: false + volumes: + - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro + - /var/lib/config-data/rabbitmq/etc/rabbitmq:/etc/rabbitmq:ro + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /var/lib/rabbitmq:/var/lib/rabbitmq + environment: + - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS + - KOLLA_BOOTSTRAP=True + - + list_join: + - '=' + - - 'RABBITMQ_CLUSTER_COOKIE' + - + yaql: + expression: $.data.passwords.where($ != '').first() + data: + passwords: + - {get_param: RabbitCookie} + - {get_param: [DefaultPasswords, rabbit_cookie]} + step_2: + rabbitmq_init_bundle: + start_order: 0 + detach: false + net: host + user: root + command: + - '/bin/bash' + - '-c' + - str_replace: + template: + list_join: + - '; ' + - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json" + - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'" + params: + TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation' + CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::rabbitmq_bundle' + image: *rabbitmq_image + volumes: + - /etc/hosts:/etc/hosts:ro + - /etc/localtime:/etc/localtime:ro + - /etc/puppet:/tmp/puppet-etc:ro + - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro + - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro + - /dev/shm:/dev/shm:rw + host_prep_tasks: + - name: create /var/lib/rabbitmq + file: + path: /var/lib/rabbitmq + state: directory + - name: stop the Erlang port mapper on the host and make sure it cannot bind to the port used by container + shell: | + echo 'export ERL_EPMD_ADDRESS=127.0.0.1' > /etc/rabbitmq/rabbitmq-env.conf + echo 'export ERL_EPMD_PORT=4370' >> /etc/rabbitmq/rabbitmq-env.conf + for pid in $(pgrep epmd); do if [ "$(lsns -o NS -p $pid)" == "$(lsns -o NS -p 1)" ]; then kill $pid; break; fi; done + upgrade_tasks: + - name: Stop and disable rabbitmq service + tags: step2 + service: name=rabbitmq-server state=stopped enabled=no diff --git a/docker/services/panko-api.yaml b/docker/services/panko-api.yaml index 46cfa5ab..b9e6e93a 100644 --- a/docker/services/panko-api.yaml +++ b/docker/services/panko-api.yaml @@ -101,13 +101,14 @@ outputs: net: host detach: false privileged: false + user: root volumes: list_concat: - {get_attr: [ContainersCommon, volumes]} - - /var/lib/config-data/panko/etc/panko:/etc/panko:ro - /var/log/containers/panko:/var/log/panko - command: /usr/bin/panko-dbsync + command: "/usr/bin/bootstrap_host_exec panko_api su panko -s /bin/bash -c '/usr/bin/panko-dbsync'" step_4: panko_api: start_order: 2 diff --git a/environments/disable-telemetry.yaml b/environments/disable-telemetry.yaml new file mode 100644 index 00000000..6249c286 --- /dev/null +++ b/environments/disable-telemetry.yaml @@ -0,0 +1,20 @@ +# This heat environment can be used to disable all of the telemetry services. +# It is most useful in a resource constrained environment or one in which +# telemetry is not needed. 
+ +resource_registry: + OS::TripleO::Services::CeilometerApi: OS::Heat::None + OS::TripleO::Services::CeilometerCollector: OS::Heat::None + OS::TripleO::Services::CeilometerExpirer: OS::Heat::None + OS::TripleO::Services::CeilometerAgentCentral: OS::Heat::None + OS::TripleO::Services::CeilometerAgentNotification: OS::Heat::None + OS::TripleO::Services::CeilometerAgentIpmi: OS::Heat::None + OS::TripleO::Services::ComputeCeilometerAgent: OS::Heat::None + OS::TripleO::Services::GnocchiApi: OS::Heat::None + OS::TripleO::Services::GnocchiMetricd: OS::Heat::None + OS::TripleO::Services::GnocchiStatsd: OS::Heat::None + OS::TripleO::Services::AodhApi: OS::Heat::None + OS::TripleO::Services::AodhEvaluator: OS::Heat::None + OS::TripleO::Services::AodhNotifier: OS::Heat::None + OS::TripleO::Services::AodhListener: OS::Heat::None + OS::TripleO::Services::PankoApi: OS::Heat::None diff --git a/environments/docker-services-tls-everywhere.yaml b/environments/docker-services-tls-everywhere.yaml index e37f2515..2740664c 100644 --- a/environments/docker-services-tls-everywhere.yaml +++ b/environments/docker-services-tls-everywhere.yaml @@ -50,3 +50,4 @@ parameter_defaults: - OS::TripleO::Services::NovaLibvirt - OS::TripleO::Services::ComputeNeutronOvsAgent - OS::TripleO::Services::Docker + - OS::TripleO::Services::Sshd diff --git a/environments/docker.yaml b/environments/docker.yaml index cbd5b687..36e4c391 100644 --- a/environments/docker.yaml +++ b/environments/docker.yaml @@ -63,3 +63,4 @@ parameter_defaults: - OS::TripleO::Services::ComputeNeutronOvsAgent - OS::TripleO::Services::Docker - OS::TripleO::Services::CeilometerAgentCompute + - OS::TripleO::Services::Sshd diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml index 6fd71013..0f7e1143 100644 --- a/environments/hyperconverged-ceph.yaml +++ b/environments/hyperconverged-ceph.yaml @@ -19,6 +19,7 @@ parameter_defaults: - OS::TripleO::Services::Kernel - OS::TripleO::Services::ComputeNeutronCorePlugin - OS::TripleO::Services::ComputeNeutronOvsAgent + - OS::TripleO::Services::NeutronLinuxbridgeAgent - OS::TripleO::Services::ComputeCeilometerAgent - OS::TripleO::Services::ComputeNeutronL3Agent - OS::TripleO::Services::ComputeNeutronMetadataAgent diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml index 3a606336..547dc31d 100644 --- a/environments/low-memory-usage.yaml +++ b/environments/low-memory-usage.yaml @@ -16,3 +16,5 @@ parameter_defaults: ControllerExtraConfig: 'nova::network::neutron::neutron_url_timeout': '60' + + DatabaseSyncTimeout: 900 diff --git a/environments/neutron-linuxbridge.yaml b/environments/neutron-linuxbridge.yaml new file mode 100644 index 00000000..c8045cc9 --- /dev/null +++ b/environments/neutron-linuxbridge.yaml @@ -0,0 +1,8 @@ +## A Heat environment that can be used to deploy linuxbridge +resource_registry: + OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None + OS::TripleO::Services::NeutronLinuxbridgeAgent: ../puppet/services/neutron-linuxbridge-agent.yaml + +parameter_defaults: + NeutronMechanismDrivers: ['linuxbridge'] diff --git a/environments/neutron-ovs-dpdk.yaml b/environments/neutron-ovs-dpdk.yaml index 004b8ac0..6706bccc 100644 --- a/environments/neutron-ovs-dpdk.yaml +++ b/environments/neutron-ovs-dpdk.yaml @@ -9,7 +9,7 @@ parameter_defaults: #NeutronDpdkMemoryChannels: "" NeutronDatapathType: "netdev" - NeutronVhostuserSocketDir: "/var/run/openvswitch" + NeutronVhostuserSocketDir: 
"/var/lib/vhost_sockets" #NeutronDpdkSocketMemory: "" #NeutronDpdkDriverType: "vfio-pci" diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index c99fa3f1..d4c301bb 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -154,6 +154,7 @@ resource_registry: OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml + OS::TripleO::Services::NeutronLinuxbridgeAgent: OS::Heat::None OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml OS::TripleO::Services::Pacemaker: OS::Heat::None OS::TripleO::Services::PacemakerRemote: OS::Heat::None diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index e1c70dc7..f8655b18 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -197,6 +197,12 @@ parameters: description: > Set to true to append per network Vips to /etc/hosts on each node. + DeploymentServerBlacklist: + default: [] + type: comma_delimited_list + description: > + List of server hostnames to blacklist from any triggered deployments. + conditions: add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]} @@ -293,6 +299,14 @@ resources: RoleName: {{role.name}} RoleParameters: {get_param: {{role.name}}Parameters} + # Lookup of role_data via heat outputs is slow, so workaround this by caching + # the value in an OS::Heat::Value resource + {{role.name}}ServiceChainRoleData: + type: OS::Heat::Value + properties: + type: json + value: {get_attr: [{{role.name}}ServiceChain, role_data]} + # Filter any null/None service_names which may be present due to mapping # of services to OS::Heat::None {{role.name}}ServiceNames: @@ -303,21 +317,21 @@ resources: value: yaql: expression: coalesce($.data, []).where($ != null) - data: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} + data: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_names]} {{role.name}}HostsDeployment: type: OS::Heat::StructuredDeployments properties: name: {{role.name}}HostsDeployment config: {get_attr: [hostsConfig, config_id]} - servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} + servers: {get_attr: [{{role.name}}Servers, value]} {{role.name}}SshKnownHostsDeployment: type: OS::Heat::StructuredDeployments properties: name: {{role.name}}SshKnownHostsDeployment config: {get_resource: SshKnownHostsConfig} - servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} + servers: {get_attr: [{{role.name}}Servers, value]} {{role.name}}AllNodesDeployment: type: OS::Heat::StructuredDeployments @@ -328,7 +342,7 @@ resources: properties: name: {{role.name}}AllNodesDeployment config: {get_attr: [allNodesConfig, config_id]} - servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} + servers: {get_attr: [{{role.name}}Servers, value]} input_values: # Note we have to use yaql to look up the first hostname/ip in the # list because heat path based attributes operate on the attribute @@ -350,7 +364,7 @@ resources: properties: name: {{role.name}}AllNodesValidationDeployment config: {get_resource: AllNodesValidationConfig} - servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} + servers: {get_attr: [{{role.name}}Servers, value]} {{role.name}}IpListMap: type: OS::TripleO::Network::Ports::NetIpListMap @@ -365,7 +379,13 @@ resources: EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]} ServiceNetMap: 
{get_attr: [ServiceNetMap, service_net_map_lower]} ServiceHostnameList: {get_attr: [{{role.name}}, hostname]} - NetworkHostnameMap: + NetworkHostnameMap: {get_attr: [{{role.name}}NetworkHostnameMap, value]} + + {{role.name}}NetworkHostnameMap: + type: OS::Heat::Value + properties: + type: json + value: # Note (shardy) this somewhat complex yaql may be replaced # with a map_deep_merge function in ocata. It merges the # list of maps, but appends to colliding lists so we can @@ -403,7 +423,7 @@ resources: {% endif %} ServiceConfigSettings: map_merge: - - get_attr: [{{role.name}}ServiceChain, role_data, config_settings] + - get_attr: [{{role.name}}ServiceChainRoleData, value, config_settings] {% for r in roles %} - get_attr: [{{r.name}}ServiceChain, role_data, global_config_settings] {% endfor %} @@ -423,10 +443,41 @@ resources: {% endfor %} services: {get_attr: [{{role.name}}ServiceNames, value]} ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]} - MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]} - ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]} + MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]} + ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]} + DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]} +{% endfor %} + +{% for role in roles %} + {{role.name}}Servers: + type: OS::Heat::Value + depends_on: {{role.name}} + properties: + type: json + value: + yaql: + expression: let(servers=>switch(isDict($.data.servers) => $.data.servers, true => {})) -> $servers.deleteAll($servers.keys().where($servers[$] = null)) + data: + servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]} {% endfor %} + # This resource just creates a dict out of the DeploymentServerBlacklist, + # which is a list. The dict is used in the role templates to set a condition + # on whether to create the deployment resources. We can't use the list + # directly because there is no way to ask Heat if a list contains a specific + # value. 
+ DeploymentServerBlacklistDict: + type: OS::Heat::Value + properties: + type: json + value: + map_merge: + repeat: + template: + hostname: 1 + for_each: + hostname: {get_param: DeploymentServerBlacklist} + hostsConfig: type: OS::TripleO::Hosts::SoftwareConfig properties: @@ -465,7 +516,7 @@ resources: data: groups: {% for role in roles %} - - {get_attr: [{{role.name}}ServiceChain, role_data, logging_groups]} + - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]} {% endfor %} logging_sources: yaql: @@ -474,7 +525,7 @@ resources: data: sources: {% for role in roles %} - - {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]} + - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]} {% endfor %} controller_ips: {get_attr: [{{primary_role_name}}, ip_address]} controller_names: {get_attr: [{{primary_role_name}}, hostname]} @@ -649,7 +700,7 @@ resources: properties: servers: {% for role in roles %} - {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} + {{role.name}}: {get_attr: [{{role.name}}Servers, value]} {% endfor %} input_values: deploy_identifier: {get_param: DeployIdentifier} @@ -667,7 +718,7 @@ resources: properties: servers: {% for role in roles %} - {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} + {{role.name}}: {get_attr: [{{role.name}}Servers, value]} {% endfor %} # Post deployment steps for all roles @@ -681,12 +732,12 @@ resources: properties: servers: {% for role in roles %} - {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} + {{role.name}}: {get_attr: [{{role.name}}Servers, value]} {% endfor %} EndpointMap: {get_attr: [EndpointMap, endpoint_map]} role_data: {% for role in roles %} - {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} + {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]} {% endfor %} outputs: @@ -725,7 +776,7 @@ outputs: description: The configuration data associated with each role value: {% for role in roles %} - {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} + {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]} {% endfor %} RoleNetIpMap: description: Mapping of each network to a list of IPs for each role @@ -733,3 +784,9 @@ outputs: {% for role in roles %} {{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]} {% endfor %} + RoleNetHostnameMap: + description: Mapping of each network to a list of hostnames for each role + value: +{% for role in roles %} + {{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]} +{% endfor %} diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml index 3fc663fb..d66cbd90 100644 --- a/puppet/blockstorage-role.yaml +++ b/puppet/blockstorage-role.yaml @@ -132,6 +132,20 @@ parameters: major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml environment files. default: '' + DeploymentServerBlacklistDict: + default: {} + type: json + description: > + Map of server hostnames to blacklist from any triggered + deployments. If the value is 1, the server will be blacklisted. This + parameter is generated from the parent template. 
+ +conditions: + server_not_blacklisted: + not: + equals: + - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]} + - 1 resources: BlockStorage: @@ -362,6 +376,7 @@ resources: NetworkDeployment: type: OS::TripleO::SoftwareDeployment depends_on: PreNetworkConfig + condition: server_not_blacklisted properties: name: NetworkDeployment config: {get_resource: NetworkConfig} @@ -385,6 +400,7 @@ resources: BlockStorageUpgradeInitDeployment: type: OS::Heat::SoftwareDeployment depends_on: NetworkDeployment + condition: server_not_blacklisted properties: name: BlockStorageUpgradeInitDeployment server: {get_resource: BlockStorage} @@ -393,6 +409,7 @@ resources: BlockStorageDeployment: type: OS::Heat::StructuredDeployment depends_on: BlockStorageUpgradeInitDeployment + condition: server_not_blacklisted properties: name: BlockStorageDeployment server: {get_resource: BlockStorage} @@ -459,6 +476,7 @@ resources: UpdateDeployment: type: OS::Heat::SoftwareDeployment depends_on: NetworkDeployment + condition: server_not_blacklisted properties: name: UpdateDeployment config: {get_resource: UpdateConfig} @@ -555,6 +573,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY" description: Heat resource handle for the block storage server value: {get_resource: BlockStorage} + condition: server_not_blacklisted external_ip_address: description: IP address of the server in the external network value: {get_attr: [ExternalPort, ip_address]} diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml index 295e64f5..d4dfa719 100644 --- a/puppet/cephstorage-role.yaml +++ b/puppet/cephstorage-role.yaml @@ -138,6 +138,20 @@ parameters: major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml environment files. default: '' + DeploymentServerBlacklistDict: + default: {} + type: json + description: > + Map of server hostnames to blacklist from any triggered + deployments. If the value is 1, the server will be blacklisted. This + parameter is generated from the parent template. 
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
 
 resources:
 
   CephStorage:
@@ -368,6 +382,7 @@ resources:
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     depends_on: PreNetworkConfig
+    condition: server_not_blacklisted
     properties:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
@@ -391,6 +406,7 @@ resources:
   CephStorageUpgradeInitDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       name: CephStorageUpgradeInitDeployment
       server: {get_resource: CephStorage}
@@ -399,6 +415,7 @@ resources:
   CephStorageDeployment:
     type: OS::Heat::StructuredDeployment
     depends_on: CephStorageUpgradeInitDeployment
+    condition: server_not_blacklisted
     properties:
       name: CephStorageDeployment
       config: {get_resource: CephStorageConfig}
@@ -471,6 +488,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       config: {get_resource: UpdateConfig}
       server: {get_resource: CephStorage}
@@ -566,6 +584,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for the ceph storage server
     value:
       {get_resource: CephStorage}
+    condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 05318f3f..ff1f6d2a 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -150,6 +150,20 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
 
 resources:
 
@@ -382,6 +396,7 @@ resources:
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     depends_on: PreNetworkConfig
+    condition: server_not_blacklisted
     properties:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
@@ -408,6 +423,7 @@ resources:
   NovaComputeUpgradeInitDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       name: NovaComputeUpgradeInitDeployment
       server: {get_resource: NovaCompute}
@@ -459,6 +475,7 @@ resources:
   NovaComputeDeployment:
     type: OS::TripleO::SoftwareDeployment
     depends_on: NovaComputeUpgradeInitDeployment
+    condition: server_not_blacklisted
     properties:
       name: NovaComputeDeployment
       config: {get_resource: NovaComputeConfig}
@@ -494,6 +511,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       name: UpdateDeployment
       config: {get_resource: UpdateConfig}
@@ -609,4 +627,5 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   nova_server_resource:
     description: Heat resource handle for the Nova compute server
     value:
-      {get_resource: NovaCompute}
\ No newline at end of file
+      {get_resource: NovaCompute}
+    condition: server_not_blacklisted
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 163ba57b..9bf110d5 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -164,6 +164,13 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
 
 parameter_groups:
 - label: deprecated
@@ -171,6 +178,14 @@ parameter_groups:
   parameters:
   - controllerExtraConfig
 
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
+
+
 resources:
 
   Controller:
@@ -400,6 +415,7 @@ resources:
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
+    condition: server_not_blacklisted
     depends_on: PreNetworkConfig
     properties:
       name: NetworkDeployment
@@ -441,6 +457,7 @@ resources:
   # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
   ControllerUpgradeInitDeployment:
     type: OS::Heat::SoftwareDeployment
+    condition: server_not_blacklisted
     depends_on: NetworkDeployment
     properties:
       name: ControllerUpgradeInitDeployment
@@ -449,6 +466,7 @@ resources:
   ControllerDeployment:
     type: OS::TripleO::SoftwareDeployment
+    condition: server_not_blacklisted
     depends_on: ControllerUpgradeInitDeployment
     properties:
       name: ControllerDeployment
@@ -532,6 +550,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
+    condition: server_not_blacklisted
     depends_on: NetworkDeployment
     properties:
       name: UpdateDeployment
@@ -649,6 +668,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for the Nova compute server
     value:
       {get_resource: Controller}
+    condition: server_not_blacklisted
   tls_key_modulus_md5:
     description: MD5 checksum of the TLS Key Modulus
     value: {get_attr: [NodeTLSData, key_modulus_md5]}
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 7ee12b19..2f7056c4 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -132,6 +132,20 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
 
 resources:
 
@@ -362,6 +376,7 @@ resources:
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     depends_on: PreNetworkConfig
+    condition: server_not_blacklisted
     properties:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
@@ -385,6 +400,7 @@ resources:
   SwiftStorageUpgradeInitDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       name: SwiftStorageUpgradeInitDeployment
       server: {get_resource: SwiftStorage}
@@ -430,6 +446,7 @@ resources:
   SwiftStorageHieraDeploy:
     type: OS::Heat::StructuredDeployment
     depends_on: SwiftStorageUpgradeInitDeployment
+    condition: server_not_blacklisted
     properties:
       name: SwiftStorageHieraDeploy
       server: {get_resource: SwiftStorage}
@@ -458,6 +475,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       config: {get_resource: UpdateConfig}
       server: {get_resource: SwiftStorage}
@@ -553,6 +571,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for the swift storage server
     value:
       {get_resource: SwiftStorage}
+    condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index dbb517f0..7acf2dfb 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -154,6 +154,20 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
 
 resources:
 
   {{role}}:
@@ -384,6 +398,7 @@ resources:
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     depends_on: PreNetworkConfig
+    condition: server_not_blacklisted
     properties:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
@@ -410,6 +425,7 @@ resources:
   {{role}}UpgradeInitDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       name: {{role}}UpgradeInitDeployment
       server: {get_resource: {{role}}}
@@ -418,6 +434,7 @@ resources:
   {{role}}Deployment:
     type: OS::Heat::StructuredDeployment
     depends_on: {{role}}UpgradeInitDeployment
+    condition: server_not_blacklisted
     properties:
       name: {{role}}Deployment
       config: {get_resource: {{role}}Config}
@@ -492,6 +509,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
+    condition: server_not_blacklisted
     properties:
       name: UpdateDeployment
       config: {get_resource: UpdateConfig}
@@ -588,6 +606,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for {{role}} server
     value:
       {get_resource: {{role}}}
+    condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
diff --git a/puppet/services/disabled/ceilometer-expirer-disabled.yaml b/puppet/services/disabled/ceilometer-expirer-disabled.yaml
index e6d8ee6e..9b7b47ef 100644
--- a/puppet/services/disabled/ceilometer-expirer-disabled.yaml
+++ b/puppet/services/disabled/ceilometer-expirer-disabled.yaml
@@ -27,12 +27,24 @@ parameters:
                  via parameter_defaults in the resource registry.
     type: json
 
+resources:
+  CeilometerServiceBase:
+    type: ../ceilometer-base.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
 outputs:
   role_data:
-    description: Role data for the disabled Ceilometer Expirer role.
+    description: Role data for disabling the Ceilometer Expirer role.
     value:
       service_name: ceilometer_expirer_disabled
-      upgrade_tasks:
-        - name: Stop and disable ceilometer_expirer service on upgrade
-          tags: step1
-          service: name=openstack-ceilometer-expirer state=stopped enabled=no
+      config_settings:
+        map_merge:
+          - get_attr: [CeilometerServiceBase, role_data, config_settings]
+          - ceilometer::expirer::enable_cron: false
+      step_config: |
+        include ::tripleo::profile::base::ceilometer::expirer
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index d62c349e..012bd727 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -76,7 +76,7 @@ outputs:
           query:
             read_default_file: /etc/my.cnf.d/tripleo.cnf
             read_default_group: tripleo
-        gnocchi::db::sync::extra_opts: '--skip-storage'
+        gnocchi::db::sync::extra_opts: ''
         gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
         gnocchi::storage::swift::swift_user: 'service:gnocchi'
         gnocchi::storage::swift::swift_auth_version: 3
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 3f9b0b7e..c142b475 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -77,6 +77,8 @@ outputs:
             value: 0
           net.ipv4.conf.all.send_redirects:
             value: 0
+          net.ipv4.conf.all.arp_accept:
+            value: 1
           net.ipv4.conf.default.accept_redirects:
             value: 0
           net.ipv4.conf.default.secure_redirects:
diff --git a/puppet/services/neutron-linuxbridge-agent.yaml b/puppet/services/neutron-linuxbridge-agent.yaml
new file mode 100644
index 00000000..f4324054
--- /dev/null
+++ b/puppet/services/neutron-linuxbridge-agent.yaml
@@ -0,0 +1,83 @@
+heat_template_version: ocata
+
+description: >
+  OpenStack Neutron Linuxbridge agent configured with Puppet.
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry. This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  PhysicalInterfaceMapping:
+    description: List of <physical_network>:<physical_interface> tuples
+                 mapping physical network names to agent's node-specific
+                 physical network interfaces. Defaults to empty list.
+    type: comma_delimited_list
+    default: ''
+  NeutronLinuxbridgeFirewallDriver:
+    default: ''
+    description: Configure the classname of the firewall driver to use for
+                 implementing security groups. Possible values depend on
+                 system configuration. The default value of an empty string
+                 will result in a default supported configuration.
+    type: string
+  NeutronEnableL2Pop:
+    type: string
+    description: Enable/disable the L2 population feature in the Neutron agents.
+    default: 'False'
+  NeutronTunnelTypes:
+    default: 'vxlan'
+    description: The tunnel types for the Neutron tenant network.
+    type: comma_delimited_list
+
+conditions:
+  no_firewall_driver: {equals : [{get_param: NeutronLinuxbridgeFirewallDriver}, '']}
+
+resources:
+
+  NeutronBase:
+    type: ./neutron-base.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron Linuxbridge agent service.
+    value:
+      service_name: neutron_linuxbridge_agent
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronBase, role_data, config_settings]
+          - neutron::agents::ml2::linuxbridge::physical_interface_mappings: {get_param: PhysicalInterfaceMapping}
+            neutron::agents::ml2::linuxbridge::l2_population: {get_param: NeutronEnableL2Pop}
+            neutron::agents::ml2::linuxbridge::tunnel_types: {get_param: NeutronTunnelTypes}
+            neutron::agents::ml2::linuxbridge::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+            neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.BridgeInterfaceDriver'
+            neutron::agents::dhcp::dhcp_driver: 'neutron.agent.linux.dhcp.Dnsmasq'
+          -
+            if:
+              - no_firewall_driver
+              - {}
+              - neutron::agents::ml2::linuxbridge::firewall_driver: {get_param: NeutronLinuxbridgeFirewallDriver}
+      step_config: |
+        include ::tripleo::profile::base::neutron::linuxbridge
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index 16ccb9e0..e39e997a 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -72,7 +72,7 @@ parameters:
     description: >
      Reserved RAM for host processes.
     type: number
-    default: 2048
+    default: 4096
     constraints:
       - range: { min: 512 }
   MonitoringSubscriptionNovaCompute:
diff --git a/puppet/services/pacemaker/cinder-volume.yaml b/puppet/services/pacemaker/cinder-volume.yaml
index 659368a4..39914db5 100644
--- a/puppet/services/pacemaker/cinder-volume.yaml
+++ b/puppet/services/pacemaker/cinder-volume.yaml
@@ -54,3 +54,18 @@ outputs:
             cinder::host: hostgroup
       step_config:
         include ::tripleo::profile::pacemaker::cinder::volume
+      upgrade_tasks:
+        - name: Stop cinder_volume service (pacemaker)
+          tags: step1
+          pacemaker_resource:
+            resource: openstack-cinder-volume
+            state: disable
+            wait_for_resource: true
+        - name: Sync cinder DB
+          tags: step5
+          command: cinder-manage db sync
+        - name: Start cinder_volume service (pacemaker)
+          tags: step5
+          pacemaker_resource:
+            resource: openstack-cinder-volume
+            state: enable
diff --git a/releasenotes/notes/change-db-sync-timeout-57abe3e48d741842.yaml b/releasenotes/notes/change-db-sync-timeout-57abe3e48d741842.yaml
new file mode 100644
index 00000000..46f3b855
--- /dev/null
+++ b/releasenotes/notes/change-db-sync-timeout-57abe3e48d741842.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    During a deployment on lower-spec systems, the "db sync" can take longer
+    than five minutes. The value of DatabaseSyncTimeout has been changed from
+    300 to 900 in the environment file "low-memory-usage.yaml".
diff --git a/releasenotes/notes/enable-arp_accept-6296b0113bc56b10.yaml b/releasenotes/notes/enable-arp_accept-6296b0113bc56b10.yaml
new file mode 100644
index 00000000..4025477b
--- /dev/null
+++ b/releasenotes/notes/enable-arp_accept-6296b0113bc56b10.yaml
@@ -0,0 +1,9 @@
+---
+other:
+  - |
+    All nodes now enable the ``arp_accept`` sysctl setting to help with
+    honoring gratuitous ARP packets in their ARP tables. While sources of
+    gratuitous ARP packets are diverse, this is especially useful for Neutron
+    floating IP addresses that roam between devices, and for which the Neutron
+    L3 agent sends gratuitous ARP packets to update all network nodes about
+    the new locations of IP addresses.
diff --git a/releasenotes/notes/increase-nova-reserved-host-memory-80434e8484a29680.yaml b/releasenotes/notes/increase-nova-reserved-host-memory-80434e8484a29680.yaml
new file mode 100644
index 00000000..88f67a03
--- /dev/null
+++ b/releasenotes/notes/increase-nova-reserved-host-memory-80434e8484a29680.yaml
@@ -0,0 +1,4 @@
+---
+other:
+  - Increased the default of NovaReservedHostMemory for
+    Compute nodes to 4096 MB.
diff --git a/releasenotes/notes/server-blacklist-support-370c1a1f15a28a41.yaml b/releasenotes/notes/server-blacklist-support-370c1a1f15a28a41.yaml
new file mode 100644
index 00000000..7ab253b6
--- /dev/null
+++ b/releasenotes/notes/server-blacklist-support-370c1a1f15a28a41.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - Added the ability to blacklist servers by name from being
+    associated with any Heat-triggered SoftwareDeployment
+    resources. The servers are specified in the new
+    DeploymentServerBlacklist parameter.
diff --git a/releasenotes/notes/vhost_default_dir-cac327a0ac05df90.yaml b/releasenotes/notes/vhost_default_dir-cac327a0ac05df90.yaml
new file mode 100644
index 00000000..b9ddaec4
--- /dev/null
+++ b/releasenotes/notes/vhost_default_dir-cac327a0ac05df90.yaml
@@ -0,0 +1,6 @@
+---
+issues:
+  - Modify ``NeutronVhostuserSocketDir`` to a separate directory in the DPDK
+    environment file. A different set of permissions is required for creating
+    vhost sockets when the vhost type is dpdkvhostuserclient (which is the
+    default from Ocata).
diff --git a/roles_data.yaml b/roles_data.yaml
index 68d0b9e2..86cd3f0d 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -75,6 +75,7 @@
     - OS::TripleO::Services::NeutronCorePlugin
     - OS::TripleO::Services::NeutronOvsAgent
     - OS::TripleO::Services::NeutronL2gwAgent
+    - OS::TripleO::Services::NeutronLinuxbridgeAgent
     - OS::TripleO::Services::RabbitMQ
     - OS::TripleO::Services::HAproxy
     - OS::TripleO::Services::Keepalived
@@ -167,6 +168,7 @@
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::ComputeNeutronCorePlugin
    - OS::TripleO::Services::ComputeNeutronOvsAgent
+    - OS::TripleO::Services::NeutronLinuxbridgeAgent
     - OS::TripleO::Services::ComputeCeilometerAgent
     - OS::TripleO::Services::ComputeNeutronL3Agent
     - OS::TripleO::Services::ComputeNeutronMetadataAgent
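For anyone trying the blacklist feature described in the release note above, the new DeploymentServerBlacklist parameter is expected to be supplied through an environment file at deploy or update time; the per-role DeploymentServerBlacklistDict map that the role templates check is described as being generated by the parent template and is not part of this change set. A minimal sketch, using hypothetical node hostnames:

parameter_defaults:
  # Hypothetical example: skip any further Heat-triggered SoftwareDeployment
  # resources on these two nodes; names should match the Heat server hostnames.
  DeploymentServerBlacklist:
    - overcloud-compute-0
    - overcloud-cephstorage-1

With an entry like this in place, the server_not_blacklisted condition added to the role templates evaluates to false for the listed hosts (their map value is 1), so their NetworkDeployment, UpgradeInit, role and UpdateDeployment resources are skipped on subsequent stack operations.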