-rw-r--r--  README.rst | 2
-rw-r--r--  bindep.txt | 2
-rw-r--r--  capabilities-map.yaml | 5
-rw-r--r--  deployed-server/deployed-server-roles-data.yaml | 5
-rwxr-xr-x  docker/docker-puppet.py | 5
-rw-r--r--  docker/docker-steps.j2 | 28
-rw-r--r--  docker/services/aodh-api.yaml | 123
-rw-r--r--  docker/services/aodh-evaluator.yaml | 84
-rw-r--r--  docker/services/aodh-listener.yaml | 84
-rw-r--r--  docker/services/aodh-notifier.yaml | 84
-rw-r--r--  docker/services/database/mongodb.yaml | 21
-rw-r--r--  docker/services/database/mysql.yaml | 30
-rw-r--r--  docker/services/gnocchi-api.yaml | 118
-rw-r--r--  docker/services/gnocchi-metricd.yaml | 78
-rw-r--r--  docker/services/gnocchi-statsd.yaml | 78
-rw-r--r--  docker/services/heat-api-cfn.yaml | 4
-rw-r--r--  docker/services/heat-api.yaml | 4
-rw-r--r--  docker/services/ironic-conductor.yaml | 48
-rw-r--r--  docker/services/ironic-pxe.yaml | 9
-rw-r--r--  docker/services/keystone.yaml | 26
-rw-r--r--  docker/services/mistral-engine.yaml | 2
-rw-r--r--  docker/services/mistral-executor.yaml | 2
-rw-r--r--  docker/services/neutron-dhcp.yaml | 6
-rw-r--r--  docker/services/neutron-l3.yaml | 2
-rw-r--r--  docker/services/nova-api.yaml | 4
-rw-r--r--  docker/services/nova-compute.yaml | 7
-rw-r--r--  docker/services/nova-conductor.yaml | 4
-rw-r--r--  docker/services/nova-ironic.yaml | 4
-rw-r--r--  docker/services/nova-libvirt.yaml | 16
-rw-r--r--  docker/services/nova-placement.yaml | 7
-rw-r--r--  docker/services/nova-scheduler.yaml | 4
-rw-r--r--  docker/services/panko-api.yaml | 119
-rw-r--r--  docker/services/rabbitmq.yaml | 9
-rw-r--r--  docker/services/services.yaml | 5
-rw-r--r--  docker/services/swift-proxy.yaml | 7
-rw-r--r--  docker/services/swift-storage.yaml | 98
-rw-r--r--  environments/contrail/roles_data_contrail.yaml | 9
-rw-r--r--  environments/docker.yaml | 17
-rw-r--r--  environments/enable-internal-tls.yaml | 2
-rw-r--r--  environments/hyperconverged-ceph.yaml | 1
-rw-r--r--  environments/neutron-bgpvpn.yaml | 16
-rw-r--r--  environments/services-docker/ironic.yaml | 5
-rw-r--r--  environments/services-docker/mistral.yaml | 4
-rw-r--r--  environments/services-docker/zaqar.yaml | 2
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration | 48
-rw-r--r--  extraconfig/tasks/aodh_data_migration.sh | 19
-rw-r--r--  extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml | 62
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh | 109
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh | 36
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh | 177
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh | 68
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh | 17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh | 8
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh | 15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 175
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 200
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml | 25
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp | 103
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 11
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 2
-rw-r--r--  puppet/puppet-steps.j2 | 2
-rw-r--r--  puppet/services/aodh-base.yaml | 4
-rw-r--r--  puppet/services/ceilometer-base.yaml | 6
-rw-r--r--  puppet/services/certmonger-user.yaml | 28
-rw-r--r--  puppet/services/cinder-api.yaml | 6
-rw-r--r--  puppet/services/cinder-backend-scaleio.yaml | 2
-rw-r--r--  puppet/services/congress.yaml | 6
-rw-r--r--  puppet/services/database/redis-base.yaml | 1
-rw-r--r--  puppet/services/etcd.yaml | 2
-rw-r--r--  puppet/services/glance-api.yaml | 105
-rw-r--r--  puppet/services/glance-base.yaml | 126
-rw-r--r--  puppet/services/gnocchi-api.yaml | 8
-rw-r--r--  puppet/services/gnocchi-base.yaml | 3
-rw-r--r--  puppet/services/heat-api-cfn.yaml | 55
-rw-r--r--  puppet/services/heat-api-cloudwatch.yaml | 55
-rw-r--r--  puppet/services/heat-api.yaml | 53
-rw-r--r--  puppet/services/heat-base.yaml | 4
-rw-r--r--  puppet/services/horizon.yaml | 2
-rw-r--r--  puppet/services/ironic-api.yaml | 4
-rw-r--r--  puppet/services/ironic-conductor.yaml | 43
-rw-r--r--  puppet/services/keystone.yaml | 2
-rw-r--r--  puppet/services/manila-api.yaml | 4
-rw-r--r--  puppet/services/monitoring/sensu-base.yaml | 15
-rw-r--r--  puppet/services/neutron-api.yaml | 6
-rw-r--r--  puppet/services/neutron-bgpvpn-api.yaml | 34
-rw-r--r--  puppet/services/nova-api.yaml | 4
-rw-r--r--  puppet/services/nova-ironic.yaml | 2
-rw-r--r--  puppet/services/octavia-base.yaml | 6
-rw-r--r--  puppet/services/opendaylight-api.yaml | 5
-rw-r--r--  puppet/services/pacemaker.yaml | 6
-rw-r--r--  puppet/services/panko-base.yaml | 4
-rw-r--r--  puppet/services/tacker.yaml | 6
-rw-r--r--  releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml | 6
-rw-r--r--  releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml | 3
-rw-r--r--  releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml | 5
-rw-r--r--  releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml | 6
-rw-r--r--  releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml | 10
-rw-r--r--  releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml | 6
-rw-r--r--  releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml | 4
-rw-r--r--  releasenotes/notes/ha-by-default-55326e699ee8602c.yaml | 5
-rw-r--r--  roles_data.yaml | 6
-rw-r--r--  roles_data_undercloud.yaml | 8
-rwxr-xr-x  tools/yaml-validate.py | 62
103 files changed, 1614 insertions, 1381 deletions
diff --git a/README.rst b/README.rst
index 68fdd0ec..e2b59c59 100644
--- a/README.rst
+++ b/README.rst
@@ -66,7 +66,7 @@ and should be executed according to the following table:
+================+=============+=============+=============+=============+=================+
| keystone | X | X | X | X | X |
+----------------+-------------+-------------+-------------+-------------+-----------------+
-| glance | file | swift | file | file | swift |
+| glance | rbd | swift | file | swift + rbd | swift |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| cinder | rbd | iscsi | | | iscsi |
+----------------+-------------+-------------+-------------+-------------+-----------------+
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 00000000..4f9b4254
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,2 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see http://docs.openstack.org/infra/bindep/ for additional information.
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index 66dc1d1d..83b3ac40 100644
--- a/capabilities-map.yaml
+++ b/capabilities-map.yaml
@@ -308,6 +308,11 @@ topics:
description: >
Enable various Neutron plugins and backends
environments:
+ - file: environments/neutron-bgpvpn.yaml
+ title: Neutron BGPVPN Service Plugin
+ description: Enables Neutron BGPVPN Service Plugin
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-ml2-bigswitch.yaml
title: BigSwitch Extensions
description: >
diff --git a/deployed-server/deployed-server-roles-data.yaml b/deployed-server/deployed-server-roles-data.yaml
index 04da5565..084c2f8f 100644
--- a/deployed-server/deployed-server-roles-data.yaml
+++ b/deployed-server/deployed-server-roles-data.yaml
@@ -26,6 +26,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::CephRgw
@@ -109,6 +110,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -133,6 +135,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -147,6 +150,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -162,6 +166,7 @@
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index 157bf63f..0f079436 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -210,6 +210,11 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
dcmd.extend(['--entrypoint', sh_script])
env = {}
+ # NOTE(flaper87): Always copy the DOCKER_* environment variables as
+ # they contain the access data for the docker daemon.
+ for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
+ env[k] = os.environ.get(k)
+
if os.environ.get('NET_HOST', 'false') == 'true':
print('NET_HOST enabled')
dcmd.extend(['--net', 'host', '--volume',
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
index 2f5953d3..301d838f 100644
--- a/docker/docker-steps.j2
+++ b/docker/docker-steps.j2
@@ -123,6 +123,32 @@ resources:
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}CreateConfigDir}
+ {{role.name}}HostPrepAnsible:
+ type: OS::Heat::Value
+ properties:
+ value:
+ str_replace:
+ template: CONFIG
+ params:
+ CONFIG:
+ - hosts: localhost
+ connection: local
+ tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+
+ {{role.name}}HostPrepConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ansible
+ options:
+ modulepath: /usr/share/ansible-modules
+ config: {get_attr: [{{role.name}}HostPrepAnsible, value]}
+
+ {{role.name}}HostPrepDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}HostPrepConfig}
+
# this creates a JSON config file for our docker-puppet.py script
{{role.name}}GenPuppetConfig:
type: OS::Heat::StructuredConfig
@@ -146,7 +172,7 @@ resources:
{{role.name}}GenerateConfigDeployment:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment]
+ depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment, {{role.name}}HostPrepDeployment]
properties:
name: {{role.name}}GenerateConfigDeployment
servers: {get_param: [servers, {{role.name}}]}
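For context, the new {{role.name}}HostPrepAnsible / HostPrepConfig / HostPrepDeployment resources above turn each role's aggregated host_prep_tasks into a plain Ansible play executed locally on every node of that role before config generation. A minimal sketch of the rendered play, assuming a hypothetical role whose services contribute a single directory-creation task (task borrowed from the nova-compute change later in this diff):

- hosts: localhost
  connection: local
  tasks:
    # contributed by a service template's host_prep_tasks output
    - name: create /var/lib/libvirt
      file:
        path: /var/lib/libvirt
        state: directory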
diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml
new file mode 100644
index 00000000..ca410d6d
--- /dev/null
+++ b/docker/services/aodh-api.yaml
@@ -0,0 +1,123 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized aodh service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhApiImage:
+ description: image
+ default: 'centos-binary-aodh-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhApiPuppetBase:
+ type: ../../puppet/services/aodh-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the aodh API role.
+ value:
+ service_name: {get_attr: [AodhApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [AodhApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_api_paste_ini,aodh_config
+ step_config: *step_config
+ config_image: &aodh_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ - dest: /etc/httpd/conf.d/10-aodh_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-aodh_wsgi.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/aodh/app
+ owner: aodh
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/aodh/app
+ docker_config:
+ step_3:
+ aodh-init-log:
+ start_order: 0
+ image: *aodh_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/aodh && chown aodh:aodh /var/log/aodh']
+ volumes:
+ - logs:/var/log
+ aodh_db_sync:
+ start_order: 1
+ image: *aodh_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ command: /usr/bin/aodh-dbsync
+ step_4:
+ aodh-api:
+ image: *aodh_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/aodh/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable aodh service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
diff --git a/docker/services/aodh-evaluator.yaml b/docker/services/aodh-evaluator.yaml
new file mode 100644
index 00000000..d3c8c595
--- /dev/null
+++ b/docker/services/aodh-evaluator.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Evaluator service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhEvaluatorImage:
+ description: image
+ default: 'centos-binary-aodh-evaluator:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhEvaluatorBase:
+ type: ../../puppet/services/aodh-evaluator.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh API role.
+ value:
+ service_name: {get_attr: [AodhEvaluatorBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhEvaluatorBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhEvaluatorBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhEvaluatorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_evaluator_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-evaluator.json:
+ command: /usr/bin/aodh-evaluator
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ docker_config:
+ step_4:
+ aodh_evaluator:
+ image: *aodh_evaluator_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-evaluator service
+ tags: step2
+ service: name=openstack-aodh-evaluator.service state=stopped enabled=no
diff --git a/docker/services/aodh-listener.yaml b/docker/services/aodh-listener.yaml
new file mode 100644
index 00000000..7aa9618d
--- /dev/null
+++ b/docker/services/aodh-listener.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Listener service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhListenerImage:
+ description: image
+ default: 'centos-binary-aodh-listener:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhListenerBase:
+ type: ../../puppet/services/aodh-listener.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh API role.
+ value:
+ service_name: {get_attr: [AodhListenerBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhListenerBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhListenerBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhListenerBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_listener_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-listener.json:
+ command: /usr/bin/aodh-listener
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ docker_config:
+ step_4:
+ aodh_listener:
+ image: *aodh_listener_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-listener service
+ tags: step2
+ service: name=openstack-aodh-listener.service state=stopped enabled=no
diff --git a/docker/services/aodh-notifier.yaml b/docker/services/aodh-notifier.yaml
new file mode 100644
index 00000000..f525d6bd
--- /dev/null
+++ b/docker/services/aodh-notifier.yaml
@@ -0,0 +1,84 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Aodh Notifier service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhNotifierImage:
+ description: image
+ default: 'centos-binary-aodh-notifier:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ AodhNotifierBase:
+ type: ../../puppet/services/aodh-notifier.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Aodh API role.
+ value:
+ service_name: {get_attr: [AodhNotifierBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhNotifierBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhNotifierBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhNotifierBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_notifier_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-notifier.json:
+ command: /usr/bin/aodh-notifier
+ config_files:
+ - dest: /etc/aodh/aodh.conf
+ owner: aodh
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/aodh/aodh.conf
+ docker_config:
+ step_4:
+ aodh_notifier:
+ image: *aodh_notifier_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-notifier service
+ tags: step2
+ service: name=openstack-aodh-notifier.service state=stopped enabled=no
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index 68a64a7d..265558a4 100644
--- a/docker/services/database/mongodb.yaml
+++ b/docker/services/database/mongodb.yaml
@@ -73,7 +73,16 @@ outputs:
perm: '0600'
docker_config:
step_2:
+ mongodb_data_ownership:
+ start_order: 0
+ image: *mongodb_image
+ net: host
+ user: root
+ command: ['chown', '-R', 'mongodb:', '/var/lib/mongodb']
+ volumes:
+ - /var/lib/mongodb:/var/lib/mongodb
mongodb:
+ start_order: 1
image: *mongodb_image
net: host
privileged: false
@@ -82,7 +91,7 @@ outputs:
- /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro
- /etc/localtime:/etc/localtime:ro
- logs:/var/log/kolla
- - mongodb:/var/lib/mongodb/
+ - /var/lib/mongodb:/var/lib/mongodb
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
docker_puppet_tasks:
@@ -91,13 +100,15 @@ outputs:
config_volume: 'mongodb_init_tasks'
puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
step_config: 'include ::tripleo::profile::base::database::mongodb'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+ config_image: *mongodb_image
volumes:
- "mongodb:/var/lib/mongodb/"
- "logs:/var/log/kolla:ro"
+ host_prep_tasks:
+ - name: create /var/lib/mongodb
+ file:
+ path: /var/lib/mongodb
+ state: directory
upgrade_tasks:
- name: Stop and disable mongodb service
tags: step2
diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml
index 46b856e3..0ffd0336 100644
--- a/docker/services/database/mysql.yaml
+++ b/docker/services/database/mysql.yaml
@@ -82,17 +82,29 @@ outputs:
perm: '0644'
docker_config:
step_2:
- mysql_bootstrap:
+ mysql_data_ownership:
start_order: 0
detach: false
image: *mysql_image
net: host
+ user: root
+ # Kolla does only non-recursive chown
+ command: ['chown', '-R', 'mysql:', '/var/lib/mysql']
+ volumes:
+ - /var/lib/mysql:/var/lib/mysql
+ mysql_bootstrap:
+ start_order: 1
+ detach: false
+ image: *mysql_image
+ net: host
+ # Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done
+ command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
volumes: &mysql_volumes
- /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
- /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro
- /etc/localtime:/etc/localtime:ro
- /etc/hosts:/etc/hosts:ro
- - mariadb:/var/lib/mysql/
+ - /var/lib/mysql:/var/lib/mysql
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
@@ -110,7 +122,7 @@ outputs:
- {get_param: MysqlRootPassword}
- {get_param: [DefaultPasswords, mysql_root_password]}
mysql:
- start_order: 1
+ start_order: 2
image: *mysql_image
restart: always
net: host
@@ -123,13 +135,15 @@ outputs:
config_volume: 'mysql_init_tasks'
puppet_tags: 'mysql_database,mysql_grant,mysql_user'
step_config: 'include ::tripleo::profile::base::database::mysql'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+ config_image: *mysql_image
volumes:
- - "mariadb:/var/lib/mysql/:ro"
+ - "/var/lib/mysql:/var/lib/mysql/:ro"
- "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf
+ host_prep_tasks:
+ - name: create /var/lib/mysql
+ file:
+ path: /var/lib/mysql
+ state: directory
upgrade_tasks:
- name: Stop and disable mysql service
tags: step2
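For reference, a minimal sketch of the bootstrap guard added above, with the reasoning spelled out in comments (kolla_start is Kolla's standard container entrypoint, as used elsewhere in this change):

command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
# /var/lib/mysql/mysql is the MariaDB system schema directory: if it already
# exists, a previous bootstrap succeeded and the non-idempotent KOLLA_BOOTSTRAP
# path is skipped; otherwise kolla_start performs the initial bootstrap.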
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
new file mode 100644
index 00000000..a64d1507
--- /dev/null
+++ b/docker/services/gnocchi-api.yaml
@@ -0,0 +1,118 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized gnocchi service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiApiImage:
+ description: image
+ default: 'centos-binary-gnocchi-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GnocchiApiPuppetBase:
+ type: ../../puppet/services/gnocchi-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the gnocchi API role.
+ value:
+ service_name: {get_attr: [GnocchiApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [GnocchiApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [GnocchiApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_api_paste_ini,gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/gnocchi/gnocchi.conf
+ owner: gnocchi
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf
+ - dest: /etc/httpd/conf.d/10-gnocchi_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-gnocchi_wsgi.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /var/www/cgi-bin/gnocchi/app
+ owner: gnocchi
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/gnocchi/app
+ docker_config:
+ step_3:
+ gnocchi-init-log:
+ start_order: 0
+ image: *gnocchi_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/gnocchi && chown gnocchi:gnocchi /var/log/gnocchi']
+ volumes:
+ - logs:/var/log
+ gnocchi_db_sync:
+ start_order: 1
+ image: *gnocchi_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ command: ["/usr/bin/gnocchi-upgrade", "--skip-storage"]
+ step_4:
+ gnocchi-api:
+ image: *gnocchi_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/gnocchi/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/gnocchi-metricd.yaml b/docker/services/gnocchi-metricd.yaml
new file mode 100644
index 00000000..6437e942
--- /dev/null
+++ b/docker/services/gnocchi-metricd.yaml
@@ -0,0 +1,78 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Gnocchi Metricd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiMetricdImage:
+ description: image
+ default: 'centos-binary-gnocchi-metricd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GnocchiMetricdBase:
+ type: ../../puppet/services/gnocchi-metricd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Gnocchi API role.
+ value:
+ service_name: {get_attr: [GnocchiMetricdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiMetricdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiMetricdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiMetricdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_metricd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-metricd.json:
+ command: /usr/bin/gnocchi-metricd
+ config_files:
+ - dest: /etc/gnocchi/gnocchi.conf
+ owner: gnocchi
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf
+ docker_config:
+ step_4:
+ gnocchi_metricd:
+ image: *gnocchi_metricd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/gnocchi-statsd.yaml b/docker/services/gnocchi-statsd.yaml
new file mode 100644
index 00000000..32c16521
--- /dev/null
+++ b/docker/services/gnocchi-statsd.yaml
@@ -0,0 +1,78 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack containerized Gnocchi Statsd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiStatsdImage:
+ description: image
+ default: 'centos-binary-gnocchi-statsd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ GnocchiStatsdBase:
+ type: ../../puppet/services/gnocchi-statsd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Gnocchi API role.
+ value:
+ service_name: {get_attr: [GnocchiStatsdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiStatsdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiStatsdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiStatsdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_statsd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-statsd.json:
+ command: /usr/bin/gnocchi-statsd
+ config_files:
+ - dest: /etc/gnocchi/gnocchi.conf
+ owner: gnocchi
+ perm: '0640'
+ source: /var/lib/kolla/config_files/src/etc/gnocchi/gnocchi.conf
+ docker_config:
+ step_4:
+ gnocchi_statsd:
+ image: *gnocchi_statsd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/:/var/lib/kolla/config_files/src:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml
index 2a27efb4..85ad9212 100644
--- a/docker/services/heat-api-cfn.yaml
+++ b/docker/services/heat-api-cfn.yaml
@@ -13,7 +13,7 @@ parameters:
default: 'centos-binary-heat-api-cfn:latest'
type: string
# we configure all heat services in the same heat engine container
- DockerHeatEngineImage:
+ DockerHeatConfigImage:
description: image
default: 'centos-binary-heat-engine:latest'
type: string
@@ -62,7 +62,7 @@ outputs:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/heat_api_cfn.json:
command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml
index c429870b..12884f56 100644
--- a/docker/services/heat-api.yaml
+++ b/docker/services/heat-api.yaml
@@ -13,7 +13,7 @@ parameters:
default: 'centos-binary-heat-api:latest'
type: string
# we configure all heat services in the same heat engine container
- DockerHeatEngineImage:
+ DockerHeatConfigImage:
description: image
default: 'centos-binary-heat-engine:latest'
type: string
@@ -62,7 +62,7 @@ outputs:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/heat_api.json:
command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml
index 8c18a160..678b8c27 100644
--- a/docker/services/ironic-conductor.yaml
+++ b/docker/services/ironic-conductor.yaml
@@ -50,6 +50,7 @@ outputs:
- get_attr: [IronicConductorBase, role_data, config_settings]
# to avoid hard linking errors we store these on the same
# volume/device as the ironic master_path
+ # https://github.com/docker/docker/issues/7457
- ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot
- ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images
- ironic::pxe::tftp_root: /var/lib/ironic/tftpboot
@@ -84,18 +85,12 @@ outputs:
recurse: true
docker_config:
step_4:
- ironic-init-dirs:
- image: &ironic_image
+ ironic_conductor:
+ start_order: 80
+ image:
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
- user: root
- command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot']
- volumes:
- - ironic:/var/lib/ironic
- ironic_conductor:
- start_order: 80
- image: *ironic_image
net: host
privileged: true
restart: always
@@ -108,9 +103,42 @@ outputs:
- /sys:/sys
- /dev:/dev
- /run:/run #shared?
- - ironic:/var/lib/ironic
+ - /var/lib/ironic:/var/lib/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create ironic persistent data directory
+ file:
+ path: /var/lib/ironic
+ state: directory
+ - name: stat /httpboot
+ stat: path=/httpboot
+ register: stat_httpboot
+ - name: stat /tftpboot
+ stat: path=/tftpboot
+ register: stat_tftpboot
+ - name: stat /var/lib/ironic/httpboot
+ stat: path=/var/lib/ironic/httpboot
+ register: stat_ironic_httpboot
+ - name: stat /var/lib/ironic/tftpboot
+ stat: path=/var/lib/ironic/tftpboot
+ register: stat_ironic_tftpboot
+ # cannot use 'copy' module as with 'remote_src' it doesn't support recursion
+ - name: migrate /httpboot to containerized (if applicable)
+ command: /bin/cp -R /httpboot /var/lib/ironic/httpboot
+ when: stat_httpboot.stat.exists and not stat_ironic_httpboot.stat.exists
+ - name: migrate /tftpboot to containerized (if applicable)
+ command: /bin/cp -R /tftpboot /var/lib/ironic/tftpboot
+ when: stat_tftpboot.stat.exists and not stat_ironic_tftpboot.stat.exists
+ # Even if there was nothing to copy from original locations,
+ # we need to create the dirs before starting the containers
+ - name: ensure ironic pxe directories exist
+ file:
+ path: /var/lib/ironic/{{ item }}
+ state: directory
+ with_items:
+ - httpboot
+ - tftpboot
upgrade_tasks:
- name: Stop and disable ironic_conductor service
tags: step2
diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml
index 370b665e..c6607094 100644
--- a/docker/services/ironic-pxe.yaml
+++ b/docker/services/ironic-pxe.yaml
@@ -112,7 +112,7 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /dev/log:/dev/log
- - ironic:/var/lib/ironic/
+ - /var/lib/ironic:/var/lib/ironic/
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
ironic_pxe_http:
@@ -127,6 +127,11 @@ outputs:
- /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- - ironic:/var/lib/ironic/
+ - /var/lib/ironic:/var/lib/ironic/
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create ironic persistent data directory
+ file:
+ path: /var/lib/ironic
+ state: directory
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index bd3a010e..63713677 100644
--- a/docker/services/keystone.yaml
+++ b/docker/services/keystone.yaml
@@ -30,6 +30,12 @@ parameters:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
+ KeystoneTokenProvider:
+ description: The keystone token format
+ type: string
+ default: 'uuid'
+ constraints:
+ - allowed_values: ['uuid', 'fernet']
resources:
@@ -40,6 +46,9 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+conditions:
+ keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+
outputs:
role_data:
description: Role data for the Keystone API role.
@@ -80,6 +89,16 @@ outputs:
owner: keystone
perm: '0600'
source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1
+ - dest: /etc/keystone/fernet-keys/0
+ owner: keystone
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/keystone/fernet-keys/0
+ optional: {if: [keystone_fernet_tokens, false, true]}
+ - dest: /etc/keystone/fernet-keys/1
+ owner: keystone
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/keystone/fernet-keys/1
+ optional: {if: [keystone_fernet_tokens, false, true]}
- dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf
owner: root
perm: '0644'
@@ -149,11 +168,10 @@ outputs:
config_volume: 'keystone_init_tasks'
puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
step_config: 'include ::tripleo::profile::base::keystone'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+ config_image: *keystone_image
upgrade_tasks:
- name: Stop and disable keystone service (running under httpd)
tags: step2
service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [KeystoneBase, role_data, metadata_settings]
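For reference, the `optional` flag on the fernet key entries above follows Heat's {if: [condition, value_if_true, value_if_false]} resolution, so the keys become mandatory exactly when fernet tokens are selected. A minimal sketch of the two allowed outcomes (condition copied from the change):

conditions:
  keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
# KeystoneTokenProvider: fernet -> keystone_fernet_tokens is true  -> optional: false (key files must exist)
# KeystoneTokenProvider: uuid   -> keystone_fernet_tokens is false -> optional: true  (key files may be absent)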
diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml
index fd72e344..db2721bd 100644
--- a/docker/services/mistral-engine.yaml
+++ b/docker/services/mistral-engine.yaml
@@ -72,7 +72,7 @@ outputs:
docker_config:
step_4:
mistral_engine:
- image: &mistral_engine_image
+ image:
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml
index 0274ff48..d68830ed 100644
--- a/docker/services/mistral-executor.yaml
+++ b/docker/services/mistral-executor.yaml
@@ -72,7 +72,7 @@ outputs:
docker_config:
step_4:
mistral_executor:
- image: &mistral_executor_image
+ image:
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml
index a4854d90..9be13ad3 100644
--- a/docker/services/neutron-dhcp.yaml
+++ b/docker/services/neutron-dhcp.yaml
@@ -8,7 +8,7 @@ parameters:
description: namespace
default: 'tripleoupstream'
type: string
- DockerNeutronApiImage:
+ DockerNeutronDHCPImage:
description: image
default: 'centos-binary-neutron-dhcp-agent:latest'
type: string
@@ -76,10 +76,10 @@ outputs:
docker_config:
step_4:
neutron_dhcp:
- image: &neutron_dhcp_image
+ image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronDHCPImage} ]
net: host
pid: host
privileged: true
diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml
index 61ad8f4a..db4fa863 100644
--- a/docker/services/neutron-l3.yaml
+++ b/docker/services/neutron-l3.yaml
@@ -72,7 +72,7 @@ outputs:
docker_config:
step_4:
neutronl3agent:
- image: &neutron_l3_agent_image
+ image:
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index 1c57bbf5..9e203b7a 100644
--- a/docker/services/nova-api.yaml
+++ b/docker/services/nova-api.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-api:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -60,7 +60,7 @@ outputs:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_api.json:
command: /usr/bin/nova-api
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index 7fc00b47..0afd3e70 100644
--- a/docker/services/nova-compute.yaml
+++ b/docker/services/nova-compute.yaml
@@ -83,6 +83,11 @@ outputs:
- /lib/modules:/lib/modules:ro
- /run:/run
- /var/lib/nova:/var/lib/nova
- - libvirtd:/var/lib/libvirt
+ - /var/lib/libvirt:/var/lib/libvirt
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/libvirt
+ file:
+ path: /var/lib/libvirt
+ state: directory
diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml
index 09a6d0f6..f85cf546 100644
--- a/docker/services/nova-conductor.yaml
+++ b/docker/services/nova-conductor.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-conductor:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -58,7 +58,7 @@ outputs:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_conductor.json:
command: /usr/bin/nova-conductor
diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml
index d3c0af44..170468a5 100644
--- a/docker/services/nova-ironic.yaml
+++ b/docker/services/nova-ironic.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-compute-ironic:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -54,7 +54,7 @@ outputs:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_ironic.json:
command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index e25b2014..a0437162 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -14,7 +14,7 @@ parameters:
type: string
# we configure libvirt via the nova-compute container due to coupling
# in the puppet modules
- DockerNovaComputeImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-compute:latest'
type: string
@@ -57,7 +57,7 @@ outputs:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova-libvirt.json:
command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
@@ -88,7 +88,15 @@ outputs:
- /var/lib/nova:/var/lib/nova
# Needed to use host's virtlogd
- /var/run/libvirt:/var/run/libvirt
- - libvirtd:/var/lib/libvirt
- - nova_libvirt_qemu:/etc/libvirt/qemu
+ - /var/lib/libvirt:/var/lib/libvirt
+ - /etc/libvirt/qemu:/etc/libvirt/qemu
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create libvirt persistent data directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /etc/libvirt/qemu
+ - /var/lib/libvirt
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
index 0f32e33f..e49839b5 100644
--- a/docker/services/nova-placement.yaml
+++ b/docker/services/nova-placement.yaml
@@ -53,7 +53,7 @@ outputs:
config_volume: nova_placement
puppet_tags: nova_config
step_config: *step_config
- config_image:
+ config_image: &nova_placement_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
@@ -92,10 +92,7 @@ outputs:
step_3:
nova_placement:
start_order: 1
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
+ image: *nova_placement_image
net: host
user: root
restart: always
diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml
index 0b64ca37..de1199e1 100644
--- a/docker/services/nova-scheduler.yaml
+++ b/docker/services/nova-scheduler.yaml
@@ -12,7 +12,7 @@ parameters:
description: image
default: 'centos-binary-nova-scheduler:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
@@ -57,7 +57,7 @@ outputs:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_scheduler.json:
command: /usr/bin/nova-scheduler
diff --git a/docker/services/panko-api.yaml b/docker/services/panko-api.yaml
new file mode 100644
index 00000000..32efc5d7
--- /dev/null
+++ b/docker/services/panko-api.yaml
@@ -0,0 +1,119 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Panko service configured with docker
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerPankoApiImage:
+ description: image
+ default: 'centos-binary-panko-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ PankoApiPuppetBase:
+ type: ../../puppet/services/panko-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Panko API role.
+ value:
+ service_name: {get_attr: [PankoApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [PankoApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [PankoApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [PankoApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: panko
+ puppet_tags: panko_api_paste_ini,panko_config
+ step_config: *step_config
+ config_image: &panko_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/panko-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ config_files:
+ - dest: /etc/httpd/conf.d/10-panko_wsgi.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-panko_wsgi.conf
+ - dest: /etc/httpd/conf/httpd.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
+ - dest: /etc/httpd/conf/ports.conf
+ owner: root
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
+ - dest: /etc/panko/panko.conf
+ owner: panko
+ perm: '0600'
+ source: /var/lib/kolla/config_files/src/etc/panko/panko.conf
+ - dest: /var/www/cgi-bin/panko/app
+ owner: panko
+ perm: '0644'
+ source: /var/lib/kolla/config_files/src/var/www/cgi-bin/panko/app
+ docker_config:
+ step_3:
+ panko-init-log:
+ start_order: 0
+ image: *panko_image
+ user: root
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/panko && chown panko:panko /var/log/panko']
+ volumes:
+ - logs:/var/log
+ panko_db_sync:
+ start_order: 1
+ image: *panko_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ - /var/lib/config-data/panko/etc/panko:/etc/panko:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log
+ command: /usr/bin/panko-dbsync
+ step_4:
+ panko_api:
+ start_order: 2
+ image: *panko_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/panko/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/panko/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/rabbitmq.yaml b/docker/services/rabbitmq.yaml
index 573ec178..341ec3de 100644
--- a/docker/services/rabbitmq.yaml
+++ b/docker/services/rabbitmq.yaml
@@ -90,7 +90,7 @@ outputs:
- /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- - rabbitmq:/var/lib/rabbitmq/
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
@@ -116,9 +116,14 @@ outputs:
- /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- - rabbitmq:/var/lib/rabbitmq/
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/rabbitmq
+ file:
+ path: /var/lib/rabbitmq
+ state: directory
upgrade_tasks:
- name: Stop and disable rabbitmq service
tags: step2
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
index 892da77c..84c56b5b 100644
--- a/docker/services/services.yaml
+++ b/docker/services/services.yaml
@@ -74,6 +74,11 @@ outputs:
{get_attr: [ServiceChain, role_data, docker_config]}
docker_puppet_tasks:
{get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+ host_prep_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks
+ expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
upgrade_tasks:
yaql:
# Note we use distinct() here to filter any identical tasks, e.g yum update for all services
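For context, a minimal sketch of how the yaql expression added above aggregates host_prep_tasks across services and drops exact duplicates; the data below is illustrative only, reusing the mysql and rabbitmq tasks from this change, with an empty entry standing in for a service that exposes no host_prep_tasks:

host_prep_tasks:
  yaql:
    expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
    data:
      - host_prep_tasks:
          - name: create /var/lib/mysql
            file:
              path: /var/lib/mysql
              state: directory
      - host_prep_tasks:
          - name: create /var/lib/mysql
            file:
              path: /var/lib/mysql
              state: directory
          - name: create /var/lib/rabbitmq
            file:
              path: /var/lib/rabbitmq
              state: directory
      - {}
# resolves to two tasks: the duplicate "create /var/lib/mysql" entry is removed
# by distinct(), and the entry with no host_prep_tasks contributes nothing.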
diff --git a/docker/services/swift-proxy.yaml b/docker/services/swift-proxy.yaml
index 93e21c81..0d7cd7b9 100644
--- a/docker/services/swift-proxy.yaml
+++ b/docker/services/swift-proxy.yaml
@@ -72,10 +72,15 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /srv/node
+ file:
+ path: /srv/node
+ state: directory
upgrade_tasks:
- name: Stop and disable swift_proxy service
tags: step2
diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml
index 8e76504c..301ef69b 100644
--- a/docker/services/swift-storage.yaml
+++ b/docker/services/swift-storage.yaml
@@ -62,7 +62,7 @@ outputs:
config_volume: swift
puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
step_config: *step_config
- config_image:
+ config_image: &swift_proxy_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
@@ -99,20 +99,17 @@ outputs:
# volume during the configuration stage. We just need to create this
# directory and make sure it's owned by swift.
swift_setup_srv:
- image:
+ image: &swift_account_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
user: root
- command: ['/bin/bash', '-c', 'mkdir /srv/node && chown swift:swift /srv/node']
+ command: ['chown', '-R', 'swift:', '/srv/node']
volumes:
- - swift-srv:/srv
+ - /srv/node:/srv/node
step_4:
swift_account_auditor:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
@@ -123,15 +120,12 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: &kolla_env
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
swift_account_reaper:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
@@ -142,14 +136,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_account_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
@@ -160,14 +151,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_account_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
@@ -178,11 +166,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_container_auditor:
- image:
+ image: &swift_container_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
@@ -196,14 +184,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_container_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
@@ -214,14 +199,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_container_updater:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
@@ -232,14 +214,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_container_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
@@ -250,11 +229,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_object_auditor:
- image:
+ image: &swift_object_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
@@ -268,14 +247,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_object_expirer:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ image: *swift_proxy_image
net: host
user: swift
restart: always
@@ -286,14 +262,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_object_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
@@ -304,14 +277,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_object_updater:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
@@ -322,14 +292,11 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
swift_object_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
@@ -340,9 +307,14 @@ outputs:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /run:/run
- - swift-srv:/srv
+ - /srv/node:/srv/node
- /dev:/dev
environment: *kolla_env
+ host_prep_tasks:
+ - name: create /srv/node
+ file:
+ path: /srv/node
+ state: directory
upgrade_tasks:
- name: Stop and disable swift storage services
tags: step2
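Most of this hunk swaps the repeated list_join image definitions for YAML anchors (&swift_account_image, &swift_container_image, &swift_object_image) and aliases, and moves /srv/node to a host bind mount prepared by host_prep_tasks. A small PyYAML check, with a placeholder image name, showing that an alias resolves to the same value as its anchor, which is why the duplicated blocks can collapse:

    import yaml  # assumes PyYAML is available

    doc = ("swift_account_auditor:\n"
           "  image: &swift_account_image example-namespace/swift-account:latest\n"
           "swift_account_reaper:\n"
           "  image: *swift_account_image\n")
    data = yaml.safe_load(doc)
    assert (data["swift_account_reaper"]["image"]
            == data["swift_account_auditor"]["image"])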
diff --git a/environments/contrail/roles_data_contrail.yaml b/environments/contrail/roles_data_contrail.yaml
index 5f6c4691..d6d6f291 100644
--- a/environments/contrail/roles_data_contrail.yaml
+++ b/environments/contrail/roles_data_contrail.yaml
@@ -29,6 +29,7 @@
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
@@ -115,6 +116,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -140,6 +142,7 @@
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -156,6 +159,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -173,6 +177,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -188,6 +193,7 @@
- name: ContrailController
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailConfig
- OS::TripleO::Services::ContrailControl
- OS::TripleO::Services::ContrailDatabase
@@ -203,6 +209,7 @@
- name: ContrailAnalytics
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalytics
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -215,6 +222,7 @@
- name: ContrailAnalyticsDatabase
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalyticsDatabase
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -227,6 +235,7 @@
- name: ContrailTsn
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailTsn
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
diff --git a/environments/docker.yaml b/environments/docker.yaml
index 8a977f26..755e94c2 100644
--- a/environments/docker.yaml
+++ b/environments/docker.yaml
@@ -19,11 +19,6 @@ resource_registry:
OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
- # FIXME: these need to go into a environments/services-docker dir?
- OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml
- OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml
- OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml
- OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml
OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
@@ -31,16 +26,20 @@ resource_registry:
OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
- OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml
- OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml
- OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml
- OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+ OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+ OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+ OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
diff --git a/environments/enable-internal-tls.yaml b/environments/enable-internal-tls.yaml
index f485e4a5..e245a6af 100644
--- a/environments/enable-internal-tls.yaml
+++ b/environments/enable-internal-tls.yaml
@@ -9,6 +9,8 @@ parameter_defaults:
ipa_enroll: True
resource_registry:
+ OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml
+
OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml
OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
index f59b0414..8f74ec35 100644
--- a/environments/hyperconverged-ceph.yaml
+++ b/environments/hyperconverged-ceph.yaml
@@ -6,6 +6,7 @@ resource_registry:
parameter_defaults:
ComputeServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
diff --git a/environments/neutron-bgpvpn.yaml b/environments/neutron-bgpvpn.yaml
new file mode 100644
index 00000000..dc6c1454
--- /dev/null
+++ b/environments/neutron-bgpvpn.yaml
@@ -0,0 +1,16 @@
+# A Heat environment file that can be used to deploy the Neutron BGPVPN service
+#
+# Currently there are four service providers available for Neutron BGPVPN.
+# The default option is a dummy driver that allows the API to be enabled.
+# To enable another backend, replace the value of BgpvpnServiceProvider with one of:
+#
+# - Bagpipe: BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default
+# - OpenContrail: BGPVPN:OpenContrail:networking_bgpvpn.neutron.services.service_drivers.opencontrail.opencontrail.OpenContrailBGPVPNDriver:default
+# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default
+# - Nuage: BGPVPN:Nuage:nuage_neutron.bgpvpn.services.service_drivers.driver.NuageBGPVPNDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronBgpvpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: 'networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
diff --git a/environments/services-docker/ironic.yaml b/environments/services-docker/ironic.yaml
new file mode 100644
index 00000000..e927ecb3
--- /dev/null
+++ b/environments/services-docker/ironic.yaml
@@ -0,0 +1,5 @@
+resource_registry:
+ OS::TripleO::Services::IronicApi: ../../docker/services/ironic-api.yaml
+ OS::TripleO::Services::IronicConductor: ../../docker/services/ironic-conductor.yaml
+ OS::TripleO::Services::IronicPxe: ../../docker/services/ironic-pxe.yaml
+ OS::TripleO::Services::NovaIronic: ../../docker/services/nova-ironic.yaml
diff --git a/environments/services-docker/mistral.yaml b/environments/services-docker/mistral.yaml
new file mode 100644
index 00000000..a215d2a0
--- /dev/null
+++ b/environments/services-docker/mistral.yaml
@@ -0,0 +1,4 @@
+resource_registry:
+ OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
diff --git a/environments/services-docker/zaqar.yaml b/environments/services-docker/zaqar.yaml
new file mode 100644
index 00000000..ca0b3b15
--- /dev/null
+++ b/environments/services-docker/zaqar.yaml
@@ -0,0 +1,2 @@
+resource_registry:
+ OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 6f83cc4b..0d0fa3f1 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -11,7 +11,7 @@ if [ -e $OK ] ; then
exit 0
fi
-retryCount=0
+retry_max_count=10
opts=
config_opts=
attach_opts=
@@ -157,27 +157,41 @@ else
fi
function retry() {
- if [[ $retryCount < 3 ]]; then
- $@
- if ! [[ $? == 0 ]]; then
- retryCount=$(echo $retryCount + 1 | bc)
- echo "WARN: Failed to connect when running '$@', retrying..."
- retry $@
- else
- retryCount=0
+    # Inhibit -e since we want to retry without exiting.
+ set +e
+ # Retry delay (seconds)
+ retry_delay=2.0
+ retry_count=0
+ mycli="$@"
+ while [ $retry_count -lt ${retry_max_count} ]
+ do
+ echo "INFO: Sleeping ${retry_delay} ..."
+ sleep ${retry_delay}
+ echo "INFO: Executing '${mycli}' ..."
+ ${mycli}
+ if [ $? -eq 0 ]; then
+ echo "INFO: Ran '${mycli}' successfully, not retrying..."
+ break
+ else
+ echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..."
+ retry_count=$(echo $retry_count + 1 | bc)
+ fi
+ done
+
+ if [ $retry_count -ge ${retry_max_count} ]; then
+ echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'"
+ exit 1
fi
- else
- echo "ERROR: Failed to connect after 3 attempts when running '$@'"
- exit 1
- fi
+ # Re-enable -e when exiting retry()
+ set -e
}
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
@@ -220,7 +234,7 @@ case "${REG_METHOD:-}" in
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
@@ -229,7 +243,7 @@ case "${REG_METHOD:-}" in
retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
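The rewritten retry() bounds the number of attempts with retry_max_count instead of recursing on failure. The same pattern sketched in Python, purely for illustration (none of these names come from the script itself):

    import subprocess
    import sys
    import time

    def retry(cmd, retry_max_count=10, retry_delay=2.0):
        """Sleep, run cmd, stop on success, give up after retry_max_count attempts."""
        for attempt in range(retry_max_count):
            time.sleep(retry_delay)
            if subprocess.call(cmd) == 0:
                return
            print("WARN: %r failed, retrying (attempt #%d)..." % (cmd, attempt))
        sys.exit("ERROR: %r failed after %d attempts" % (cmd, retry_max_count))

    retry(["subscription-manager", "register"])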
diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh
deleted file mode 100644
index d4c29673..00000000
--- a/extraconfig/tasks/aodh_data_migration.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# This delivers the aodh data migration script to be invoked as part of the tripleo
-# major upgrade workflow to migrate all the alarm data from mongodb to mysql.
-# This needs to run post controller node upgrades so new aodh mysql db configured and
-# running.
-#
-set -eu
-
-#Get existing mongodb connection
-MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)"
-
-# Get the aodh database string from hiera data
-MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)"
-
-#Run migration
-/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION
-
-
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
deleted file mode 100644
index cf5d7a84..00000000
--- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for ceilometer configuration under httpd during upgrades
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
- CeilometerWsgiMitakaNewtonPreUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- config:
- get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
-
- CeilometerWsgiMitakaNewtonUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\nset -e\n\n"
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - "disable_standalone_ceilometer_api\n\n"
-
- CeilometerWsgiMitakaNewtonPostUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -e
- /usr/bin/systemctl reload httpd
-
- CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
deleted file mode 100755
index 8bdff5e7..00000000
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster()
-{
- if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
- fi
-}
-
-check_pcsd()
-{
- if pcs status 2>&1 | grep -E 'Offline'; then
- echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
- exit 1
- fi
-}
-
-mysql_need_update()
-{
- # Shall we upgrade mysql data directory during the stack upgrade?
- if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
- elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
- else
- DO_MYSQL_UPGRADE=1
- fi
-}
-
-check_disk_for_mysql_dump()
-{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
- mysql_need_update
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
- fi
- fi
-}
-
-check_python_rpm()
-{
- # If for some reason rpm-python are missing we want to error out early enough
- if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
- fi
-}
-
-check_clean_cluster()
-{
- if pcs status | grep -q Stopped:; then
- echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
- exit 1
- fi
-}
-
-check_galera_root_password()
-{
- # BZ: 1357112
- if [ ! -e /root/.my.cnf ]; then
- echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
- exit 1
- fi
-}
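Among the removed checks, check_disk_for_mysql_dump only allowed the upgrade to proceed when the backup directory could hold the current database plus its dump, padded by the 1.2 safety ratio. A worked example of that arithmetic with illustrative sizes:

    MYSQL_BACKUP_SIZE_RATIO = 1.2

    database_size = 6 * 1024**3   # bytes under /var/lib/mysql (example value)
    backup_size = 4 * 1024**3     # bytes the mysqldump would produce (example value)
    free_space = 20 * 1024**3     # bytes available in the backup directory

    required_space = int((database_size + backup_size) * MYSQL_BACKUP_SIZE_RATIO)
    # required_space == 12884901888 (12 GiB); the check aborts unless it fits
    assert required_space < free_space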
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
deleted file mode 100755
index 080831ab..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster
-check_pcsd
-if [[ -n $(is_bootstrap_node) ]]; then
- check_clean_cluster
-fi
-check_python_rpm
-check_galera_root_password
-check_disk_for_mysql_dump
-
-# We want to disable fencing during the cluster --stop as it might fence
-# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
-if [[ -n $(is_bootstrap_node) ]]; then
- STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
- # We create this empty file if stonith was set to true so we can reenable stonith in step2
- rm -f /var/tmp/stonith-true
- if [ $STONITH_STATE == "true" ]; then
- touch /var/tmp/stonith-true
- fi
- pcs property set stonith-enabled=false
-fi
-
-# Migrate to HA NG and fix up rabbitmq queues
-# We fix up the rabbitmq ha queues after the migration because it will
-# restart the rabbitmq resource. Doing it after the migration means no other
-# services will be restart as there are no other constraints
-if [[ -n $(is_bootstrap_node) ]]; then
- migrate_full_to_ng_ha
- rabbitmq_newton_ocata_upgrade
-fi
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
deleted file mode 100755
index 4b323854..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_sync_timeout=1800
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versioning, but this can be overridden manually
-# to support specific upgrade scenario
-
-# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
-# later
-mysql_need_update
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overridden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ -f /var/tmp/stonith-true ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
- rm -f /var/tmp/stonith-true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
-
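The removed step 2 script first stopped every service returned by services_to_migrate and then polled systemctl is-active until the unit was no longer "active", treating inactive, unknown and failed alike (see LP 1627254). A hedged Python sketch of that stop-and-poll loop, with illustrative names:

    import subprocess
    import time

    def stop_and_wait(service, timeout=600, interval=3):
        subprocess.call(["systemctl", "stop", service])
        deadline = time.time() + timeout
        while time.time() < deadline:
            state = subprocess.run(["systemctl", "is-active", service],
                                   capture_output=True, text=True).stdout.strip()
            if state != "active":  # inactive, unknown or failed: no longer running
                return
            print("%s still active, sleeping %d seconds." % (service, interval))
            time.sleep(interval)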
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
deleted file mode 100755
index a3cbd945..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
-
- tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
- done
-
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
- fi
-
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
- done
-fi
-
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
-
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
- cinder-manage db sync
- glance-manage db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage upgrade heads
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
deleted file mode 100755
index d2cb9553..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
-start_or_enable_service redis
-check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
-
-# Swift isn't controled by pacemaker
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
deleted file mode 100755
index fa95f1f8..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-if [[ -n $(is_bootstrap_node) ]]; then
- # run gnocchi upgrade
- gnocchi-upgrade
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
deleted file mode 100755
index d569084d..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
-fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
deleted file mode 100644
index 74d3be71..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-heat_template_version: ocata
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
- MySqlMajorUpgrade:
- type: string
- description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade
- constraints:
- - allowed_values: ['auto', 'yes', 'no']
- default: 'auto'
- KeepSaharaServicesOnUpgrade:
- type: boolean
- default: true
- description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
-
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ControllerPacemakerUpgradeConfig_Step1:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_1.sh
-
- ControllerPacemakerUpgradeDeployment_Step1:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step2:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_2.sh
-
- ControllerPacemakerUpgradeDeployment_Step2:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step3:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
-
- ControllerPacemakerUpgradeDeployment_Step3:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step4:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_4.sh
-
- ControllerPacemakerUpgradeDeployment_Step4:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step3
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step5:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_5.sh
-
- ControllerPacemakerUpgradeDeployment_Step5:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step4
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step6:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
- params:
- KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_6.sh
-
- ControllerPacemakerUpgradeDeployment_Step6:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step5
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
deleted file mode 100644
index ae22a1e7..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/bin/bash
-
-# Special pieces of upgrade migration logic go into this
-# file. E.g. Pacemaker cluster transitions for existing deployments,
-# matching changes to overcloud_controller_pacemaker.pp (Puppet
-# handles deployment, this file handles migrations).
-#
-# This file shouldn't execute any action on its own, all logic should
-# be wrapped into bash functions. Upgrade scripts will source this
-# file and call the functions defined in this file where appropriate.
-#
-# The migration functions should be idempotent. If the migration has
-# been already applied, it should be possible to call the function
-# again without damaging the deployment or failing the upgrade.
-
-# If the major version of mysql is going to change after the major
-# upgrade, the database must be upgraded on disk to avoid failures
-# due to internal incompatibilities between major mysql versions
-# https://bugs.launchpad.net/tripleo/+bug/1587449
-# This function detects whether a database upgrade is required
-# after a mysql package upgrade. It returns 0 when no major upgrade
-# has to take place, 1 otherwise.
-function is_mysql_upgrade_needed {
- # The name of the package which provides mysql might differ
- # after the upgrade. Consider the generic package name, which
- # should capture the major version change (e.g. 5.5 -> 10.1)
- local name="mariadb"
- local output
- local ret
- set +e
- output=$(yum -q check-update $name)
- ret=$?
- set -e
- if [ $ret -ne 100 ]; then
- # no updates so we exit
- echo "0"
- return
- fi
-
- local currentepoch=$(rpm -q --qf "%{epoch}" $name)
- local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
- local currentrelease=$(rpm -q --qf "%{release}" $name)
- local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
- local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
- local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
- local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
-
- # With this we trigger the dump restore/path if we change either epoch or
- # version in the package If only the release tag changes we do not do it
- # FIXME: we could refine this by trying to parse the mariadb version
- # into X.Y.Z and trigger the update only if X and/or Y change.
- output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
- if [ "$output" != "-1" ]; then
- echo "0"
- return
- fi
- echo "1"
-}
-
-# This function returns the list of services to be migrated away from pacemaker
-# and to systemd. The reason to have these services in a separate function is because
-# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
-# and in the function to migrate the cluster from full HA to HA NG
-function services_to_migrate {
- # The following PCMK resources the ones the we are going to delete
- PCMK_RESOURCE_TODELETE="
- httpd-clone
- memcached-clone
- mongod-clone
- neutron-dhcp-agent-clone
- neutron-l3-agent-clone
- neutron-metadata-agent-clone
- neutron-netns-cleanup-clone
- neutron-openvswitch-agent-clone
- neutron-ovs-cleanup-clone
- neutron-server-clone
- openstack-aodh-evaluator-clone
- openstack-aodh-listener-clone
- openstack-aodh-notifier-clone
- openstack-ceilometer-central-clone
- openstack-ceilometer-collector-clone
- openstack-ceilometer-notification-clone
- openstack-cinder-api-clone
- openstack-cinder-scheduler-clone
- openstack-glance-api-clone
- openstack-gnocchi-metricd-clone
- openstack-gnocchi-statsd-clone
- openstack-heat-api-cfn-clone
- openstack-heat-api-clone
- openstack-heat-api-cloudwatch-clone
- openstack-heat-engine-clone
- openstack-nova-api-clone
- openstack-nova-conductor-clone
- openstack-nova-consoleauth-clone
- openstack-nova-novncproxy-clone
- openstack-nova-scheduler-clone
- openstack-sahara-api-clone
- openstack-sahara-engine-clone
- "
- echo $PCMK_RESOURCE_TODELETE
-}
-
-# This function will migrate a mitaka system where all the resources are managed
-# via pacemaker to a newton setup where only a few services will be managed by pacemaker
-# On a high-level it will operate as follows:
-# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
-# during the conversion
-# 2. Remove all the colocation constraints and then the ordering constraints, except the
-# ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode
-# 4. Remove all the resources that won't be managed by pacemaker in newton. The
-# outcome will be
-# that they are stopped and removed from pacemakers control
-# 5. Do a resource cleanup to make sure the cluster is in a clean state
-function migrate_full_to_ng_ha {
- if [[ -n $(pcmk_running) ]]; then
- pcs property set maintenance-mode=true
-
- # First we go through all the colocation constraints (except the ones
- # we want to keep, i.e. the haproxy/ip ones) and we remove those
- COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $COL_CONSTRAINTS; do
- log_debug "Deleting colocation constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
-
- # Now we kill all the ordering constraints (except the haproxy/ip ones)
- ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $ORD_CONSTRAINTS; do
- log_debug "Deleting ordering constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
- # At this stage all the pacemaker resources are removed from the CIB.
- # Once we remove the maintenance-mode those systemd resources will keep
- # on running. They shall be systemd enabled via the puppet converge
- # step later on
- pcs property set maintenance-mode=false
-
- # At this stage there are no constraints whatsoever except the haproxy/ip ones
- # which we want to keep. We now disable and then delete each resource
- # that will move to systemd.
- # We want the systemd resources be stopped before doing "yum update",
- # that way "systemctl try-restart <service>" is no-op because the
- # service was down already
- PCS_STATUS_OUTPUT="$(pcs status)"
- for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
- if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
- log_debug "Deleting $resource from the CIB"
- if ! pcs resource disable "$resource" --wait=600; then
- echo_error "ERROR: resource $resource failed to be disabled"
- exit 1
- fi
- pcs resource delete --force "$resource"
- else
- log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
- fi
- done
-
- # We need to do a pcs resource cleanup here + crm_resource --wait to
- # make sure the cluster is in a clean state before we stop everything,
- # upgrade and restart everything
- pcs resource cleanup
- # We are making sure here that the cluster is stable before proceeding
- if ! timeout -k 10 600 crm_resource --wait; then
- echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
- exit 1
- fi
- fi
-}
-
-function disable_standalone_ceilometer_api {
- if [[ -n $(is_bootstrap_node) ]]; then
- if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
- # Disable pacemaker resources for ceilometer-api
- manage_pacemaker_service disable openstack-ceilometer-api
- check_resource_pacemaker openstack-ceilometer-api stopped 600
- pcs resource delete openstack-ceilometer-api --wait=600
- fi
- fi
-}
-
-
-# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
-# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
-# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
-# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
-# Note that changing an attribute like this makes the rabbitmq resource restart
-function rabbitmq_newton_ocata_upgrade {
- if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
- # Number of controller is obtained by counting how many hostnames we
- # have in controller_node_names hiera key
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- fi
-}
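rabbitmq_newton_ocata_upgrade computes the new "ha-params" value as CEIL(N/2) for N controllers using shell integer arithmetic. The same calculation checked in Python for a few controller counts:

    def ha_queue_count(nr_controllers):
        # nr_controllers / 2 + nr_controllers % 2 in bash == ceil(N / 2)
        return nr_controllers // 2 + nr_controllers % 2

    assert [ha_queue_count(n) for n in (1, 2, 3, 4, 5)] == [1, 1, 2, 2, 3]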
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
deleted file mode 100644
index 45933fb7..00000000
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for performing aodh data migration
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
-
- AodhMysqlMigrationScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: aodh_data_migration.sh}
-
- AodhMysqlMigrationScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: AodhMysqlMigrationScriptConfig}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
deleted file mode 100644
index a8d43663..00000000
--- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
- tag == 'ceilometer-service'
-|> {
- hasrestart => true,
- restart => '/bin/true',
- start => '/bin/true',
- stop => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
- $pacemaker_master = true
- $sync_db = true
-} else {
- $pacemaker_master = false
- $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
- $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
- rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
- database_connection => $database_connection,
-}
-
-if $sync_db {
- include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
- enabled => true,
- service_name => 'httpd',
- keystone_password => hiera('ceilometer::keystone::auth::password'),
- identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'),
- auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'),
- keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
- service_enable => false,
- service_manage => true,
- service_restart => '/bin/true',
- purge_configs => false,
- purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::gnocchi::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-
-class { '::keystone::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::ceilometer::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 4c87373e..3bf72f14 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -97,17 +97,6 @@ return_code=$?
echo "$result"
echo "yum return code: $return_code"
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
- echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
- echo "ERROR: os-net-config configuration failed"
- exit $RETVAL
-fi
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 7a18c117..d9eaf8df 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -145,6 +145,7 @@ resource_registry:
OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml
OS::TripleO::Services::MySQLTLS: OS::Heat::None
+ OS::TripleO::Services::NeutronBgpvpnApi: OS::Heat::None
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
@@ -251,6 +252,7 @@ resource_registry:
OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml
OS::TripleO::Services::Vpp: OS::Heat::None
OS::TripleO::Services::Docker: OS::Heat::None
+ OS::TripleO::Services::CertmongerUser: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 581c4f0d..86af6114 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -42,7 +42,7 @@
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
{% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ depends_on: [{{role.name}}PrePuppet, {{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
{% else %}
depends_on:
{% for dep in roles %}
diff --git a/puppet/services/aodh-base.yaml b/puppet/services/aodh-base.yaml
index c2c2d023..48a2aecd 100644
--- a/puppet/services/aodh-base.yaml
+++ b/puppet/services/aodh-base.yaml
@@ -77,8 +77,10 @@ outputs:
aodh::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
aodh::rabbit_port: {get_param: RabbitClientPort}
aodh::keystone::authtoken::project_name: 'service'
+ aodh::keystone::authtoken::user_domain_name: 'Default'
+ aodh::keystone::authtoken::project_domain_name: 'Default'
aodh::keystone::authtoken::password: {get_param: AodhPassword}
- aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::auth::auth_password: {get_param: AodhPassword}
aodh::auth::auth_region: 'regionOne'
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 874c6893..a9c84289 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -98,14 +98,18 @@ outputs:
# we include db_sync class in puppet-tripleo
ceilometer::db::sync_db: false
ceilometer::keystone::authtoken::project_name: 'service'
+ ceilometer::keystone::authtoken::user_domain_name: 'Default'
+ ceilometer::keystone::authtoken::project_domain_name: 'Default'
ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword}
- ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
+ ceilometer::agent::auth::auth_user_domain_name: 'Default'
+ ceilometer::agent::auth::auth_project_domain_name: 'Default'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
diff --git a/puppet/services/certmonger-user.yaml b/puppet/services/certmonger-user.yaml
new file mode 100644
index 00000000..af9802b0
--- /dev/null
+++ b/puppet/services/certmonger-user.yaml
@@ -0,0 +1,28 @@
+heat_template_version: ocata
+
+description: >
+ Requests certificates using certmonger through Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the certmonger-user service
+ value:
+ service_name: certmonger_user
+ step_config: |
+ include ::tripleo::profile::base::certmonger_user
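The new certmonger_user profile is mapped to OS::Heat::None in the resource-registry change above, so it does nothing until it is switched on. A minimal environment-file sketch to enable it (the relative path assumes the file lives under environments/):

  resource_registry:
    OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml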
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index 49a5f613..958b0e7d 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -80,10 +80,12 @@ outputs:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::password: {get_param: CinderPassword}
cinder::keystone::authtoken::project_name: 'service'
+ cinder::keystone::authtoken::user_domain_name: 'Default'
+ cinder::keystone::authtoken::project_domain_name: 'Default'
cinder::api::enable_proxy_headers_parsing: true
cinder::api::nova_catalog_info: 'compute:nova:internalURL'
@@ -167,7 +169,7 @@ outputs:
- name: Stop cinder_api service (running under httpd)
tags: step1
service: name=httpd state=stopped
- when: "cinder_apache.rc == 0"
+ when: cinder_apache.rc == 0
- name: Stop and disable cinder_api service (pre-upgrade not under httpd)
tags: step1
when: cinder_api_enabled.rc == 0
diff --git a/puppet/services/cinder-backend-scaleio.yaml b/puppet/services/cinder-backend-scaleio.yaml
index eb709cd5..c4e4aa3d 100644
--- a/puppet/services/cinder-backend-scaleio.yaml
+++ b/puppet/services/cinder-backend-scaleio.yaml
@@ -106,6 +106,6 @@ outputs:
cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity}
cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion}
cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio}
- cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOThinProvision}
+ cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml
index 8bc9f2e3..fd1ee24b 100644
--- a/puppet/services/congress.yaml
+++ b/puppet/services/congress.yaml
@@ -74,8 +74,10 @@ outputs:
congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]}
congress::keystone::authtoken::project_name: 'service'
- congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ congress::keystone::authtoken::user_domain_name: 'Default'
+ congress::keystone::authtoken::project_domain_name: 'Default'
+ congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
congress::db::mysql::password: {get_param: CongressPassword}
congress::db::mysql::user: congress
diff --git a/puppet/services/database/redis-base.yaml b/puppet/services/database/redis-base.yaml
index 2b7dd430..af89ffb1 100644
--- a/puppet/services/database/redis-base.yaml
+++ b/puppet/services/database/redis-base.yaml
@@ -42,3 +42,4 @@ outputs:
redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+ redis::sentinel::sentinel_bind: {get_param: [ServiceNetMap, RedisNetwork]}
diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml
index 7cdd8451..5db8bec0 100644
--- a/puppet/services/etcd.yaml
+++ b/puppet/services/etcd.yaml
@@ -19,9 +19,9 @@ parameters:
via parameter_defaults in the resource registry.
type: json
EtcdInitialClusterToken:
- default: 'etcd-tripleo'
description: Initial cluster token for the etcd cluster during bootstrap.
type: string
+ hidden: true
MonitoringSubscriptionEtcd:
default: 'overcloud-etcd'
type: string
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index ce389dc1..b06f9993 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -48,6 +48,68 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ CephClientUserName:
+ default: openstack
+ type: string
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ GlanceNotifierStrategy:
+ description: Strategy to use for Glance notification queue
+ type: string
+ default: noop
+ GlanceLogFile:
+ description: The filepath of the file to use for logging messages from Glance.
+ type: string
+ default: ''
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ GlanceNfsEnabled:
+ default: false
+ description: >
+ When using GlanceBackend 'file', mount NFS share for image storage.
+ type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
conditions:
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
@@ -62,13 +124,6 @@ resources:
EndpointMap: {get_param: EndpointMap}
EnableInternalTLS: {get_param: EnableInternalTLS}
- GlanceBase:
- type: ./glance-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
outputs:
role_data:
description: Role data for the Glance API role.
@@ -80,7 +135,6 @@ outputs:
- glance
config_settings:
map_merge:
- - get_attr: [GlanceBase, role_data, config_settings]
- get_attr: [TLSProxyBase, role_data, config_settings]
- glance::api::database_connection:
list_join:
@@ -132,10 +186,41 @@ outputs:
- use_tls_proxy
- 'localhost'
- {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
+ glance_log_file: {get_param: GlanceLogFile}
+ glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::backend::swift::swift_store_user: service:glance
+ glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+ glance::backend::swift::swift_store_create_container_on_put: true
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ glance_backend: {get_param: GlanceBackend}
+ glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
+ glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
+ glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
+ glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ glance::notify::rabbitmq::notification_driver: messagingv2
+ tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
+ tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
+ tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
+ service_config_settings:
+ keystone:
+ glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+ glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+ glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+ glance::keystone::auth::password: {get_param: GlancePassword }
+ glance::keystone::auth::region: {get_param: KeystoneRegion}
+ glance::keystone::auth::tenant: 'service'
+ mysql:
+ glance::db::mysql::password: {get_param: GlancePassword}
+ glance::db::mysql::user: glance
+ glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ glance::db::mysql::dbname: glance
+ glance::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::glance::api
- service_config_settings:
- get_attr: [GlanceBase, role_data, service_config_settings]
upgrade_tasks:
- name: Check if glance_api is deployed
command: systemctl is-enabled openstack-glance-api
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
deleted file mode 100644
index f5548982..00000000
--- a/puppet/services/glance-base.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-heat_template_version: ocata
-
-description: >
- OpenStack Glance Common settings with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- CephClientUserName:
- default: openstack
- type: string
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- GlancePassword:
- description: The password for the glance service and db account, used by the glance services.
- type: string
- hidden: true
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
- GlanceNfsEnabled:
- default: false
- description: >
- When using GlanceBackend 'file', mount NFS share for image storage.
- type: boolean
- GlanceNfsShare:
- default: ''
- description: >
- NFS share to mount for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceNfsOptions:
- default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
- description: >
- NFS mount options for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceRbdPoolName:
- default: images
- type: string
- RabbitPassword:
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- KeystoneRegion:
- type: string
- default: 'regionOne'
- description: Keystone region for endpoint
-
-outputs:
- role_data:
- description: Role data for the Glance common role.
- value:
- service_name: glance_base
- config_settings:
- glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
- glance_log_file: {get_param: GlanceLogFile}
- glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::backend::swift::swift_store_user: service:glance
- glance::backend::swift::swift_store_key: {get_param: GlancePassword}
- glance::backend::swift::swift_store_create_container_on_put: true
- glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
- glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
- glance_backend: {get_param: GlanceBackend}
- glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
- glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
- glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
- glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- glance::notify::rabbitmq::notification_driver: messagingv2
- tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
- tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
- tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
- service_config_settings:
- keystone:
- glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
- glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
- glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
- glance::keystone::auth::password: {get_param: GlancePassword }
- glance::keystone::auth::region: {get_param: KeystoneRegion}
- glance::keystone::auth::tenant: 'service'
- mysql:
- glance::db::mysql::password: {get_param: GlancePassword}
- glance::db::mysql::user: glance
- glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- glance::db::mysql::dbname: glance
- glance::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index 08a939a6..f4629917 100644
--- a/puppet/services/gnocchi-api.yaml
+++ b/puppet/services/gnocchi-api.yaml
@@ -83,10 +83,12 @@ outputs:
gnocchi::api::enabled: true
gnocchi::api::enable_proxy_headers_parsing: true
gnocchi::api::service_name: 'httpd'
- gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
+ gnocchi::keystone::authtoken::user_domain_name: 'Default'
+ gnocchi::keystone::authtoken::project_domain_name: 'Default'
gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
gnocchi::wsgi::apache::servername:
str_replace:
@@ -103,10 +105,6 @@ outputs:
# internal_api_subnet - > IP/CIDR
gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
-
- gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
step_config: |
include ::tripleo::profile::base::gnocchi::api
service_config_settings:
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index c6310056..d7555561 100644
--- a/puppet/services/gnocchi-base.yaml
+++ b/puppet/services/gnocchi-base.yaml
@@ -70,8 +70,9 @@ outputs:
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
gnocchi::db::sync::extra_opts: '--skip-storage'
gnocchi::storage::swift::swift_user: 'service:gnocchi'
- gnocchi::storage::swift::swift_auth_version: 2
+ gnocchi::storage::swift::swift_auth_version: 3
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
+ gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
gnocchi::storage::ceph::ceph_keyring:
diff --git a/puppet/services/heat-api-cfn.yaml b/puppet/services/heat-api-cfn.yaml
index 483f0a45..c4d44853 100644
--- a/puppet/services/heat-api-cfn.yaml
+++ b/puppet/services/heat-api-cfn.yaml
@@ -38,8 +38,23 @@ parameters:
default:
tag: openstack.heat.api.cfn
path: /var/log/heat/heat-api-cfn.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -59,19 +74,32 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cfn::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cfn.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cfn.firewall_rules:
'125 heat_cfn':
dport:
- 8000
- 13800
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::ssl: {get_param: EnableInternalTLS}
+ heat::api_cfn::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cfn::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cfn
service_config_settings:
@@ -94,7 +122,16 @@ outputs:
shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
when: heat_api_cfn_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cfn service
+ - name: check for heat_api_cfn running under apache (post upgrade)
tags: step1
- when: heat_api_cfn_enabled.rc == 0
- service: name=openstack-heat-api-cfn state=stopped
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi"
+ register: heat_api_cfn_apache
+ ignore_errors: true
+ - name: Stop heat_api_cfn service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cfn_apache.rc == 0
+ - name: Stop and disable heat_api_cfn service (pre-upgrade not under httpd)
+ tags: step1
+ when: heat_api_cfn_apache.rc == 0
+ service: name=openstack-heat-api-cfn state=stopped enabled=no
diff --git a/puppet/services/heat-api-cloudwatch.yaml b/puppet/services/heat-api-cloudwatch.yaml
index 8879bcb2..7f8fa1fe 100644
--- a/puppet/services/heat-api-cloudwatch.yaml
+++ b/puppet/services/heat-api-cloudwatch.yaml
@@ -30,8 +30,23 @@ parameters:
default:
tag: openstack.heat.api.cloudwatch
path: /var/log/heat/heat-api-cloudwatch.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -51,19 +66,34 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cloudwatch::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cloudwatch.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cloudwatch.firewall_rules:
'125 heat_cloudwatch':
dport:
- 8003
- 13003
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::ssl: {get_param: EnableInternalTLS}
+ heat::api_cloudwatch::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cloudwatch::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cloudwatch
upgrade_tasks:
@@ -76,7 +106,16 @@ outputs:
shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
when: heat_api_cloudwatch_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cloudwatch service
+ - name: check for heat_api_cloudwatch running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cloudwatch_wsgi"
+ register: heat_api_cloudwatch_apache
+ ignore_errors: true
+ - name: Stop heat_api_cloudwatch service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cloudwatch_apache.rc == 0
+ - name: Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd)
tags: step1
when: heat_api_cloudwatch_enabled.rc == 0
- service: name=openstack-heat-api-cloudwatch state=stopped
+ service: name=openstack-heat-api-cloudwatch state=stopped enabled=no
diff --git a/puppet/services/heat-api.yaml b/puppet/services/heat-api.yaml
index 2464011b..e21369e8 100644
--- a/puppet/services/heat-api.yaml
+++ b/puppet/services/heat-api.yaml
@@ -38,8 +38,23 @@ parameters:
default:
tag: openstack.heat.api
path: /var/log/heat/heat-api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
@@ -59,19 +74,32 @@ outputs:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api::workers: {get_param: HeatWorkers}
- tripleo.heat_api.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api.firewall_rules:
'125 heat_api':
dport:
- 8004
- 13004
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+ heat::api::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api
service_config_settings:
@@ -94,7 +122,16 @@ outputs:
shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
when: heat_api_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api service
+ - name: check for heat_api running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi"
+ register: heat_api_apache
+ ignore_errors: true
+ - name: Stop heat_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_apache.rc == 0
+ - name: Stop and disable heat_api service (pre-upgrade not under httpd)
tags: step1
when: heat_api_enabled.rc == 0
- service: name=openstack-heat-api state=stopped
+ service: name=openstack-heat-api state=stopped enabled=no
diff --git a/puppet/services/heat-base.yaml b/puppet/services/heat-base.yaml
index e83a9edd..6ada9c25 100644
--- a/puppet/services/heat-base.yaml
+++ b/puppet/services/heat-base.yaml
@@ -125,7 +125,9 @@ outputs:
value: 'role:admin'
heat::rabbit_heartbeat_timeout_threshold: 60
heat::keystone::authtoken::project_name: 'service'
- heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ heat::keystone::authtoken::user_domain_name: 'Default'
+ heat::keystone::authtoken::project_domain_name: 'Default'
+ heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::password: {get_param: HeatPassword}
heat::keystone::domain::domain_name: 'heat_stack'
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 60b009a8..7ae518b5 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -78,7 +78,7 @@ outputs:
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
- horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
horizon::password_validator: {get_param: [HorizonPasswordValidator]}
horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
horizon::secret_key:
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index 7aab6f8d..e24d0de6 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -49,8 +49,10 @@ outputs:
- get_attr: [IronicBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
+ ironic::api::authtoken::user_domain_name: 'Default'
+ ironic::api::authtoken::project_domain_name: 'Default'
ironic::api::authtoken::username: 'ironic'
- ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index f9547bef..56e1a90b 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -44,6 +44,10 @@ parameters:
default: 8088
description: Port to use for serving images when iPXE is used.
type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
MonitoringSubscriptionIronicConductor:
default: 'overcloud-ironic-conductor'
type: string
@@ -65,9 +69,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
- # FIXME: I have no idea why neutron_url is in "api" manifest
- - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+ - ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
@@ -104,7 +106,40 @@ outputs:
# the VIP, but rather a real IP of the host.
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
-
+ # Credentials to access other services
+ ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::glance::username: 'ironic'
+ ironic::glance::password: {get_param: IronicPassword}
+ ironic::glance::project_name: 'service'
+ ironic::glance::user_domain_name: 'Default'
+ ironic::glance::project_domain_name: 'Default'
+ ironic::neutron::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::neutron::username: 'ironic'
+ ironic::neutron::password: {get_param: IronicPassword}
+ ironic::neutron::project_name: 'service'
+ ironic::neutron::user_domain_name: 'Default'
+ ironic::neutron::project_domain_name: 'Default'
+ ironic::service_catalog::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::service_catalog::username: 'ironic'
+ ironic::service_catalog::password: {get_param: IronicPassword}
+ ironic::service_catalog::project_name: 'service'
+ ironic::service_catalog::user_domain_name: 'Default'
+ ironic::service_catalog::project_domain_name: 'Default'
+ ironic::swift::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::swift::username: 'ironic'
+ ironic::swift::password: {get_param: IronicPassword}
+ ironic::swift::project_name: 'service'
+ ironic::swift::user_domain_name: 'Default'
+ ironic::swift::project_domain_name: 'Default'
+ # ironic-inspector support is not implemented, but let's configure
+ # the credentials for consistency.
+ ironic::drivers::inspector::enabled: false
+ ironic::drivers::inspector::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::drivers::inspector::username: 'ironic'
+ ironic::drivers::inspector::password: {get_param: IronicPassword}
+ ironic::drivers::inspector::project_name: 'service'
+ ironic::drivers::inspector::user_domain_name: 'Default'
+ ironic::drivers::inspector::project_domain_name: 'Default'
step_config: |
include ::tripleo::profile::base::ironic::conductor
upgrade_tasks:
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index f9a15391..17616867 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -35,7 +35,7 @@ parameters:
KeystoneTokenProvider:
description: The keystone token format
type: string
- default: 'uuid'
+ default: 'fernet'
constraints:
- allowed_values: ['uuid', 'fernet']
ServiceNetMap:
diff --git a/puppet/services/manila-api.yaml b/puppet/services/manila-api.yaml
index 7b78c82e..4061ca28 100644
--- a/puppet/services/manila-api.yaml
+++ b/puppet/services/manila-api.yaml
@@ -48,9 +48,11 @@ outputs:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::keystone::authtoken::password: {get_param: ManilaPassword}
- manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ manila::keystone::authtoken::auth_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
+ manila::keystone::authtoken::user_domain_name: 'Default'
+ manila::keystone::authtoken::project_domain_name: 'Default'
tripleo.manila_api.firewall_rules:
'150 manila':
dport:
diff --git a/puppet/services/monitoring/sensu-base.yaml b/puppet/services/monitoring/sensu-base.yaml
index a8303a59..2fa1569c 100644
--- a/puppet/services/monitoring/sensu-base.yaml
+++ b/puppet/services/monitoring/sensu-base.yaml
@@ -29,7 +29,18 @@ parameters:
default: false
description: >
RabbitMQ client subscriber parameter to specify an SSL connection
- to the RabbitMQ host.
+ to the RabbitMQ host. Setting MonitoringRabbitUseSSL to true without
+ specifying a private key or cert chain enables SSL transport, but not
+ certificate-based authentication.
+ type: string
+ MonitoringRabbitSSLPrivateKey:
+ default: ''
+ description: Private key to be used by Sensu to connect to RabbitMQ host.
+ type: string
+ MonitoringRabbitSSLCertChain:
+ default: ''
+ description: >
+ Private SSL cert chain to be used by Sensu to connect to RabbitMQ host.
type: string
MonitoringRabbitPassword:
description: The RabbitMQ password used for monitoring purposes.
@@ -71,6 +82,8 @@ outputs:
sensu::rabbitmq_password: {get_param: MonitoringRabbitPassword}
sensu::rabbitmq_port: {get_param: MonitoringRabbitPort}
sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
+ sensu::rabbitmq_ssl_private_key: {get_param: MonitoringRabbitSSLPrivateKey}
+ sensu::rabbitmq_ssl_cert_chain: {get_param: MonitoringRabbitSSLCertChain}
sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
sensu::redact: {get_param: SensuRedactVariables}
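The two new parameters let Sensu present a client certificate when connecting to RabbitMQ. A sketch of an environment file wiring them up; the paths are placeholders, not values taken from this change:

  parameter_defaults:
    MonitoringRabbitUseSSL: true
    MonitoringRabbitSSLPrivateKey: '/etc/sensu/ssl/client-key.pem'
    MonitoringRabbitSSLCertChain: '/etc/sensu/ssl/client-cert.pem'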
diff --git a/puppet/services/neutron-api.yaml b/puppet/services/neutron-api.yaml
index bb191ff0..bb102c08 100644
--- a/puppet/services/neutron-api.yaml
+++ b/puppet/services/neutron-api.yaml
@@ -128,18 +128,20 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ovs_neutron'
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
neutron::server::api_workers: {get_param: NeutronWorkers}
neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::enable_proxy_headers_parsing: true
neutron::keystone::authtoken::password: {get_param: NeutronPassword}
- neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
+ neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneInternal, uri_no_suffix ] }
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
neutron::keystone::authtoken::project_name: 'service'
+ neutron::keystone::authtoken::user_domain_name: 'Default'
+ neutron::keystone::authtoken::project_domain_name: 'Default'
neutron::server::sync_db: true
tripleo.neutron_api.firewall_rules:
'114 neutron api':
diff --git a/puppet/services/neutron-bgpvpn-api.yaml b/puppet/services/neutron-bgpvpn-api.yaml
new file mode 100644
index 00000000..f01cf6f1
--- /dev/null
+++ b/puppet/services/neutron-bgpvpn-api.yaml
@@ -0,0 +1,34 @@
+heat_template_version: ocata
+
+description: >
+ BGPVPN API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ BgpvpnServiceProvider:
+ default: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
+ description: Backend to use as a service provider for BGPVPN
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the BGPVPN role.
+ value:
+ service_name: neutron_bgpvpn_api
+ config_settings:
+ neutron::services::bgpvpn::service_providers: {get_param: BgpvpnServiceProvider}
+ step_config: |
+ include ::tripleo::profile::base::neutron::bgpvpn
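Since the default resource registry maps OS::TripleO::Services::NeutronBgpvpnApi to OS::Heat::None, enabling the plugin takes an environment file along these lines; the service provider shown is simply the template's default value:

  resource_registry:
    OS::TripleO::Services::NeutronBgpvpnApi: ../puppet/services/neutron-bgpvpn-api.yaml

  parameter_defaults:
    BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'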
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index f27b53f2..473c24b4 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -110,8 +110,10 @@ outputs:
- 13774
- 8775
nova::keystone::authtoken::project_name: 'service'
+ nova::keystone::authtoken::user_domain_name: 'Default'
+ nova::keystone::authtoken::project_domain_name: 'Default'
nova::keystone::authtoken::password: {get_param: NovaPassword}
- nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::api::enabled: true
nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool}
diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml
index 5eb2170a..843f44c5 100644
--- a/puppet/services/nova-ironic.yaml
+++ b/puppet/services/nova-ironic.yaml
@@ -44,7 +44,7 @@ outputs:
nova::compute::vnc_enabled: false
nova::ironic::common::password: {get_param: IronicPassword}
nova::ironic::common::project_name: 'service'
- nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::ironic::common::username: 'ironic'
nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]}
nova::network::neutron::dhcp_domain: ''
diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml
index b537a2bc..a3f616ff 100644
--- a/puppet/services/octavia-base.yaml
+++ b/puppet/services/octavia-base.yaml
@@ -56,7 +56,7 @@ outputs:
octavia::debug: {get_param: Debug}
octavia::purge_config: {get_param: EnableConfigPurge}
octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName}
- tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword}
- tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort}
+ octavia::rabbit_userid: {get_param: RabbitUserName}
+ octavia::rabbit_password: {get_param: RabbitPassword}
+ octavia::rabbit_port: {get_param: RabbitClientPort}
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index ceb56a81..6882aeff 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -28,7 +28,7 @@ parameters:
OpenDaylightFeatures:
description: List of features to install with ODL
type: comma_delimited_list
- default: ["odl-netvirt-openstack","odl-netvirt-ui"]
+ default: ["odl-netvirt-openstack","odl-netvirt-ui","odl-jolokia"]
OpenDaylightApiVirtualIP:
type: string
default: ''
@@ -59,13 +59,14 @@ outputs:
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
- opendaylight::nb_connection_protocol: {get_param: OpenDayLightConnectionProtocol}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
tripleo.opendaylight_api.firewall_rules:
'137 opendaylight api':
dport:
- {get_param: OpenDaylightPort}
- 6640
- 6653
+ - 2550
step_config: |
include tripleo::profile::base::neutron::opendaylight
upgrade_tasks:
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index 5be58c18..762d0092 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -90,7 +90,7 @@ parameters:
PacemakerResources:
type: comma_delimited_list
description: List of resources managed by pacemaker
- default: ['rabbitmq','haproxy']
+ default: ['rabbitmq','haproxy','galera']
outputs:
role_data:
@@ -143,5 +143,7 @@ outputs:
pacemaker_cluster: state=online
- name: Check pacemaker resource
tags: step4
- pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500
+ pacemaker_is_active:
+ resource: "{{ item }}"
+ max_wait: 500
with_items: {get_param: PacemakerResources}
diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml
index 998e64ee..fda13450 100644
--- a/puppet/services/panko-base.yaml
+++ b/puppet/services/panko-base.yaml
@@ -50,8 +50,10 @@ outputs:
panko::debug: {get_param: Debug}
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::project_name: 'service'
+ panko::keystone::authtoken::user_domain_name: 'Default'
+ panko::keystone::authtoken::project_domain_name: 'Default'
panko::keystone::authtoken::password: {get_param: PankoPassword}
- panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::auth::auth_password: {get_param: PankoPassword}
panko::auth::auth_region: 'regionOne'
diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml
index 6f92066e..a4c139b5 100644
--- a/puppet/services/tacker.yaml
+++ b/puppet/services/tacker.yaml
@@ -75,8 +75,10 @@ outputs:
tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]}
tacker::keystone::authtoken::project_name: 'service'
- tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ tacker::keystone::authtoken::user_domain_name: 'Default'
+ tacker::keystone::authtoken::project_domain_name: 'Default'
+ tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
tacker::db::mysql::password: {get_param: TackerPassword}
tacker::db::mysql::user: tacker
diff --git a/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml
new file mode 100644
index 00000000..50b8167e
--- /dev/null
+++ b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Keystone's default token provider is now fernet instead of UUID
+upgrade:
+ - When upgrading, old tokens will not work anymore due to the provider
+ changing from UUID to fernet.
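Deployments that need to keep issuing UUID tokens can pin the provider explicitly; a minimal override in an environment file would be:

  parameter_defaults:
    KeystoneTokenProvider: 'uuid'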
diff --git a/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml
new file mode 100644
index 00000000..2af6aa72
--- /dev/null
+++ b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Add support for BGPVPN Neutron service plugin
diff --git a/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml b/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml
new file mode 100644
index 00000000..882ee4e5
--- /dev/null
+++ b/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Adds support for OpenDaylight HA clustering. Now when specifying
+ three or more ODL roles, ODL will be deployed as a cluster and will
+ use port 2550 for cluster communication.
diff --git a/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml
new file mode 100644
index 00000000..b3a62ced
--- /dev/null
+++ b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - The relevant parameters have been added to deploy the heat APIs over httpd.
+ This means that the HeatWorkers parameter now affects httpd instead of the
+ heat APIs themselves, and that the Apache hieradata will also be deployed
+ on the nodes where the heat APIs run.
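Because the heat_workers_zero condition skips the Apache workers setting when HeatWorkers is 0, an explicit worker count now maps to httpd processes. A sketch, with an arbitrary example value:

  parameter_defaults:
    HeatWorkers: 4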
diff --git a/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml b/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml
new file mode 100644
index 00000000..09067296
--- /dev/null
+++ b/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - The ``NeutronExternalNetworkBridge`` parameter changed its default value
+ from ``br-ex`` to an empty string. This means that by default the Neutron
+ L3 agent is able to serve multiple external networks. (This was already the
+ case for deployments whose templates overrode the parameter with an empty
+ string.)
+deprecations:
+ - The ``NeutronExternalNetworkBridge`` parameter is deprecated and will be
+ removed in a future release.
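Deployments that still depend on the old single-bridge behaviour can set the parameter back explicitly until it is removed, for example:

  parameter_defaults:
    NeutronExternalNetworkBridge: 'br-ex'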
diff --git a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
new file mode 100644
index 00000000..da995949
--- /dev/null
+++ b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
@@ -0,0 +1,6 @@
+---
+security:
+ - |
+ Secure EtcdInitialClusterToken by removing the default value
+ and making the parameter hidden.
+ Fixes `bug 1673266 <https://bugs.launchpad.net/tripleo/+bug/1673266>`__.
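With the default removed, a cluster token can be supplied through an environment file; the value below is only a placeholder:

  parameter_defaults:
    EtcdInitialClusterToken: 'replace-with-a-random-secret'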
diff --git a/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml
new file mode 100644
index 00000000..2f2513c9
--- /dev/null
+++ b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Deploy Gnocchi with Keystone v3 endpoints and make
+ sure it doesn't rely on Keystone v2 anymore.
diff --git a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
deleted file mode 100644
index edcc1250..00000000
--- a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-deprecations:
- - The environments/puppet-pacemaker.yaml file is now deprecated and the HA
- deployment is now the default. In order to get the non-HA deployment use
- environments/nonha-arch.yaml explicitly.
diff --git a/roles_data.yaml b/roles_data.yaml
index 1fddf72f..130451ff 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -33,6 +33,7 @@
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
@@ -52,6 +53,7 @@
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronBgpvpnApi
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
@@ -134,6 +136,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
@@ -162,6 +165,7 @@
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
@@ -180,6 +184,7 @@
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
@@ -198,6 +203,7 @@
- name: CephStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
index 5070ef38..8e830711 100644
--- a/roles_data_undercloud.yaml
+++ b/roles_data_undercloud.yaml
@@ -34,3 +34,11 @@
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronOvsAgent
- OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::AodhApi
+ - OS::TripleO::Services::AodhEvaluator
+ - OS::TripleO::Services::AodhNotifier
+ - OS::TripleO::Services::AodhListener
+ - OS::TripleO::Services::GnocchiApi
+ - OS::TripleO::Services::GnocchiMetricd
+ - OS::TripleO::Services::GnocchiStatsd
+ - OS::TripleO::Services::PankoApi
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 32987cb2..5ff6f134 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -23,6 +23,14 @@ envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
+REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
+ 'config_settings', 'step_config']
+OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+ 'service_config_settings', 'host_prep_tasks',
+ 'metadata_settings', 'kolla_config']
+DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'puppet_tags', 'step_config',
+ 'config_image']
+
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
@@ -69,6 +77,7 @@ def validate_hci_compute_services_default(env_filename, env_tpl):
return 1
return 0
+
def validate_mysql_connection(settings):
no_op = lambda *args: False
error_status = [0]
@@ -109,6 +118,55 @@ def validate_mysql_connection(settings):
return error_status[0]
+def validate_docker_service(filename, tpl):
+ if 'outputs' in tpl and 'role_data' in tpl['outputs']:
+ if 'value' not in tpl['outputs']['role_data']:
+ print('ERROR: invalid role_data for filename: %s'
+ % filename)
+ return 1
+ role_data = tpl['outputs']['role_data']['value']
+
+ for section_name in REQUIRED_DOCKER_SECTIONS:
+ if section_name not in role_data:
+ print('ERROR: %s is required in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ for section_name in role_data.keys():
+ if section_name in REQUIRED_DOCKER_SECTIONS:
+ continue
+ else:
+ if section_name in OPTIONAL_DOCKER_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s is extra in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ if 'puppet_config' in role_data:
+ puppet_config = role_data['puppet_config']
+ for key in puppet_config:
+ if key in DOCKER_PUPPET_CONFIG_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s should not be in puppet_config section.'
+ % key)
+ return 1
+ for key in DOCKER_PUPPET_CONFIG_SECTIONS:
+ if key not in puppet_config:
+ print('ERROR: %s is required in puppet_config for %s.'
+ % (key, filename))
+ return 1
+
+ if 'parameters' in tpl:
+ for param in required_params:
+ if param not in tpl['parameters']:
+ print('ERROR: parameter %s is required for %s.'
+ % (param, filename))
+ return 1
+ return 0
+
+
def validate_service(filename, tpl):
if 'outputs' in tpl and 'role_data' in tpl['outputs']:
if 'value' not in tpl['outputs']['role_data']:
@@ -158,6 +216,10 @@ def validate(filename):
filename != './puppet/services/services.yaml'):
retval = validate_service(filename, tpl)
+ if (filename.startswith('./docker/services/') and
+ filename != './docker/services/services.yaml'):
+ retval = validate_docker_service(filename, tpl)
+
if filename.endswith('hyperconverged-ceph.yaml'):
retval = validate_hci_compute_services_default(filename, tpl)