-rw-r--r--  ci/environments/scenario002-multinode-containers.yaml | 4
-rw-r--r--  ci/environments/scenario002-multinode.yaml | 4
-rw-r--r--  ci/environments/scenario007-multinode-containers.yaml | 3
-rwxr-xr-x  deployed-server/scripts/enable-ssh-admin.sh | 33
-rw-r--r--  docker/services/ceph-ansible/ceph-base.yaml | 20
-rw-r--r--  docker/services/cinder-api.yaml | 1
-rw-r--r--  docker/services/glance-api.yaml | 39
-rw-r--r--  docker/services/heat-api.yaml | 1
-rw-r--r--  docker/services/horizon.yaml | 8
-rw-r--r--  docker/services/keystone.yaml | 1
-rw-r--r--  docker/services/memcached.yaml | 16
-rw-r--r--  docker/services/mistral-api.yaml | 36
-rw-r--r--  docker/services/nova-api.yaml | 1
-rw-r--r--  docker/services/pacemaker/clustercheck.yaml | 5
-rw-r--r--  environments/composable-roles/standalone.yaml | 12
-rw-r--r--  environments/network-isolation-v6.j2.yaml | 2
-rw-r--r--  environments/storage/enable-ceph.yaml | 2
-rw-r--r--  environments/storage/external-ceph.yaml | 2
-rw-r--r--  environments/storage/glance-nfs.yaml | 2
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml | 2
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration | 40
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml | 21
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml | 19
-rw-r--r--  puppet/role.role.j2.yaml | 2
-rw-r--r--  puppet/services/cinder-base.yaml | 2
-rw-r--r--  puppet/services/database/mysql.yaml | 50
-rw-r--r--  puppet/services/glance-api.yaml | 4
-rw-r--r--  puppet/services/memcached.yaml | 1
-rw-r--r--  puppet/services/neutron-lbaas.yaml | 3
-rw-r--r--  puppet/services/nova-compute.yaml | 2
-rw-r--r--  puppet/services/rabbitmq.yaml | 2
-rw-r--r--  releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml | 17
-rw-r--r--  releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml | 5
-rw-r--r--  releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml | 5
-rw-r--r--  sample-env-generator/composable-roles.yaml | 25
-rwxr-xr-x  tools/yaml-validate.py | 5
36 files changed, 298 insertions, 99 deletions
diff --git a/ci/environments/scenario002-multinode-containers.yaml b/ci/environments/scenario002-multinode-containers.yaml
index bec5f48e..534f8294 100644
--- a/ci/environments/scenario002-multinode-containers.yaml
+++ b/ci/environments/scenario002-multinode-containers.yaml
@@ -9,7 +9,6 @@ resource_registry:
OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
- OS::TripleO::Services::MongoDb: ../../docker/services/database/mongodb.yaml
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
@@ -58,7 +57,6 @@ parameter_defaults:
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::BarbicanApi
- - OS::TripleO::Services::MongoDb
- OS::TripleO::Services::Zaqar
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::TripleoPackages
@@ -69,5 +67,7 @@ parameter_defaults:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
Debug: true
+ ZaqarMessageStore: 'swift'
+ ZaqarManagementStore: 'sqlalchemy'
SwiftCeilometerPipelineEnabled: false
NotificationDriver: 'noop'
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index 6c7f4ebb..2f731ce9 100644
--- a/ci/environments/scenario002-multinode.yaml
+++ b/ci/environments/scenario002-multinode.yaml
@@ -9,7 +9,6 @@ resource_registry:
OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
- OS::TripleO::Services::MongoDb: ../../puppet/services/database/mongodb.yaml
OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
@@ -63,7 +62,6 @@ parameter_defaults:
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::BarbicanApi
- - OS::TripleO::Services::MongoDb
- OS::TripleO::Services::Zaqar
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::TripleoPackages
@@ -86,5 +84,7 @@ parameter_defaults:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
Debug: true
+ ZaqarMessageStore: 'swift'
+ ZaqarManagementStore: 'sqlalchemy'
SwiftCeilometerPipelineEnabled: false
NotificationDriver: 'noop'
diff --git a/ci/environments/scenario007-multinode-containers.yaml b/ci/environments/scenario007-multinode-containers.yaml
index bad3e4a5..149f2d32 100644
--- a/ci/environments/scenario007-multinode-containers.yaml
+++ b/ci/environments/scenario007-multinode-containers.yaml
@@ -7,6 +7,9 @@ resource_registry:
# Since the OVB jobs also test this functionality we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::Clustercheck
diff --git a/deployed-server/scripts/enable-ssh-admin.sh b/deployed-server/scripts/enable-ssh-admin.sh
index dcabeadf..daff3907 100755
--- a/deployed-server/scripts/enable-ssh-admin.sh
+++ b/deployed-server/scripts/enable-ssh-admin.sh
@@ -10,6 +10,7 @@ SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"$HOME/.ssh/id_rsa"}
# this is the intended variable for overriding
OVERCLOUD_SSH_KEY=${OVERCLOUD_SSH_KEY:-"$SUBNODES_SSH_KEY"}
+SHORT_TERM_KEY_COMMENT="TripleO split stack short term key"
SLEEP_TIME=5
function overcloud_ssh_hosts_json {
@@ -22,7 +23,7 @@ print(json.dumps(re.split("\s+", sys.stdin.read().strip())))'
function overcloud_ssh_key_json {
# we pass the contents to Mistral instead of just path, otherwise
# the key file would have to be readable for the mistral user
- cat "$OVERCLOUD_SSH_KEY" | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))'
+ cat "$1" | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))'
}
function workflow_finished {
@@ -30,6 +31,12 @@ function workflow_finished {
openstack workflow execution show -f shell $execution_id | grep 'state="SUCCESS"' > /dev/null
}
+function generate_short_term_keys {
+ local tmpdir=$(mktemp -d)
+ ssh-keygen -N '' -t rsa -b 4096 -f "$tmpdir/id_rsa" -C "$SHORT_TERM_KEY_COMMENT" > /dev/null
+ echo "$tmpdir"
+}
+
if [ -z "$OVERCLOUD_HOSTS" ]; then
echo 'Please set $OVERCLOUD_HOSTS'
exit 1
@@ -41,7 +48,20 @@ echo "SSH key file: $OVERCLOUD_SSH_KEY"
echo "Hosts: $OVERCLOUD_HOSTS"
echo
-EXECUTION_PARAMS="{\"ssh_user\": \"$OVERCLOUD_SSH_USER\", \"ssh_servers\": $(overcloud_ssh_hosts_json), \"ssh_private_key\": $(overcloud_ssh_key_json)}"
+SHORT_TERM_KEY_DIR=$(generate_short_term_keys)
+SHORT_TERM_KEY_PRIVATE="$SHORT_TERM_KEY_DIR/id_rsa"
+SHORT_TERM_KEY_PUBLIC="$SHORT_TERM_KEY_DIR/id_rsa.pub"
+SHORT_TERM_KEY_PUBLIC_CONTENT=$(cat $SHORT_TERM_KEY_PUBLIC)
+
+for HOST in $OVERCLOUD_HOSTS; do
+ echo "Inserting TripleO short term key for $HOST"
+ # prepending an extra newline so that if authorized_keys didn't
+ # end with a newline previously, we don't end up garbling it
+ ssh -i "$OVERCLOUD_SSH_KEY" -l "$OVERCLOUD_SSH_USER" "$HOST" "echo -e '\n$SHORT_TERM_KEY_PUBLIC_CONTENT' >> \$HOME/.ssh/authorized_keys"
+done
+
+echo "Starting ssh admin enablement workflow"
+EXECUTION_PARAMS="{\"ssh_user\": \"$OVERCLOUD_SSH_USER\", \"ssh_servers\": $(overcloud_ssh_hosts_json), \"ssh_private_key\": $(overcloud_ssh_key_json "$SHORT_TERM_KEY_PRIVATE")}"
EXECUTION_CREATE_OUTPUT=$(openstack workflow execution create -f shell -d 'deployed server ssh admin creation' tripleo.access.v1.enable_ssh_admin "$EXECUTION_PARAMS")
echo "$EXECUTION_CREATE_OUTPUT"
EXECUTION_ID=$(echo "$EXECUTION_CREATE_OUTPUT" | grep '^id=' | awk '-F"' '{ print $2 }')
@@ -56,5 +76,14 @@ while ! workflow_finished $EXECUTION_ID; do
sleep $SLEEP_TIME
echo -n .
done
+echo # newline after the previous dots
+
+for HOST in $OVERCLOUD_HOSTS; do
+ echo "Removing TripleO short term key from $HOST"
+ ssh -l "$OVERCLOUD_SSH_USER" "$HOST" "sed -i -e '/$SHORT_TERM_KEY_COMMENT/d' \$HOME/.ssh/authorized_keys"
+done
+
+echo "Removing short term keys locally"
+rm -r "$SHORT_TERM_KEY_DIR"
echo "Success."
diff --git a/docker/services/ceph-ansible/ceph-base.yaml b/docker/services/ceph-ansible/ceph-base.yaml
index 8cc81fb0..4674ec14 100644
--- a/docker/services/ceph-ansible/ceph-base.yaml
+++ b/docker/services/ceph-ansible/ceph-base.yaml
@@ -73,15 +73,9 @@ parameters:
description: >
It can be used to override settings for one of the predefined pools, or to create
additional ones. Example:
- {
- "volumes": {
- "size": 5,
- "pg_num": 128,
- "pgp_num": 128
- }
- }
- default: {}
- type: json
+ [{"name": "volumes", "pg_num": 64, "rule_name": ""}]
+ default: []
+ type: comma_delimited_list
CinderRbdPoolName:
default: volumes
type: string
@@ -225,13 +219,7 @@ outputs:
- {get_param: NovaRbdPoolName}
- {get_param: GlanceRbdPoolName}
- {get_param: GnocchiRbdPoolName}
- - repeat:
- template:
- name: <%pool%>
- pg_num: {get_param: CephPoolDefaultPgNum}
- rule_name: ""
- for_each:
- <%pool%>: {get_param: CephPools}
+ - {get_param: CephPools}
openstack_keys: &openstack_keys
- name:
list_join:
diff --git a/docker/services/cinder-api.yaml b/docker/services/cinder-api.yaml
index 25390c63..336b4540 100644
--- a/docker/services/cinder-api.yaml
+++ b/docker/services/cinder-api.yaml
@@ -200,6 +200,7 @@ outputs:
tags: step2
service: name=httpd state=stopped enabled=no
- name: remove old cinder cron jobs
+ tags: step2
file:
path: /var/spool/cron/cinder
state: absent
diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml
index e1a3827a..b4336bea 100644
--- a/docker/services/glance-api.yaml
+++ b/docker/services/glance-api.yaml
@@ -39,6 +39,13 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, cinder, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd', 'cinder']
GlanceNfsEnabled:
default: false
description: >
@@ -48,11 +55,22 @@ parameters:
default: false
description: Remove package if the service is being disabled during upgrade
type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
conditions:
internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
nfs_backend_enabled: {equals: [{get_param: GlanceNfsEnabled}, true]}
+ cinder_backend_enabled: {equals: [{get_param: GlanceBackend}, cinder]}
resources:
@@ -108,6 +126,10 @@ outputs:
dest: "/etc/ceph/"
merge: true
preserve_properties: true
+ permissions:
+ - path: /var/lib/glance
+ owner: glance:glance
+ recurse: true
/var/lib/kolla/config_files/glance_api_tls_proxy.json:
command: /usr/sbin/httpd -DFOREGROUND
config_files:
@@ -147,6 +169,12 @@ outputs:
- nfs_backend_enabled
- /var/lib/glance:/var/lib/glance
- ''
+ -
+ if:
+ - cinder_backend_enabled
+ - - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - []
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@@ -157,7 +185,7 @@ outputs:
start_order: 2
image: *glance_api_image
net: host
- privileged: false
+ privileged: {if: [cinder_backend_enabled, true, false]}
restart: always
volumes: *glance_volumes
environment:
@@ -182,6 +210,15 @@ outputs:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- {}
host_prep_tasks:
+ - name: Mount NFS on host
+ vars:
+ nfs_backend_enable: {get_param: GlanceNfsEnabled}
+ mount: name=/var/lib/glance src="{{item.NFS_SHARE}}" fstype=nfs4 opts="{{item.NFS_OPTIONS}}" state=mounted
+ with_items:
+ - NFS_SHARE: {get_param: GlanceNfsShare}
+ NFS_OPTIONS: {get_param: GlanceNfsOptions}
+ when:
+ - nfs_backend_enable
- name: create persistent logs directory
file:
path: "{{ item }}"
diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml
index 75d0b8c1..dcba519f 100644
--- a/docker/services/heat-api.yaml
+++ b/docker/services/heat-api.yaml
@@ -166,6 +166,7 @@ outputs:
ignore_errors: True
register: heat_api_enabled
- name: remove old heat cron jobs
+ tags: step2
file:
path: /var/spool/cron/heat
state: absent
diff --git a/docker/services/horizon.yaml b/docker/services/horizon.yaml
index d6ffb6dc..94fd9eef 100644
--- a/docker/services/horizon.yaml
+++ b/docker/services/horizon.yaml
@@ -95,6 +95,12 @@ outputs:
- path: /var/log/horizon/
owner: apache:apache
recurse: true
+ # NOTE The upstream Kolla Dockerfile sets /etc/openstack-dashboard/ ownership to
+ # horizon:horizon - the policy.json files need read permissions for the apache user
+ # FIXME We should consider whether this should be fixed in the Kolla Dockerfile instead
+ - path: /etc/openstack-dashboard/
+ owner: apache:apache
+ recurse: true
# FIXME Apache tries to write a .lock file there
- path: /usr/share/openstack-dashboard/openstack_dashboard/local/
owner: apache:apache
@@ -113,7 +119,7 @@ outputs:
volumes:
- /var/log/containers/horizon:/var/log/horizon
- /var/log/containers/httpd/horizon:/var/log/httpd
- - /var/lib/config-data/horizon/etc/:/etc/
+ - /var/lib/config-data/puppet-generated/horizon/etc/openstack-dashboard:/etc/openstack-dashboard
step_3:
horizon:
image: *horizon_image
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index 26cef614..a8ba5bf1 100644
--- a/docker/services/keystone.yaml
+++ b/docker/services/keystone.yaml
@@ -211,6 +211,7 @@ outputs:
tags: step2
service: name=httpd state=stopped enabled=no
- name: remove old keystone cron jobs
+ tags: step2
file:
path: /var/spool/cron/keystone
state: absent
diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml
index c78b85a6..67b84249 100644
--- a/docker/services/memcached.yaml
+++ b/docker/services/memcached.yaml
@@ -80,8 +80,8 @@ outputs:
user: root
volumes:
- /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
- - /var/log/memcached.log:/var/log/memcached.log
- command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; chown ${USER} /var/log/memcached.log']
+ - /var/log/containers/memcached:/var/log/
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; touch /var/log/memcached.log && chown ${USER} /var/log/memcached.log']
memcached:
start_order: 1
image: *memcached_image
@@ -93,8 +93,16 @@ outputs:
- {get_attr: [ContainersCommon, volumes]}
-
- /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
- # TODO(bogdando) capture memcached syslog logs from a container
- command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
+ - /var/log/containers/memcached:/var/log/
+ # NOTE: We're adding the log redirection here, even though it should
+ # already be part of the options. This is because the redirection
+ # via the options is not working and ends up being passed as a
+ # parameter to the memcached command (which it silently ignores).
+ # Thus the need for the explicit redirection here. The redirection
+ # will be removed from the $OPTIONS, which is done via the puppet
+ # module, but we'll only be able to do this once the following pull
+ # request merges: https://github.com/saz/puppet-memcached/pull/88
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS >> /var/log/memcached.log 2>&1']
upgrade_tasks:
- name: Stop and disable memcached service
tags: step2
diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml
index 50c80216..1b4b44f2 100644
--- a/docker/services/mistral-api.yaml
+++ b/docker/services/mistral-api.yaml
@@ -36,6 +36,16 @@ parameters:
default: {}
description: Parameters specific to the role
type: json
+ MistralWorkers:
+ default: 1
+ description: The number of workers for the mistral-api.
+ type: number
+ MistralApiPolicies:
+ description: |
+ A hash of policies to configure for Mistral API.
+ e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
@@ -45,6 +55,16 @@ resources:
MySQLClient:
type: ../../puppet/services/database/mysql-client.yaml
+ MistralBase:
+ type: ../../puppet/services/mistral-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
MistralApiBase:
type: ../../puppet/services/mistral-api.yaml
properties:
@@ -60,9 +80,23 @@ outputs:
description: Role data for the Mistral API role.
value:
service_name: {get_attr: [MistralApiBase, role_data, service_name]}
+ # FIXME(mandre) restore once mistral-api image has the necessary packages
+ # to run on top of apache
+ # config_settings:
+ # map_merge:
+ # - get_attr: [MistralApiBase, role_data, config_settings]
config_settings:
map_merge:
- - get_attr: [MistralApiBase, role_data, config_settings]
+ - get_attr: [MistralBase, role_data, config_settings]
+ - mistral::api::api_workers: {get_param: MistralWorkers}
+ mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+ mistral::policy::policies: {get_param: MistralApiPolicies}
+ tripleo.mistral_api.firewall_rules:
+ '133 mistral':
+ dport:
+ - 8989
+ - 13989
+ mistral_wsgi_enabled: false
logging_source: {get_attr: [MistralApiBase, role_data, logging_source]}
logging_groups: {get_attr: [MistralApiBase, role_data, logging_groups]}
step_config: &step_config
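
A brief sketch of the two new parameters in an environment file; the worker count is an arbitrary illustration, and the policy entry is the one given in the MistralApiPolicies description above:

parameter_defaults:
  MistralWorkers: 4
  MistralApiPolicies:
    mistral-context_is_admin:
      key: context_is_admin
      value: 'role:admin'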
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index f262bcb1..7f1b7a54 100644
--- a/docker/services/nova-api.yaml
+++ b/docker/services/nova-api.yaml
@@ -246,6 +246,7 @@ outputs:
ignore_errors: True
when: {get_param: UpgradeRemoveUnusedPackages}
- name: remove old nova cron jobs
+ tags: step2
file:
path: /var/spool/cron/nova
state: absent
diff --git a/docker/services/pacemaker/clustercheck.yaml b/docker/services/pacemaker/clustercheck.yaml
index b5d128d4..6db8a212 100644
--- a/docker/services/pacemaker/clustercheck.yaml
+++ b/docker/services/pacemaker/clustercheck.yaml
@@ -44,8 +44,11 @@ resources:
ContainersCommon:
type: ../containers-common.yaml
+# We import from the corresponding docker service because otherwise we risk
+# rewriting the tripleo.mysql.firewall_rules key with the baremetal firewall
+# rules (see LP#1728918)
MysqlPuppetBase:
- type: ../../../puppet/services/pacemaker/database/mysql.yaml
+ type: ../../../docker/services/pacemaker/database/mysql.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceData: {get_param: ServiceData}
diff --git a/environments/composable-roles/standalone.yaml b/environments/composable-roles/standalone.yaml
index 3305c9ed..c12d72de 100644
--- a/environments/composable-roles/standalone.yaml
+++ b/environments/composable-roles/standalone.yaml
@@ -30,13 +30,13 @@ parameter_defaults:
# Type: string
ComputeHostnameFormat: '%stackname%-novacompute-%index%'
- # Number of Controller nodes to deploy
+ # Number of ControllerOpenstack nodes
# Type: number
- ControllerCount: 3
+ ControllerOpenstackCount: 3
- # Format for Controller node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+ # Format for ControllerOpenstack node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
# Type: string
- ControllerHostnameFormat: '%stackname%-controller-%index%'
+ ControllerOpenstackHostnameFormat: '%stackname%-controller-%index%'
# Number of Database nodes
# Type: number
@@ -66,9 +66,9 @@ parameter_defaults:
# Type: string
OvercloudComputeFlavor: compute
- # Name of the flavor for Controller nodes
+ # Name of the flavor for ControllerOpenstack nodes
# Type: string
- OvercloudControllerFlavor: control
+ OvercloudControllerOpenstackFlavor: control
# Name of the flavor for Database nodes
# Type: string
diff --git a/environments/network-isolation-v6.j2.yaml b/environments/network-isolation-v6.j2.yaml
index 617dfa61..fefa20ce 100644
--- a/environments/network-isolation-v6.j2.yaml
+++ b/environments/network-isolation-v6.j2.yaml
@@ -55,3 +55,5 @@ parameter_defaults:
RabbitIPv6: True
# Enable IPv6 environment for Memcached.
MemcachedIPv6: True
+ # Enable IPv6 environment for MySQL.
+ MysqlIPv6: True
diff --git a/environments/storage/enable-ceph.yaml b/environments/storage/enable-ceph.yaml
index 596ec16e..c43f2fa1 100644
--- a/environments/storage/enable-ceph.yaml
+++ b/environments/storage/enable-ceph.yaml
@@ -21,7 +21,7 @@ parameter_defaults:
# Type: boolean
CinderEnableRbdBackend: True
- # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
# Type: string
GlanceBackend: rbd
diff --git a/environments/storage/external-ceph.yaml b/environments/storage/external-ceph.yaml
index 0f2d0396..dde2c907 100644
--- a/environments/storage/external-ceph.yaml
+++ b/environments/storage/external-ceph.yaml
@@ -43,7 +43,7 @@ parameter_defaults:
# Type: string
CinderRbdPoolName: volumes
- # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
# Type: string
GlanceBackend: rbd
diff --git a/environments/storage/glance-nfs.yaml b/environments/storage/glance-nfs.yaml
index 3c139306..359401d5 100644
--- a/environments/storage/glance-nfs.yaml
+++ b/environments/storage/glance-nfs.yaml
@@ -21,7 +21,7 @@ parameter_defaults:
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
- # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
# Type: string
GlanceBackend: file
diff --git a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
index 24557517..a5eb35c6 100644
--- a/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
+++ b/extraconfig/pre_deploy/rhel-registration/environment-rhel-registration.yaml
@@ -20,7 +20,7 @@ parameter_defaults:
rhel_reg_user: ""
rhel_reg_type: ""
rhel_reg_method: ""
- rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
+ rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.2-rpms"
rhel_reg_http_proxy_host: ""
rhel_reg_http_proxy_port: ""
rhel_reg_http_proxy_username: ""
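
The proxy reachability test added to the registration script below is driven by these proxy parameters; a hedged example with assumed host and port values:

parameter_defaults:
  rhel_reg_http_proxy_host: "proxy.example.com"
  rhel_reg_http_proxy_port: "8080"
  rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.2-rpms"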
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index d754aafd..4592473f 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -23,6 +23,8 @@ proxy_port=
proxy_url=
proxy_username=
proxy_password=
+curl_opts="--retry-delay 10 --max-time 30 --retry ${retry_max_count} --cacert /etc/rhsm/ca/redhat-uep.pem"
+portal_test_url="https://$(crudini --get /etc/rhsm/rhsm.conf server hostname)/subscription/"
# process variables..
if [ -n "${REG_AUTO_ATTACH:-}" ]; then
@@ -129,12 +131,14 @@ if [ -n "${REG_HTTP_PROXY_HOST:-}" ]; then
# Good both values are not empty
proxy_url="http://${proxy_host}:${proxy_port}"
config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}"
- sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}"
+ sat5_opts="${sat5_opts} --proxy=${proxy_url}"
+ curl_opts="${curl_opts} -x http://${proxy_host}:${proxy_port}"
echo "RHSM Proxy set to: ${proxy_url}"
if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}"
sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}"
+ curl_opts="${curl_opts} --proxy-user ${proxy_username}:${proxy_password}"
else
echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..."
proxy_username= ; proxy_password=
@@ -187,10 +191,10 @@ function retry() {
}
function detect_satellite_server {
- if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm | grep "200 OK"; then
+ if curl ${curl_opts} -L -k -s -D - -o /dev/null $REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm | grep "200 OK"; then
echo Satellite 6 or beyond with Katello API detected at $REG_SAT_URL
katello_api_enabled=1
- elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl ${curl_opts} -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 with RHN detected at $REG_SAT_URL
katello_api_enabled=0
else
@@ -199,7 +203,13 @@ function detect_satellite_server {
fi
}
-if [ "x${proxy_url}" != "x" ];then
+if [ "x${proxy_url}" != "x" ]; then
+ # Before everything, we want to make sure the proxy can be reached
+ # Note: no need to manage retries, already done by retry() function.
+ echo "Testing proxy connectivity..."
+ retry bash -c "</dev/tcp/${proxy_host}/${proxy_port}"
+ echo "Proxy ${proxy_url} is reachable!"
+
# Config subscription-manager for proxy
subscription-manager config ${config_opts}
@@ -222,6 +232,22 @@ fi
case "${REG_METHOD:-}" in
portal)
+ # First test curl to RHSM through the specified proxy
+
+ if curl ${curl_opts} -L -s -D - -o /dev/null ${portal_test_url}|grep '200 OK'; then
+ if [ "x${proxy_url}" = "x" ]; then
+ echo "Access to RHSM portal OK, continuing..."
+ else
+ echo "Access to RHSM portal through proxy ${proxy_url} OK, continuing..."
+ fi
+ else
+ if [ "x${proxy_url}" = "x" ]; then
+ echo "Unable to access RHSM portal! Please check your parameters."
+ else
+ echo "Unable to access RHSM portal through configured HTTP proxy (${proxy_url}) ! Please check your parameters."
+ fi
+ exit 1
+ fi
retry subscription-manager register $opts
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
retry subscription-manager attach $attach_opts
@@ -233,7 +259,7 @@ case "${REG_METHOD:-}" in
detect_satellite_server
if [ "$katello_api_enabled" = "1" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl ${curl_opts} -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
# https://bugs.launchpad.net/tripleo/+bug/1711435
# Delete the /etc/rhsm/facts directory entirely so that the
@@ -247,7 +273,7 @@ case "${REG_METHOD:-}" in
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
- retry yum install -y katello-agent || true # needed for errata reporting to satellite6
+ yum install -y katello-agent || true # needed for errata reporting to satellite6
katello-package-upload
# https://bugs.launchpad.net/tripleo/+bug/1711435
@@ -255,7 +281,7 @@ case "${REG_METHOD:-}" in
mkdir -p /etc/rhsm/facts
else
pushd /usr/share/rhn/
- curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl ${curl_opts} -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
index 93408dd1..6e010dea 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
@@ -24,17 +24,16 @@ resources:
config:
datafiles:
neutron_bigswitch_data:
- mapped_data:
- neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
- neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
- # NOTE(aschultz): required for the puppet module but we don't
- # actually want them defined on the compute nodes so we're
- # relying on the puppet module's handling of <SERVICE DEFAULT>
- # to just not set these but still accept that they were defined.
- # This will should be fixed in puppet-neutron and removed here,
- # but for backportability, we need to define something.
- neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
- neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ # NOTE(aschultz): required for the puppet module but we don't
+ # actually want them defined on the compute nodes so we're
+ # relying on the puppet module's handling of <SERVICE DEFAULT>
+ # to just not set these but still accept that they were defined.
+ # This should be fixed in puppet-neutron and removed here,
+ # but for backportability, we need to define something.
+ neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
NeutronBigswitchDeployment:
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index 71a915df..cda598a5 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -50,16 +50,15 @@ resources:
config:
datafiles:
neutron_bigswitch_data:
- mapped_data:
- neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
- neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
- neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
- neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
- neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
- neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
- neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
- neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
- neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
+ neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
+ neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
+ neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
+ neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
+ neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 15da1773..d53afd04 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -565,6 +565,7 @@ resources:
- bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
+ - net_ip_map
- '"%{::osfamily}"'
# The following are required for compatibility with the Controller role
# where some vendor integrations added hieradata via ExtraConfigPre
@@ -578,6 +579,7 @@ resources:
service_names:
service_names: {get_param: ServiceNames}
sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
service_configs:
map_replace:
- {get_param: ServiceConfigSettings}
diff --git a/puppet/services/cinder-base.yaml b/puppet/services/cinder-base.yaml
index 5b2a2582..2a8026da 100644
--- a/puppet/services/cinder-base.yaml
+++ b/puppet/services/cinder-base.yaml
@@ -89,7 +89,7 @@ parameters:
type: string
description: >
Cron to move deleted instances to another table - User
- default: 'keystone'
+ default: 'cinder'
CinderCronDbPurgeAge:
type: string
description: >
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index abbe7a22..c1f54bb6 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -57,6 +57,11 @@ parameters:
EnableInternalTLS:
type: boolean
default: false
+ MysqlIPv6:
+ default: false
+ description: Enable IPv6 in MySQL
+ type: boolean
+
conditions:
@@ -77,6 +82,7 @@ outputs:
# in tripleo-puppet-elements.
mysql::server::package_name: 'mariadb-galera-server'
mysql::server::manage_config_file: true
+ mysql_ipv6: {get_param: MysqlIPv6}
tripleo.mysql.firewall_rules:
'104 mysql galera':
dport:
@@ -113,30 +119,34 @@ outputs:
{get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::generate_dropin_file_limit:
{get_param: MysqlIncreaseFileLimit}
- - generate_service_certificates: true
- tripleo::profile::base::database::mysql::certificate_specs:
- service_certificate: '/etc/pki/tls/certs/mysql.crt'
- service_key: '/etc/pki/tls/private/mysql.key'
- hostname:
- str_replace:
- template: "%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- dnsnames:
- - str_replace:
+ - if:
+ - internal_tls_enabled
+ -
+ generate_service_certificates: true
+ tripleo::profile::base::database::mysql::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/mysql.crt'
+ service_key: '/etc/pki/tls/private/mysql.key'
+ hostname:
+ str_replace:
template: "%{hiera('cloud_name_NETWORK')}"
params:
NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- - str_replace:
- template:
- "%{hiera('fqdn_$NETWORK')}"
+ dnsnames:
+ - str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ principal:
+ str_replace:
+ template: "mysql/%{hiera('cloud_name_NETWORK')}"
params:
- $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- principal:
- str_replace:
- template: "mysql/%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - {}
step_config: |
include ::tripleo::profile::base::database::mysql
metadata_settings:
diff --git a/puppet/services/glance-api.yaml b/puppet/services/glance-api.yaml
index 8ec3546f..1baf120b 100644
--- a/puppet/services/glance-api.yaml
+++ b/puppet/services/glance-api.yaml
@@ -78,10 +78,10 @@ parameters:
GlanceBackend:
default: swift
description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
+ of swift, rbd, cinder, or file
type: string
constraints:
- - allowed_values: ['swift', 'file', 'rbd']
+ - allowed_values: ['swift', 'file', 'rbd', 'cinder']
GlanceNfsEnabled:
default: false
description: >
diff --git a/puppet/services/memcached.yaml b/puppet/services/memcached.yaml
index 2bc08fde..30ea4e1e 100644
--- a/puppet/services/memcached.yaml
+++ b/puppet/services/memcached.yaml
@@ -54,6 +54,7 @@ outputs:
# internal_api_subnet - > IP/CIDR
memcached::listen_ip: {get_param: [ServiceNetMap, MemcachedNetwork]}
memcached::max_memory: {get_param: MemcachedMaxMemory}
+ memcached::verbosity: 'v'
tripleo.memcached.firewall_rules:
'121 memcached':
dport: 11211
diff --git a/puppet/services/neutron-lbaas.yaml b/puppet/services/neutron-lbaas.yaml
index ec477ddc..a2c1a2ae 100644
--- a/puppet/services/neutron-lbaas.yaml
+++ b/puppet/services/neutron-lbaas.yaml
@@ -73,3 +73,6 @@ outputs:
service_config_settings:
neutron_api:
neutron::server::service_providers: {get_param: NeutronServiceProviders}
+ horizon:
+ horizon::neutron_options:
+ enable_lb: True
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index 9e5ba129..5326a250 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -210,7 +210,7 @@ outputs:
collectd:
tripleo.collectd.plugins.nova_compute:
- virt
- collectd::plugins::virt::connection: "qemu:///system"
+ collectd::plugin::virt::connection: 'qemu:///system'
upgrade_tasks:
- name: Stop nova-compute service
tags: step1
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index a1a60201..879af2a1 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -41,7 +41,7 @@ parameters:
RabbitFDLimit:
default: 65536
description: Configures RabbitMQ FD limit
- type: string
+ type: number
RabbitIPv6:
default: false
description: Enable IPv6 in RabbitMQ
diff --git a/releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml b/releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml
new file mode 100644
index 00000000..63e6f212
--- /dev/null
+++ b/releasenotes/notes/ceph-pools-with-ceph-ansible-f82425e585f90ef6.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+ - |
+ The format to use for the CephPools parameter needs to be updated into the
+ form expected by ceph-ansible. For example, for a new pool named `mypool`
+ it should change from:
+ { "mypool": { "size": 3, "pg_num": 128, "pgp_num": 128 } }
+ into:
+ [ { "name": "mypool", "pg_num": 128, "rule_name": "" } ]
+ The first is a map where each key is a pool name and its value is the pool's
+ properties; the second is a list where each item describes all properties
+ of a pool, including its name.
+other:
+ - |
+ With the migration from puppet-ceph to ceph-ansible for the deployment
+ of Ceph, the format of the CephPools parameter changes because the two tools
+ use a different format to represent the list of additional pools to create.
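
As an illustration of the new list format in an environment file (pool name and pg_num are example values only, mirroring the release note above):

parameter_defaults:
  CephPools:
    - name: mypool
      pg_num: 128
      rule_name: ''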
diff --git a/releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml b/releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml
new file mode 100644
index 00000000..626ecbaf
--- /dev/null
+++ b/releasenotes/notes/rhsm_proxy_verify-548f104c97cf5f90.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ When using an RHSM proxy, TripleO will now verify that the proxy can be
+ reached; otherwise we stop early and do not try to subscribe nodes.
diff --git a/releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml b/releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml
new file mode 100644
index 00000000..eb3ab5fc
--- /dev/null
+++ b/releasenotes/notes/sat-tools-0d0f0c53de9d34a5.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ When deploying with RHSM, sat-tools 6.2 will be installed instead of 6.1.
+ The new version is supported on RHEL 7.4 and provides the katello-agent package.
diff --git a/sample-env-generator/composable-roles.yaml b/sample-env-generator/composable-roles.yaml
index 91d6060f..2c929a4b 100644
--- a/sample-env-generator/composable-roles.yaml
+++ b/sample-env-generator/composable-roles.yaml
@@ -89,10 +89,8 @@ environments:
files:
overcloud.yaml:
parameters:
- - ControllerHostnameFormat
- ComputeHostnameFormat
- CephStorageHostnameFormat
- - ControllerCount
- ComputeCount
- CephStorageCount
puppet/services/time/ntp.yaml:
@@ -100,19 +98,21 @@ environments:
- NtpServer
sample-env-generator/composable-roles.yaml:
parameters:
+ - ControllerOpenstackHostnameFormat
- DnsServers
+ - ControllerOpenstackCount
- DatabaseCount
- MessagingCount
- NetworkerCount
- - OvercloudControllerFlavor
+ - OvercloudControllerOpenstackFlavor
- OvercloudComputeFlavor
- OvercloudCephStorageFlavor
- OvercloudDatabaseFlavor
- OvercloudMessagingFlavor
- OvercloudNetworkerFlavor
sample_values:
- ControllerCount: 3
- OvercloudControllerFlavor: control
+ ControllerOpenstackCount: 3
+ OvercloudControllerOpenstackFlavor: control
ComputeCount: 1
OvercloudComputeFlavor: compute
CephStorageCount: 1
@@ -135,6 +135,10 @@ parameters:
description: DNS servers to use for the Overcloud
type: comma_delimited_list
# Dynamic vars based on roles
+ ControllerOpenstackCount:
+ default: 0
+ description: Number of ControllerOpenstack nodes
+ type: number
DatabaseCount:
default: 0
description: Number of Database nodes
@@ -147,10 +151,21 @@ parameters:
default: 0
description: Number of Networker nodes
type: number
+ ControllerOpenstackHostnameFormat:
+ type: string
+ description: >
+ Format for ControllerOpenstack node hostnames
+ Note %index% is translated into the index of the node, e.g 0/1/2 etc
+ and %stackname% is replaced with the stack name e.g overcloud
+ default: "%stackname%-controller-%index%"
OvercloudControllerFlavor:
default: control
description: Name of the flavor for Controller nodes
type: string
+ OvercloudControllerOpenstackFlavor:
+ default: control
+ description: Name of the flavor for ControllerOpenstack nodes
+ type: string
OvercloudComputeFlavor:
default: compute
description: Name of the flavor for Compute nodes
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index c3229621..76f856db 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -46,7 +46,10 @@ OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags', 'volumes' ]
# consistency across files on. This should only contain parameters whose
# definition we cannot change for backwards compatibility reasons. New
# parameters to the templates should not be added to this list.
-PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
+ 'type',
+ 'default'],
+ 'ManagementNetCidr': ['default'],
'ManagementAllocationPools': ['default'],
'ExternalNetCidr': ['default'],
'ExternalAllocationPools': ['default'],