-rw-r--r--  docker/compute-post.yaml                     | 102
-rw-r--r--  docker/firstboot/install_docker_agents.yaml  |   8
-rw-r--r--  docker/firstboot/start_docker_agents.sh      |   9
-rw-r--r--  docker/generate_json_config.sh               |  96
-rw-r--r--  environments/docker-rdo.yaml                 |  21
-rwxr-xr-x  extraconfig/tasks/yum_update.sh              | 128
-rw-r--r--  puppet/ceph-storage.yaml                     |   7
-rw-r--r--  puppet/cinder-storage.yaml                   |   6
-rw-r--r--  puppet/compute.yaml                          |   6
-rw-r--r--  puppet/controller.yaml                       |   6
-rw-r--r--  puppet/hieradata/compute.yaml                |   6
-rw-r--r--  puppet/swift-storage.yaml                    |   7
12 files changed, 280 insertions(+), 122 deletions(-)
diff --git a/docker/compute-post.yaml b/docker/compute-post.yaml
index 0d049ebc..1dc7be16 100644
--- a/docker/compute-post.yaml
+++ b/docker/compute-post.yaml
@@ -9,6 +9,9 @@ parameters:
   NodeConfigIdentifiers:
     type: json
     description: Value which changes if the node configuration may need to be re-applied
+  DockerNamespace:
+    type: string
+    default: kollaglue
   DockerComputeImage:
     type: string
   DockerComputeDataImage:
@@ -67,8 +70,24 @@ resources:
       config: {get_resource: CopyEtcConfig}
       servers: {get_param: servers}
 
+  CopyJsonConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      outputs:
+        - name: result
+      config: {get_file: ./generate_json_config.sh}
+
+  CopyJsonDeployment:
+    type: OS::Heat::SoftwareDeployments
+    depends_on: CopyEtcDeployment
+    properties:
+      config: {get_resource: CopyJsonConfig}
+      servers: {get_param: servers}
+
   NovaComputeContainersDeploymentOVS:
     type: OS::Heat::StructuredDeployments
+    depends_on: CopyJsonDeployment
     properties:
       config: {get_resource: NovaComputeContainersConfigOVS}
       servers: {get_param: servers}
@@ -79,7 +98,10 @@ resources:
       group: docker-compose
       config:
         ovsvswitchd:
-          image: {get_param: DockerOvsVswitchdImage}
+          image:
+            list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOvsVswitchdImage} ]
           container_name: ovs-vswitchd
           net: host
           privileged: true
@@ -87,18 +109,23 @@ resources:
           volumes:
            - /run:/run
            - /lib/modules:/lib/modules:ro
+           - /var/lib/etc-data/json-config/ovs-vswitchd.json:/var/lib/kolla/config_files/config.json
           environment:
-            - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
 
         openvswitchdb:
-          image: {get_param: DockerOpenvswitchDBImage}
+          image:
+            list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchDBImage} ]
           container_name: ovs-db-server
           net: host
           restart: always
           volumes:
            - /run:/run
+           - /var/lib/etc-data/json-config/ovs-dbserver.json:/var/lib/kolla/config_files/config.json
           environment:
-            - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
 
   NovaComputeContainersDeploymentNetconfig:
     type: OS::Heat::SoftwareDeployments
@@ -122,7 +149,7 @@ resources:
 
   LibvirtContainersDeployment:
     type: OS::Heat::StructuredDeployments
-    depends_on: [CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig]
+    depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig]
     properties:
       config: {get_resource: LibvirtContainersConfig}
       servers: {get_param: servers}
@@ -133,11 +160,20 @@ resources:
       group: docker-compose
       config:
         computedata:
-          image: {get_param: DockerComputeDataImage}
+          image:
+            list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerComputeDataImage} ]
           container_name: computedata
+          volumes:
+           - /var/lib/nova/instances
+           - /var/lib/libvirt
 
         libvirt:
-          image: {get_param: DockerLibvirtImage}
+          image:
+            list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
           container_name: libvirt
           net: host
           pid: host
@@ -146,16 +182,17 @@ resources:
           volumes:
            - /run:/run
            - /lib/modules:/lib/modules:ro
-           - /var/lib/etc-data/libvirt/libvirtd.conf:/opt/kolla/libvirtd/libvirtd.conf
-           - /var/lib/nova/instances:/var/lib/nova/instances
+           - /sys/fs/cgroup:/sys/fs/cgroup
+           - /var/lib/etc-data/json-config/nova-libvirt.json:/var/lib/kolla/config_files/config.json
+           - /var/lib/etc-data/libvirt/libvirtd.conf:/var/lib/kolla/config_files/libvirtd.conf
           environment:
-            - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           volumes_from:
            - computedata
 
   NovaComputeContainersDeployment:
     type: OS::Heat::StructuredDeployments
-    depends_on: [CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig, LibvirtContainersDeployment]
+    depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig, LibvirtContainersDeployment]
     properties:
       config: {get_resource: NovaComputeContainersConfig}
       servers: {get_param: servers}
@@ -166,7 +203,10 @@ resources:
       group: docker-compose
       config:
         openvswitch:
-          image: {get_param: DockerOpenvswitchImage}
+          image:
+            list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
           container_name: openvswitch
           net: host
           privileged: true
@@ -174,17 +214,20 @@ resources:
           volumes:
            - /run:/run
            - /lib/modules:/lib/modules:ro
+           - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
            - /var/lib/etc-data/neutron/neutron.conf:/etc/kolla/neutron-openvswitch-agent/:ro
-           - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/etc/kolla/neutron-openvswitch-agent/:ro
+           - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
+           - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
           environment:
-            - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
+            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           volumes_from:
            - computedata
 
-          # FIXME: Kolla now uses a JSON model to run custom commands. We rebuilt a custom container to read in KOLLA_COMMAND_ARGS
-          # FIXME: Here we're subjugating kolla's start scripts because we want our custom run command
         neutronagent:
-          image: {get_param: DockerOpenvswitchImage}
+          image:
+            list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
           container_name: neutronagent
           net: host
           pid: host
@@ -193,33 +236,32 @@ resources:
           volumes:
            - /run:/run
            - /lib/modules:/lib/modules:ro
-           - /var/lib/etc-data/neutron/neutron.conf:/etc/neutron/neutron.conf:ro
-           - /var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini:ro
+           - /var/lib/etc-data/json-config/neutron-agent.json:/var/lib/kolla/config_files/config.json
+           - /var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro
+           - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
           environment:
-            - KOLLA_CONFIG_STRATEGY=CONFIG_EXTERNAL_COPY_ALWAYS
-            # FIXME: Kolla now uses a JSON model to run custom commands. We rebuilt a custom container to read in KOLLA_COMMAND_ARGS
-            - KOLLA_COMMAND_ARGS=--config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
+            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           volumes_from:
            - computedata
 
         novacompute:
-          image: {get_param: DockerComputeImage}
+          image:
+            list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerComputeImage} ]
           container_name: novacompute
           net: host
           privileged: true
           restart: always
           volumes:
            - /run:/run
-           - /sys/fs/cgroup:/sys/fs/cgroup
            - /lib/modules:/lib/modules:ro
-           - /var/lib/etc-data/:/etc/:ro
-           - /var/lib/nova/instances:/var/lib/nova/instances
+           - /var/lib/etc-data/json-config/nova-compute.json:/var/lib/kolla/config_files/config.json
+           - /var/lib/etc-data/nova/nova.conf:/var/lib/kolla/config_files/nova.conf:ro
+          environment:
+            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           volumes_from:
            - computedata
-          # FIXME: this skips the kolla start.sh script and just starts Nova
-          # Ideally we'd have an environment that switched the kolla container
-          # to be externally configured.
-          command: /usr/bin/nova-compute
 
   ExtraConfig:
     depends_on: NovaComputeContainersDeployment
diff --git a/docker/firstboot/install_docker_agents.yaml b/docker/firstboot/install_docker_agents.yaml
index 8adc8939..22a8ff92 100644
--- a/docker/firstboot/install_docker_agents.yaml
+++ b/docker/firstboot/install_docker_agents.yaml
@@ -4,6 +4,12 @@ parameters:
   DockerAgentImage:
     type: string
     default: dprince/heat-docker-agents-centos
+  DockerNamespace:
+    type: string
+    default: kollaglue
+  DockerNamespaceIsRegistry:
+    type: boolean
+    default: false
 
 resources:
 
@@ -21,6 +27,8 @@ resources:
         str_replace:
           params:
             $agent_image: {get_param: DockerAgentImage}
+            $docker_registry: {get_param: DockerNamespace}
+            $docker_namespace_is_registry: {get_param: DockerNamespaceIsRegistry}
           template: {get_file: ./start_docker_agents.sh}
 
 outputs:
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index 88759a5d..a0e95d11 100644
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -7,7 +7,7 @@ if ! hostname | grep compute &>/dev/null; then
     exit 0
 fi
 
-mkdir -p /var/lib/etc-data/ #FIXME: this should be a docker data container
+mkdir -p /var/lib/etc-data/json-config #FIXME: this should be a docker data container
 
 # heat-docker-agents service
 cat <<EOF > /etc/systemd/system/heat-docker-agents.service
@@ -38,14 +38,13 @@ EOF
 #echo "ADD_REGISTRY='--registry-mirror $docker_registry'" >> /etc/sysconfig/docker
 
 # Local docker registry 1.8
-#/bin/sed -i s/ADD_REGISTRY/#ADD_REGISTRY/ /etc/sysconfig/docker
+if [ $docker_namespace_is_registry ]; then
+    /bin/sed -i "s/# INSECURE_REGISTRY='--insecure-registry '/INSECURE_REGISTRY='--insecure-registry $docker_registry'/g" /etc/sysconfig/docker
+fi
 
 /sbin/setenforce 0
 /sbin/modprobe ebtables
 
-# Create /var/lib/etc-data for now. FIXME: This should go into a data container.
-#mkdir -p /var/lib/etc-data
-
 echo nameserver 8.8.8.8 > /etc/resolv.conf
 
 # We need hostname -f to return in a centos container for the puppet hook
diff --git a/docker/generate_json_config.sh b/docker/generate_json_config.sh
new file mode 100644
index 00000000..5cf49226
--- /dev/null
+++ b/docker/generate_json_config.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+KOLLA_DEST=/var/lib/kolla/config_files
+JSON_DEST=/var/lib/etc-data/json-config
+
+# For more config file generation, simply define a new SERVICE_DATA_
+# prefixed variable.  The command string is quoted to include config-file
+# arguments.  Note that the variable name following SERVICE_DATA_ will be
+# the filename the JSON config is written to.
+
+# [EXAMPLE]: SERVICE_DATA_<SERVICE_NAME>=(<command> <source> <dest> <owner> <perms>)
+
+SERVICE_DATA_NOVA_LIBVIRT=("/usr/sbin/libvirtd" libvirtd.conf /etc/libvirt/libvirtd.conf root 0644)
+SERVICE_DATA_NOVA_COMPUTE=("/usr/bin/nova-compute" nova.conf /etc/nova/nova.conf nova 0600)
+SERVICE_DATA_NEUTRON_OPENVSWITCH_AGENT=("/usr/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini" neutron.conf /etc/neutron/neutron.conf neutron 0600 ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini neutron 0600)
+SERVICE_DATA_NEUTRON_AGENT=("/usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini" neutron.conf /etc/neutron/neutron.conf neutron 0600 ovs_neutron_plugin.ini /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini neutron 0600)
+SERVICE_DATA_OVS_VSWITCHD=("/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log")
+SERVICE_DATA_OVS_DBSERVER=("/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log")
+
+function create_json_header() {
+    local command=$1
+
+    echo "\
+{
+    \"command\": \"${command[@]}\","
+
+}
+
+function create_config_file_header() {
+    echo "    \"config_files\": ["
+}
+
+function create_config_file_block() {
+    local source=$KOLLA_DEST/$1
+    local dest=$2
+    local owner=$3
+    local perm=$4
+
+    printf "\
+\t{
+\t    \"source\": \"$source\",
+\t    \"dest\": \"$dest\",
+\t    \"owner\": \"$owner\",
+\t    \"perm\": \"$perm\"
+\t}"
+}
+
+function add_trailing_comma() {
+    printf ", \n"
+}
+
+function create_config_file_trailer() {
+    echo -e "\n    ]"
+}
+
+function create_json_trailer() {
+    echo "}"
+}
+
+function create_json_data() {
+    local config_data=$1
+    shift
+
+    create_json_header "$config_data"
+    create_config_file_header
+    while [ "$1" ]; do
+        create_config_file_block "$@"
+        shift 4
+        if [ "$1" ]; then
+            add_trailing_comma
+        fi
+    done
+    create_config_file_trailer
+    create_json_trailer
+}
+
+function write_json_data() {
+
+    local name=$1[@]
+    local service_data=("${!name}")
+
+    local service_name=${1#SERVICE_DATA_} # chop SERVICE_DATA_ prefix
+    service_name=${service_name//_/-} # switch underscore to dash
+    service_name=${service_name,,} # change to lowercase
+
+    echo "Creating JSON file ${service_name}"
+    create_json_data "${service_data[@]}" > "$JSON_DEST/$service_name.json"
+}
+
+function process_configs() {
+    for service in ${!SERVICE_DATA_*}; do
+        write_json_data "${service}"
+    done
+}
+
+process_configs
diff --git a/environments/docker-rdo.yaml b/environments/docker-rdo.yaml
index 8a6e1018..23f318bf 100644
--- a/environments/docker-rdo.yaml
+++ b/environments/docker-rdo.yaml
@@ -7,11 +7,18 @@ resource_registry:
 parameters:
   NovaImage: atomic-image
 
+# FIXME: When Kolla cuts liberty tag we can use kollaglue registry
 parameter_defaults:
-  DockerComputeImage: rthallisey/centos-binary-nova-compute:liberty
-  DockerComputeDataImage: kollaglue/centos-rdo-nova-compute-data:liberty2
-  DockerLibvirtImage: kollaglue/centos-rdo-nova-libvirt:liberty2
-  DockerNeutronAgentImage: kollaglue/centos-rdo-neutron-agents:liberty2
-  DockerOpenvswitchImage: rthallisey/centos-rdo-neutron-openvswitch-agent:latest
-  DockerOvsVswitchdImage: kollaglue/centos-rdo-ovs-vswitchd:liberty2
-  DockerOpenvswitchDBImage: kollaglue/centos-rdo-ovs-db-server:liberty2
+  # Defaults to kollaglue. Specify a local docker registry
+  # Example: 192.168.122.131:8787
+  DockerNamespace: kollaglue
+  # Enable local Docker registry
+  DockerNamespaceIsRegistry: false
+  # Compute Node Images
+  DockerComputeImage: centos-binary-nova-compute:liberty
+  DockerComputeDataImage: centos-binary-data:liberty
+  DockerLibvirtImage: centos-binary-nova-libvirt:liberty
+  DockerNeutronAgentImage: centos-binary-neutron-agents:liberty
+  DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:liberty
+  DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:liberty
+  DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:liberty
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index fa523e83..6ab2501c 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -22,7 +22,7 @@ mkdir -p $timestamp_dir
 update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}
 
 # seconds to wait for this node to rejoin the cluster after update
-cluster_start_timeout=360
+cluster_start_timeout=600
 galera_sync_timeout=360
 
 timestamp_file="$timestamp_dir/$update_identifier"
@@ -42,109 +42,81 @@ if [[ "$list_updates" == "" ]]; then
 fi
 
 pacemaker_status=$(systemctl is-active pacemaker)
+pacemaker_dumpfile=$(mktemp)
 
 if [[ "$pacemaker_status" == "active" ]] ; then
-    echo "Checking for and adding missing constraints"
+    echo "Dumping Pacemaker config"
+    pcs cluster cib $pacemaker_dumpfile
+
+    echo "Checking for missing constraints"
 
     if ! pcs constraint order show | grep "start openstack-nova-novncproxy-clone then start openstack-nova-api-clone"; then
-        pcs constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
+        pcs -f $pacemaker_dumpfile constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
     fi
 
     if ! pcs constraint order show | grep "start rabbitmq-clone then start openstack-keystone-clone"; then
-        pcs constraint order start rabbitmq-clone then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order start rabbitmq-clone then openstack-keystone-clone
     fi
 
     if ! pcs constraint order show | grep "promote galera-master then start openstack-keystone-clone"; then
-        pcs constraint order promote galera-master then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order promote galera-master then openstack-keystone-clone
     fi
 
     if ! pcs constraint order show | grep "start haproxy-clone then start openstack-keystone-clone"; then
-        pcs constraint order start haproxy-clone then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order start haproxy-clone then openstack-keystone-clone
    fi
 
     if ! pcs constraint order show | grep "start memcached-clone then start openstack-keystone-clone"; then
-        pcs constraint order start memcached-clone then openstack-keystone-clone
+        pcs -f $pacemaker_dumpfile constraint order start memcached-clone then openstack-keystone-clone
     fi
 
     if ! pcs constraint order show | grep "promote redis-master then start openstack-ceilometer-central-clone"; then
-        pcs constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
+        pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
     fi
 
     if ! pcs resource defaults | grep "resource-stickiness: INFINITY"; then
-        pcs resource defaults resource-stickiness=INFINITY
+        pcs -f $pacemaker_dumpfile resource defaults resource-stickiness=INFINITY
     fi
 
     echo "Setting resource start/stop timeouts"
-
-    # timeouts for non-openstack services and special cases
-    pcs resource update haproxy op start timeout=100s
-    pcs resource update haproxy op stop timeout=100s
-    # mongod start timeout is also higher, setting only stop timeout
+    SERVICES="
+haproxy
+memcached
+httpd
+neutron-dhcp-agent
+neutron-l3-agent
+neutron-metadata-agent
+neutron-openvswitch-agent
+neutron-server
+openstack-ceilometer-alarm-evaluator
+openstack-ceilometer-alarm-notifier
+openstack-ceilometer-api
+openstack-ceilometer-central
+openstack-ceilometer-collector
+openstack-ceilometer-notification
+openstack-cinder-api
+openstack-cinder-scheduler
+openstack-cinder-volume
+openstack-glance-api
+openstack-glance-registry
+openstack-heat-api
+openstack-heat-api-cfn
+openstack-heat-api-cloudwatch
+openstack-heat-engine
+openstack-keystone
+openstack-nova-api
+openstack-nova-conductor
+openstack-nova-consoleauth
+openstack-nova-novncproxy
+openstack-nova-scheduler"
+    for service in $SERVICES; do
+        pcs -f $pacemaker_dumpfile resource update $service op start timeout=100s op stop timeout=100s
+    done
+    # mongod start timeout is higher, setting only stop timeout
     pcs resource update mongod op stop timeout=100s
-    # rabbit start timeout is already 100s
-    pcs resource update rabbitmq op stop timeout=100s
-    pcs resource update memcached op start timeout=100s
-    pcs resource update memcached op stop timeout=100s
-    pcs resource update httpd op start timeout=100s
-    pcs resource update httpd op stop timeout=100s
-    # neutron-netns-cleanup stop timeout is 300s, setting only start timeout
-    pcs resource update neutron-netns-cleanup op start timeout=100s
-    # neutron-ovs-cleanup stop timeout is 300s, setting only start timeout
-    pcs resource update neutron-ovs-cleanup op start timeout=100s
-
-    # timeouts for openstack services
-    pcs resource update neutron-dhcp-agent op start timeout=100s
-    pcs resource update neutron-dhcp-agent op stop timeout=100s
-    pcs resource update neutron-l3-agent op start timeout=100s
-    pcs resource update neutron-l3-agent op stop timeout=100s
-    pcs resource update neutron-metadata-agent op start timeout=100s
-    pcs resource update neutron-metadata-agent op stop timeout=100s
-    pcs resource update neutron-openvswitch-agent op start timeout=100s
-    pcs resource update neutron-openvswitch-agent op stop timeout=100s
-    pcs resource update neutron-server op start timeout=100s
-    pcs resource update neutron-server op stop timeout=100s
-    pcs resource update openstack-ceilometer-alarm-evaluator op start timeout=100s
-    pcs resource update openstack-ceilometer-alarm-evaluator op stop timeout=100s
-    pcs resource update openstack-ceilometer-alarm-notifier op start timeout=100s
-    pcs resource update openstack-ceilometer-alarm-notifier op stop timeout=100s
-    pcs resource update openstack-ceilometer-api op start timeout=100s
-    pcs resource update openstack-ceilometer-api op stop timeout=100s
-    pcs resource update openstack-ceilometer-central op start timeout=100s
-    pcs resource update openstack-ceilometer-central op stop timeout=100s
-    pcs resource update openstack-ceilometer-collector op start timeout=100s
-    pcs resource update openstack-ceilometer-collector op stop timeout=100s
-    pcs resource update openstack-ceilometer-notification op start timeout=100s
-    pcs resource update openstack-ceilometer-notification op stop timeout=100s
-    pcs resource update openstack-cinder-api op start timeout=100s
-    pcs resource update openstack-cinder-api op stop timeout=100s
-    pcs resource update openstack-cinder-scheduler op start timeout=100s
-    pcs resource update openstack-cinder-scheduler op stop timeout=100s
-    pcs resource update openstack-cinder-volume op start timeout=100s
-    pcs resource update openstack-cinder-volume op stop timeout=100s
-    pcs resource update openstack-glance-api op start timeout=100s
-    pcs resource update openstack-glance-api op stop timeout=100s
-    pcs resource update openstack-glance-registry op start timeout=100s
-    pcs resource update openstack-glance-registry op stop timeout=100s
-    pcs resource update openstack-heat-api op start timeout=100s
-    pcs resource update openstack-heat-api op stop timeout=100s
-    pcs resource update openstack-heat-api-cfn op start timeout=100s
-    pcs resource update openstack-heat-api-cfn op stop timeout=100s
-    pcs resource update openstack-heat-api-cloudwatch op start timeout=100s
-    pcs resource update openstack-heat-api-cloudwatch op stop timeout=100s
-    pcs resource update openstack-heat-engine op start timeout=100s
-    pcs resource update openstack-heat-engine op stop timeout=100s
-    pcs resource update openstack-keystone op start timeout=100s
-    pcs resource update openstack-keystone op stop timeout=100s
-    pcs resource update openstack-nova-api op start timeout=100s
-    pcs resource update openstack-nova-api op stop timeout=100s
-    pcs resource update openstack-nova-conductor op start timeout=100s
-    pcs resource update openstack-nova-conductor op stop timeout=100s
-    pcs resource update openstack-nova-consoleauth op start timeout=100s
-    pcs resource update openstack-nova-consoleauth op stop timeout=100s
-    pcs resource update openstack-nova-novncproxy op start timeout=100s
-    pcs resource update openstack-nova-novncproxy op stop timeout=100s
-    pcs resource update openstack-nova-scheduler op start timeout=100s
-    pcs resource update openstack-nova-scheduler op stop timeout=100s
+
+    echo "Applying new Pacemaker config"
+    pcs cluster cib-push $pacemaker_dumpfile
 
     echo "Pacemaker running, stopping cluster node and doing full package update"
     node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 0d968504..0bf0fde4 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -54,7 +54,11 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
-
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
 
 resources:
   CephStorage:
@@ -126,6 +130,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
      server: {get_resource: CephStorage}
+      actions: {get_param: NetworkDeploymentActions}
 
   CephStorageDeployment:
     type: OS::Heat::StructuredDeployment
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index b536418d..b500e43b 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -107,6 +107,11 @@ parameters:
   MysqlVirtualIP:
     type: string
     default: ''
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
 
 resources:
   BlockStorage:
@@ -178,6 +183,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
      server: {get_resource: BlockStorage}
+      actions: {get_param: NetworkDeploymentActions}
 
   BlockStorageDeployment:
     type: OS::Heat::StructuredDeployment
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index c147a0f7..6ae39132 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -273,6 +273,11 @@ parameters:
   Hostname:
     type: string
     default: '' # Defaults to Heat created hostname
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
 
 resources:
 
@@ -347,6 +352,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
      server: {get_resource: NovaCompute}
+      actions: {get_param: NetworkDeploymentActions}
       input_values:
         bridge_name: {get_param: NeutronPhysicalBridge}
         interface_name: {get_param: NeutronPublicInterface}
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index d3a8a93d..d47e013e 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -614,6 +614,11 @@ parameters:
   Hostname:
     type: string
     default: '' # Defaults to Heat created hostname
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
 
 resources:
 
@@ -710,6 +715,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
      server: {get_resource: Controller}
+      actions: {get_param: NetworkDeploymentActions}
       input_values:
         bridge_name: br-ex
         interface_name: {get_param: NeutronPublicInterface}
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 173020f8..fa8dcc81 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -12,6 +12,12 @@ nova::compute::libvirt::migration_support: true
 
 nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
 
+# Changing the default from 512MB. The current templates can not deploy
+# overclouds with swap. On an idle compute node, we see ~1024MB of RAM
+# used. 2048 is suggested to account for other possible operations for
+# example openvswitch.
+nova::compute::reserved_host_memory: 2048
+
 ceilometer::agent::auth::auth_tenant_name: 'service'
 ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 3d9b9018..19a7c7a3 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -77,7 +77,11 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
-
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat action when to apply network configuration changes
+    default: ['CREATE']
 
 resources:
 
@@ -149,6 +153,7 @@ resources:
     properties:
       config: {get_resource: NetworkConfig}
      server: {get_resource: SwiftStorage}
+      actions: {get_param: NetworkDeploymentActions}
 
   SwiftStorageHieraConfig:
     type: OS::Heat::StructuredConfig