27 files changed, 711 insertions, 35 deletions
@@ -44,3 +44,18 @@ doc/_build # Built by pbr (python setup.py sdist): AUTHORS ChangeLog + +extraconfig/all_nodes/mac_hostname.yaml +extraconfig/all_nodes/random_string.yaml +extraconfig/all_nodes/swap-partition.yaml +extraconfig/all_nodes/swap.yaml +extraconfig/tasks/major_upgrade_pacemaker_init.yaml +network/service_net_map.yaml +overcloud-resource-registry-puppet.yaml +overcloud.yaml +puppet/blockstorage-config.yaml +puppet/cephstorage-config.yaml +puppet/compute-config.yaml +puppet/controller-config.yaml +puppet/objectstorage-config.yaml +puppet/post.yaml @@ -1,3 +1,12 @@ +======================== +Team and repository tags +======================== + +.. image:: http://governance.openstack.org/badges/tripleo-heat-templates.svg + :target: http://governance.openstack.org/reference/tags/index.html + +.. Change things from this point on + ====================== tripleo-heat-templates ====================== diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml index 1a5242a9..cb39d3b0 100644 --- a/ci/environments/scenario001-multinode.yaml +++ b/ci/environments/scenario001-multinode.yaml @@ -1,6 +1,9 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml + OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml parameter_defaults: ControllerServices: @@ -47,10 +50,28 @@ parameter_defaults: - OS::TripleO::Services::GnocchiApi - OS::TripleO::Services::GnocchiMetricd - OS::TripleO::Services::GnocchiStatsd + - OS::TripleO::Services::CephMon + - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::CephClient ControllerExtraConfig: nova::compute::libvirt::services::libvirt_virt_type: qemu nova::compute::libvirt::libvirt_virt_type: qemu Debug: true - # we don't deploy Swift so we switch to file backend. - GlanceBackend: 'file' - GnocchiBackend: 'file' + #NOTE(gfidente): not great but we need this to deploy on ext4 + #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ + ExtraConfig: + ceph::profile::params::osd_max_object_name_len: 256 + ceph::profile::params::osd_max_object_namespace_len: 64 + #NOTE: These ID's and keys should be regenerated for + # a production deployment. What is here is suitable for + # developer and CI testing only. 
+ CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' + CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ==' + CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' + CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' + NovaEnableRbdBackend: true + CinderEnableRbdBackend: true + CinderBackupBackend: ceph + GlanceBackend: rbd + GnocchiBackend: rbd + CinderEnableIscsiBackend: false diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml index e540bc55..092426cb 100644 --- a/ci/environments/scenario003-multinode.yaml +++ b/ci/environments/scenario003-multinode.yaml @@ -1,11 +1,11 @@ resource_registry: OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml - OS::TripleO::Services::SaharaApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/sahara-api.yaml - OS::TripleO::Services::SaharaEngine: /usr/share/openstack-tripleo-heat-templates/puppet/services/sahara-engine.yaml - OS::TripleO::Services::MistralApi: /usr/share/openstack-tripleo-heat-templates/puppet/services/mistral-api.yaml - OS::TripleO::Services::MistralEngine: /usr/share/openstack-tripleo-heat-templates/puppet/services/mistral-engine.yaml - OS::TripleO::Services::MistralExecutor: /usr/share/openstack-tripleo-heat-templates/puppet/services/mistral-executor.yaml + OS::TripleO::Services::SaharaApi: ../../puppet/services/sahara-api.yaml + OS::TripleO::Services::SaharaEngine: ../../puppet/services/sahara-engine.yaml + OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml + OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml + OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml parameter_defaults: ControllerServices: @@ -49,3 +49,4 @@ parameter_defaults: Debug: true # we don't deploy Swift so we switch to file backend. 
GlanceBackend: 'file' + KeystoneTokenProvider: 'fernet' diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml new file mode 100644 index 00000000..4aa18709 --- /dev/null +++ b/ci/environments/scenario004-multinode.yaml @@ -0,0 +1,62 @@ +resource_registry: + OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml + OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml + OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml + OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml + OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml + OS::TripleO::Services::CephRgw: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-rgw.yaml + OS::TripleO::Services::SwiftProxy: OS::Heat::None + OS::TripleO::Services::SwiftStorage: OS::Heat::None + OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None + +parameter_defaults: + ControllerServices: + - OS::TripleO::Services::Kernel + - OS::TripleO::Services::Keystone + - OS::TripleO::Services::GlanceApi + - OS::TripleO::Services::GlanceRegistry + - OS::TripleO::Services::HeatApi + - OS::TripleO::Services::HeatApiCfn + - OS::TripleO::Services::HeatApiCloudwatch + - OS::TripleO::Services::HeatEngine + - OS::TripleO::Services::MySQL + - OS::TripleO::Services::NeutronDhcpAgent + - OS::TripleO::Services::NeutronL3Agent + - OS::TripleO::Services::NeutronMetadataAgent + - OS::TripleO::Services::NeutronServer + - OS::TripleO::Services::NeutronCorePlugin + - OS::TripleO::Services::NeutronOvsAgent + - OS::TripleO::Services::RabbitMQ + - OS::TripleO::Services::HAproxy + - OS::TripleO::Services::Keepalived + - OS::TripleO::Services::Memcached + - OS::TripleO::Services::Pacemaker + - OS::TripleO::Services::NovaConductor + - OS::TripleO::Services::NovaApi + - OS::TripleO::Services::NovaMetadata + - OS::TripleO::Services::NovaScheduler + - OS::TripleO::Services::Ntp + - OS::TripleO::Services::Snmp + - OS::TripleO::Services::Timezone + - OS::TripleO::Services::NovaCompute + - OS::TripleO::Services::NovaLibvirt + - OS::TripleO::Services::CephMon + - OS::TripleO::Services::CephOSD + - OS::TripleO::Services::CephClient + - OS::TripleO::Services::CephRgw + ControllerExtraConfig: + nova::compute::libvirt::services::libvirt_virt_type: qemu + nova::compute::libvirt::libvirt_virt_type: qemu + Debug: true + #NOTE(gfidente): not great but we need this to deploy on ext4 + #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ + ExtraConfig: + ceph::profile::params::osd_max_object_name_len: 256 + ceph::profile::params::osd_max_object_namespace_len: 64 + #NOTE: These ID's and keys should be regenerated for + # a production deployment. What is here is suitable for + # developer and CI testing only. 
+ CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19' + CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ==' + CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' + CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' diff --git a/ci/pingtests/scenario001-multinode.yaml b/ci/pingtests/scenario001-multinode.yaml index 9dcbd390..7374846f 100644 --- a/ci/pingtests/scenario001-multinode.yaml +++ b/ci/pingtests/scenario001-multinode.yaml @@ -118,19 +118,21 @@ resources: ram: 512 vcpus: 1 - gnocchi_res_alarm: - type: OS::Aodh::GnocchiResourcesAlarm - properties: - description: Do stuff with gnocchi - metric: cpu_util - aggregation_method: mean - granularity: 60 - evaluation_periods: 1 - threshold: 50 - alarm_actions: [] - resource_type: instance - resource_id: { get_resource: server1 } - comparison_operator: gt +# Disabling this resource now +# https://bugs.launchpad.net/tripleo/+bug/1646506 +# gnocchi_res_alarm: +# type: OS::Aodh::GnocchiResourcesAlarm +# properties: +# description: Do stuff with gnocchi +# metric: cpu_util +# aggregation_method: mean +# granularity: 60 +# evaluation_periods: 1 +# threshold: 50 +# alarm_actions: [] +# resource_type: instance +# resource_id: { get_resource: server1 } +# comparison_operator: gt asg: type: OS::Heat::AutoScalingGroup diff --git a/ci/pingtests/scenario004-multinode.yaml b/ci/pingtests/scenario004-multinode.yaml new file mode 100644 index 00000000..17792cd1 --- /dev/null +++ b/ci/pingtests/scenario004-multinode.yaml @@ -0,0 +1,127 @@ +heat_template_version: 2013-05-23 + +description: > + HOT template to created resources deployed by scenario004. +parameters: + key_name: + type: string + description: Name of keypair to assign to servers + default: 'pingtest_key' + image: + type: string + description: Name of image to use for servers + default: 'pingtest_image' + public_net_name: + type: string + default: 'nova' + description: > + ID or name of public network for which floating IP addresses will be allocated + private_net_name: + type: string + description: Name of private network to be created + default: 'default-net' + private_net_cidr: + type: string + description: Private network address (CIDR notation) + default: '192.168.2.0/24' + private_net_gateway: + type: string + description: Private network gateway address + default: '192.168.2.1' + private_net_pool_start: + type: string + description: Start of private network IP address allocation pool + default: '192.168.2.100' + private_net_pool_end: + type: string + default: '192.168.2.200' + description: End of private network IP address allocation pool + +resources: + + key_pair: + type: OS::Nova::KeyPair + properties: + save_private_key: true + name: {get_param: key_name } + + private_net: + type: OS::Neutron::Net + properties: + name: { get_param: private_net_name } + + private_subnet: + type: OS::Neutron::Subnet + properties: + network_id: { get_resource: private_net } + cidr: { get_param: private_net_cidr } + gateway_ip: { get_param: private_net_gateway } + allocation_pools: + - start: { get_param: private_net_pool_start } + end: { get_param: private_net_pool_end } + + router: + type: OS::Neutron::Router + properties: + external_gateway_info: + network: { get_param: public_net_name } + + router_interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: private_subnet } + + server1: + type: OS::Nova::Server + properties: + name: Server1 + flavor: { get_resource: test_flavor } + image: { get_param: image } + 
key_name: { get_resource: key_pair } + networks: + - port: { get_resource: server1_port } + + server1_port: + type: OS::Neutron::Port + properties: + network_id: { get_resource: private_net } + fixed_ips: + - subnet_id: { get_resource: private_subnet } + security_groups: [{ get_resource: server_security_group }] + + server1_floating_ip: + type: OS::Neutron::FloatingIP + # TODO: investigate why we need this depends_on and if we could + # replace it by router_id with get_resource: router_interface + depends_on: router_interface + properties: + floating_network: { get_param: public_net_name } + port_id: { get_resource: server1_port } + + server_security_group: + type: OS::Neutron::SecurityGroup + properties: + description: Add security group rules for server + name: pingtest-security-group + rules: + - remote_ip_prefix: 0.0.0.0/0 + protocol: tcp + port_range_min: 22 + port_range_max: 22 + - remote_ip_prefix: 0.0.0.0/0 + protocol: icmp + + test_flavor: + type: OS::Nova::Flavor + properties: + ram: 512 + vcpus: 1 + +outputs: + server1_private_ip: + description: IP address of server1 in private network + value: { get_attr: [ server1, first_address ] } + server1_public_ip: + description: Floating IP address of server1 in public network + value: { get_attr: [ server1_floating_ip, floating_ip_address ] } diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml new file mode 100644 index 00000000..7e10014b --- /dev/null +++ b/environments/major-upgrade-composable-steps.yaml @@ -0,0 +1,3 @@ +resource_registry: + OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml + OS::TripleO::PostDeploySteps: OS::Heat::None diff --git a/hosts-config.yaml b/hosts-config.yaml index b5a22b7f..a24b9bb4 100644 --- a/hosts-config.yaml +++ b/hosts-config.yaml @@ -8,11 +8,18 @@ parameters: resources: hostsConfigImpl: - type: OS::Heat::StructuredConfig + type: OS::Heat::SoftwareConfig properties: - group: os-apply-config - config: - hosts: {get_param: hosts} + group: script + inputs: + - name: hosts + default: + list_join: + - ' ' + - str_split: + - '\n' + - {get_param: hosts} + config: {get_file: scripts/hosts-config.sh} outputs: config_id: @@ -25,3 +32,6 @@ outputs: hostname-based access to the deployed nodes (useful for testing without setting up a DNS). value: {get_attr: [hostsConfigImpl, config, hosts]} + OS::stack_id: + description: The ID of the hostsConfigImpl resource. 
+ value: {get_resource: hostsConfigImpl} diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index 30b9f2b9..21013bdc 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -100,6 +100,10 @@ resource_registry: # validation resources OS::TripleO::AllNodes::Validation: all-nodes-validation.yaml + # Upgrade resources + OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml + OS::TripleO::UpgradeSteps: OS::Heat::None + # services OS::TripleO::Services: puppet/services/services.yaml OS::TripleO::Services::Apache: puppet/services/apache.yaml diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index ba1c6b36..39a092b1 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -187,7 +187,7 @@ resources: type: string value: list_join: - - '\n' + - "\n" - - str_replace: template: IP HOST params: @@ -370,7 +370,7 @@ resources: properties: hosts: list_join: - - '\n' + - "\n" - - if: - add_vips_to_etc_hosts - {get_attr: [VipHosts, value]} @@ -378,7 +378,7 @@ resources: - {% for role in roles %} - list_join: - - '\n' + - "\n" - {get_attr: [{{role.name}}, hosts_entry]} {% endfor %} @@ -604,6 +604,21 @@ resources: {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} {% endfor %} + # Upgrade steps for all roles + AllNodesUpgradeSteps: + type: OS::TripleO::UpgradeSteps + depends_on: AllNodesDeploySteps + properties: + servers: +{% for role in roles %} + {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]} +{% endfor %} + role_data: +{% for role in roles %} + {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} +{% endfor %} + + outputs: ManagedEndpoints: description: Asserts that the keystone endpoints have been provisioned. @@ -636,3 +651,9 @@ outputs: {% for role in roles %} {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]} {% endfor %} + RoleData: + description: The configuration data associated with each role + value: +{% for role in roles %} + {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]} +{% endfor %} diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml new file mode 100644 index 00000000..f8dad433 --- /dev/null +++ b/puppet/major_upgrade_steps.j2.yaml @@ -0,0 +1,98 @@ +heat_template_version: 2016-10-14 +description: 'Upgrade steps for all roles' + +parameters: + servers: + type: json + + role_data: + type: json + description: Mapping of Role name e.g Controller to the per-role data + + UpdateIdentifier: + type: string + description: > + Setting to a previously unused value during stack-update will trigger + the Upgrade resources to re-run on all roles. + + UpgradeInitCommand: + type: string + description: | + Command or script snippet to run on all overcloud nodes to + initialize the upgrade process. E.g. a repository switch. 
+ default: '' + +resources: + + # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004 + UpgradeInitConfig: + type: OS::Heat::SoftwareConfig + properties: + group: script + config: + list_join: + - '' + - - "#!/bin/bash\n\n" + - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n" + - get_param: UpgradeInitCommand + +{% for role in roles %} + {{role.name}}Upgrade_Init: + type: OS::Heat::StructuredDeploymentGroup + properties: + name: {{role.name}}Upgrade_Init + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: UpgradeInitConfig} +{% endfor %} + +# Upgrade Steps for all roles +# FIXME(shardy): would be nice to make the number of steps configurable +{% for step in range(1, 8) %} + {% for role in roles %} + # Step {{step}} resources + {{role.name}}UpgradeConfig_Step{{step}}: + type: OS::TripleO::UpgradeConfig + # The UpgradeConfig resources could actually be created without + # serialization, but the event output is easier to follow if we + # do, and there should be minimal performance hit (creating the + # config is cheap compared to the time to apply the deployment). + depends_on: + {% if step == 1 %} + - {{role.name}}Upgrade_Init + {% else %} + {% for dep in roles %} + - {{dep.name}}Upgrade_Step{{step -1}} + {% endfor %} + {% endif %} + properties: + UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]} + step: {{step}} + + {{role.name}}Upgrade_Step{{step}}: + type: OS::Heat::StructuredDeploymentGroup + {% if step > 1 %} + depends_on: + {% for dep in roles %} + - {{dep.name}}Upgrade_Step{{step -1}} + {% endfor %} + {% endif %} + properties: + name: {{role.name}}Upgrade_Step{{step}} + servers: {get_param: [servers, {{role.name}}]} + config: {get_resource: {{role.name}}UpgradeConfig_Step{{step}}} + input_values: + role: {{role.name}} + update_identifier: {get_param: UpdateIdentifier} + {% endfor %} +{% endfor %} + +outputs: + # Output the config for each role, just use Step1 as the config should be + # the same for all steps (only the tag provided differs) + upgrade_configs: + description: The per-role upgrade configuration used + value: +{% for role in roles %} + {{role.name.lower()}}: {get_attr: [{{role.name}}UpgradeConfig_Step1, upgrade_config]} +{% endfor %} + diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml index a09ed407..587ff58d 100644 --- a/puppet/role.role.j2.yaml +++ b/puppet/role.role.j2.yaml @@ -5,13 +5,17 @@ parameters: description: Flavor for the {{role}} node. default: baremetal type: string +{% if disable_constraints is not defined %} constraints: - custom_constraint: nova.flavor +{% endif %} {{role}}Image: type: string default: overcloud-full +{% if disable_constraints is not defined %} constraints: - custom_constraint: glance.image +{% endif %} ImageUpdatePolicy: default: 'REBUILD_PRESERVE_EPHEMERAL' description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. @@ -20,8 +24,10 @@ parameters: description: Name of an existing Nova key pair to enable SSH access to the instances type: string default: default +{% if disable_constraints is not defined %} constraints: - custom_constraint: nova.keypair +{% endif %} ServiceNetMap: default: {} description: Mapping of service_name -> network name. 
Typically set diff --git a/puppet/services/README.rst b/puppet/services/README.rst index 8fe51fa3..856b306e 100644 --- a/puppet/services/README.rst +++ b/puppet/services/README.rst @@ -22,8 +22,8 @@ Config Settings Each service may define a config_settings output variable which returns Hiera settings to be configured. -Steps ------ +Deployment Steps +---------------- Each service may define an output variable which returns a puppet manifest snippet that will run at each of the following steps. Earlier manifests @@ -49,8 +49,28 @@ are re-asserted when applying latter ones. 5) Service activation (Pacemaker) - 6) Fencing (Pacemaker) +Upgrade Steps +------------- + +Each service template may optionally define a `upgrade_tasks` key, which is a +list of ansible tasks to be performed during the upgrade process. + +Similar to the step_config, we allow a series of steps for the per-service +upgrade sequence, defined as ansible tasks with a tag e.g "step1" for the first +step, "step2" for the second, etc. + + Steps/tages correlate to the following: + + 1) Quiesce the control-plane, e.g disable LoadBalancer, stop pacemaker cluster + + 2) Stop all control-plane services, ready for upgrade + + 3) Perform a package update, (either specific packages or the whole system) + + 4) Start services needed for migration tasks (e.g DB) + + 5) Perform any migration tasks, e.g DB sync commands -Note: Not all roles currently support all steps: + 6) Start control-plane services - * ObjectStorage role only supports steps 2, 3 and 4 + 7) Any additional online migration tasks (e.g data migrations) diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml index 89c1a5ee..4b85d28f 100644 --- a/puppet/services/ceph-rgw.yaml +++ b/puppet/services/ceph-rgw.yaml @@ -68,6 +68,7 @@ outputs: ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]} ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]} ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]} + ceph::rgw::keystone::auth::user: 'swift' ceph::rgw::keystone::auth::password: {get_param: SwiftPassword} ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion} ceph::rgw::keystone::auth::tenant: 'service' diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml index 9690a8e1..abe752e2 100644 --- a/puppet/services/database/mysql.yaml +++ b/puppet/services/database/mysql.yaml @@ -92,3 +92,11 @@ outputs: $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]} step_config: | include ::tripleo::profile::base::database::mysql + upgrade_tasks: + - name: Stop service + tags: step2 + service: name=mariadb state=stopped + - name: Start service + tags: step4 + service: name=mariadb state=started + diff --git a/puppet/services/haproxy.yaml b/puppet/services/haproxy.yaml index c8edade5..675a79ec 100644 --- a/puppet/services/haproxy.yaml +++ b/puppet/services/haproxy.yaml @@ -77,3 +77,10 @@ outputs: - get_attr: [HAProxyInternalTLS, role_data, certificates_specs] step_config: | include ::tripleo::profile::base::haproxy + upgrade_tasks: + - name: Stop haproxy service + tags: step1 + service: name=haproxy state=stopped + - name: Start haproxy service + tags: step4 # Needed at step 4 for mysql + service: name=haproxy state=started diff --git a/puppet/services/keepalived.yaml b/puppet/services/keepalived.yaml index 6f2c44ec..b4f1a100 100644 --- a/puppet/services/keepalived.yaml +++ b/puppet/services/keepalived.yaml @@ -64,3 +64,11 @@ outputs: - 
tripleo::keepalived::public_virtual_interface: {get_param: PublicVirtualInterface} step_config: | include ::tripleo::profile::base::keepalived + upgrade_tasks: + - name: Stop keepalived service + tags: step1 + service: name=keepalived state=stopped + - name: Start keepalived service + tags: step4 # Needed at step 4 for mysql + service: name=keepalived state=started + diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml index 2b315489..e48d7037 100644 --- a/puppet/services/keystone.yaml +++ b/puppet/services/keystone.yaml @@ -248,3 +248,14 @@ outputs: keystone::db::mysql::allowed_hosts: - '%' - "%{hiera('mysql_bind_host')}" + # Ansible tasks to handle upgrade + upgrade_tasks: + - name: Stop keystone service (running under httpd) + tags: step2 + service: name=httpd state=stopped + - name: Sync keystone DB + tags: step5 + command: keystone-manage db_sync + - name: Start keystone service (running under httpd) + tags: step6 + service: name=httpd state=started diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml index 8db84fd8..190487e0 100644 --- a/puppet/services/rabbitmq.yaml +++ b/puppet/services/rabbitmq.yaml @@ -103,6 +103,13 @@ outputs: # internal_api_subnet - > IP/CIDR rabbitmq::node_ip_address: {get_param: [ServiceNetMap, RabbitmqNetwork]} rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues} - step_config: | include ::tripleo::profile::base::rabbitmq + upgrade_tasks: + - name: Stop rabbitmq service + tags: step2 + service: name=rabbitmq-server state=stopped + - name: Start rabbitmq service + tags: step6 + service: name=rabbitmq-server state=started + diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml index ffe2d2d4..13df5bbe 100644 --- a/puppet/services/services.yaml +++ b/puppet/services/services.yaml @@ -108,3 +108,8 @@ outputs: expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {}) data: {role_data: {get_attr: [ServiceChain, role_data]}} step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]} + upgrade_tasks: + yaql: + # Note we use distinct() here to filter any identical tasks, e.g yum update for all services + expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct() + data: {get_attr: [ServiceChain, role_data]} diff --git a/puppet/services/tripleo-packages.yaml b/puppet/services/tripleo-packages.yaml index 124f5fe8..69912fa5 100644 --- a/puppet/services/tripleo-packages.yaml +++ b/puppet/services/tripleo-packages.yaml @@ -32,3 +32,7 @@ outputs: tripleo::packages::enable_install: {get_param: EnablePackageInstall} step_config: | include ::tripleo::packages + upgrade_tasks: + - name: Update all packages + tags: step3 + yum: name=* state=latest diff --git a/puppet/upgrade_config.yaml b/puppet/upgrade_config.yaml new file mode 100644 index 00000000..c67e10b3 --- /dev/null +++ b/puppet/upgrade_config.yaml @@ -0,0 +1,48 @@ +heat_template_version: 2016-10-14 +description: 'Upgrade for via ansible by applying a step related tag' + +parameters: + UpgradeStepConfig: + type: json + description: Config (ansible yaml) that will be used to step through the deployment. 
+ default: '' + + step: + type: string + description: Step number of the upgrade + +resources: + + AnsibleConfig: + type: OS::Heat::Value + properties: + value: + str_replace: + template: CONFIG + params: + CONFIG: + - hosts: localhost + connection: local + tasks: {get_param: UpgradeStepConfig} + + AnsibleUpgradeConfigImpl: + type: OS::Heat::SoftwareConfig + properties: + group: ansible + options: + tags: + str_replace: + template: "stepSTEP" + params: + STEP: {get_param: step} + inputs: + - name: role + config: {get_attr: [AnsibleConfig, value]} + +outputs: + OS::stack_id: + description: The software config which runs ansible with tags + value: {get_resource: AnsibleUpgradeConfigImpl} + upgrade_config: + description: The configuration file used for upgrade + value: {get_attr: [AnsibleConfig, value]} diff --git a/requirements.txt b/requirements.txt index 4e46b891..9c4a708a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,2 @@ pbr>=0.5.21,<1.0 +Jinja2>=2.8 # BSD License (3 clause) diff --git a/scripts/hosts-config.sh b/scripts/hosts-config.sh new file mode 100755 index 00000000..4826d615 --- /dev/null +++ b/scripts/hosts-config.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -eux +set -o pipefail + +write_entries() { + local file="$1" + local entries="$2" + + # Don't do anything if the file isn't there + if [ ! -f "$file" ]; then + return + fi + + if grep -q "^# HEAT_HOSTS_START" "$file"; then + temp=$(mktemp) + awk -v v="$entries" '/^# HEAT_HOSTS_START/ { + print $0 + print v + f=1 + }f &&!/^# HEAT_HOSTS_END$/{next}/^# HEAT_HOSTS_END$/{f=0}!f' "$file" > "$temp" + echo "INFO: Updating hosts file $file, check below for changes" + diff "$file" "$temp" || true + cat "$temp" > "$file" + else + echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n" >> "$file" + echo "$entries" >> "$file" + echo -ne "# HEAT_HOSTS_END\n\n" >> "$file" + fi + +} + +if [ ! -z "$hosts" ]; then + # cloud-init files are /etc/cloud/templates/hosts.OSNAME.tmpl + DIST=$(lsb_release -is | tr -s [A-Z] [a-z]) + case $DIST in + fedora|redhatenterpriseserver) + name="redhat" + ;; + *) + name="$DIST" + ;; + esac + write_entries "/etc/cloud/templates/hosts.${name}.tmpl" "$hosts" + write_entries "/etc/hosts" "$hosts" +else + echo "No hosts in Heat, nothing written." +fi diff --git a/tools/process-templates.py b/tools/process-templates.py new file mode 100755 index 00000000..a15b00e2 --- /dev/null +++ b/tools/process-templates.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import argparse +import jinja2 +import os +import sys +import yaml + + +def parse_opts(argv): + parser = argparse.ArgumentParser( + description='Configure host network interfaces using a JSON' + ' config file format.') + parser.add_argument('-p', '--base_path', metavar='BASE_PATH', + help="""base path of templates to process.""", + default='.') + parser.add_argument('-r', '--roles-data', metavar='ROLES_DATA', + help="""relative path to the roles_data.yaml file.""", + default='roles_data.yaml') + parser.add_argument('--safe', + action='store_true', + help="""Enable safe mode (do not overwrite files).""", + default=False) + opts = parser.parse_args(argv[1:]) + + return opts + + +def _j2_render_to_file(j2_template, j2_data, outfile_name=None, + overwrite=True): + yaml_f = outfile_name or j2_template.replace('.j2.yaml', '.yaml') + print('rendering j2 template to file: %s' % outfile_name) + + if not overwrite and os.path.exists(outfile_name): + print('ERROR: path already exists for file: %s' % outfile_name) + sys.exit(1) + + try: + # Render the j2 template + template = jinja2.Environment().from_string(j2_template) + r_template = template.render(**j2_data) + except jinja2.exceptions.TemplateError as ex: + error_msg = ("Error rendering template %s : %s" + % (yaml_f, six.text_type(ex))) + print(error_msg) + raise Exception(error_msg) + with open(outfile_name, 'w') as out_f: + out_f.write(r_template) + + +def process_templates(template_path, role_data_path, overwrite): + + with open(role_data_path) as role_data_file: + role_data = yaml.safe_load(role_data_file) + + j2_excludes_path = os.path.join(template_path, 'j2_excludes.yaml') + with open(j2_excludes_path) as role_data_file: + j2_excludes = yaml.safe_load(role_data_file) + + role_names = [r.get('name') for r in role_data] + r_map = {} + for r in role_data: + r_map[r.get('name')] = r + excl_templates = ['%s/%s' % (template_path, e) + for e in j2_excludes.get('name')] + + if os.path.isdir(template_path): + for subdir, dirs, files in os.walk(template_path): + for f in files: + file_path = os.path.join(subdir, f) + # We do two templating passes here: + # 1. *.role.j2.yaml - we template just the role name + # and create multiple files (one per role) + # 2. 
*.j2.yaml - we template with all roles_data, + # and create one file common to all roles + if f.endswith('.role.j2.yaml'): + print("jinja2 rendering role template %s" % f) + with open(file_path) as j2_template: + template_data = j2_template.read() + print("jinja2 rendering roles %s" % "," + .join(role_names)) + for role in role_names: + j2_data = {'role': role} + # (dprince) For the undercloud installer we don't + # want to have heat check nova/glance API's + if r_map[role].get('disable_constraints', False): + j2_data['disable_constraints'] = True + out_f = "-".join( + [role.lower(), + os.path.basename(f).replace('.role.j2.yaml', + '.yaml')]) + out_f_path = os.path.join(subdir, out_f) + if not (out_f_path in excl_templates): + _j2_render_to_file(template_data, j2_data, + out_f_path, overwrite) + else: + print('skipping rendering of %s' % out_f_path) + elif f.endswith('.j2.yaml'): + print("jinja2 rendering normal template %s" % f) + with open(file_path) as j2_template: + template_data = j2_template.read() + j2_data = {'roles': role_data} + out_f = file_path.replace('.j2.yaml', '.yaml') + _j2_render_to_file(template_data, j2_data, out_f, + overwrite) + + else: + print('Unexpected argument %s' % template_path) + +opts = parse_opts(sys.argv) + +role_data_path = os.path.join(opts.base_path, opts.roles_data) + +process_templates(opts.base_path, role_data_path, (not opts.safe)) @@ -11,4 +11,9 @@ deps = -r{toxinidir}/requirements.txt commands = {posargs} [testenv:pep8] -commands = python ./tools/yaml-validate.py . +commands = + python ./tools/process-templates.py + python ./tools/yaml-validate.py . + +[testenv:templates] +commands = python ./tools/process-templates.py |
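
Taken together, the upgrade changes above work as follows: each service template exposes an `upgrade_tasks` list, `puppet/services/services.yaml` merges those lists per role (dropping duplicates via `distinct()`), and `puppet/upgrade_config.yaml` wraps the merged list in a single localhost play whose deployment sets the ansible `tags` option to `stepN`, so each of the seven serialized step deployments in `puppet/major_upgrade_steps.j2.yaml` applies the same play but only executes the tasks tagged for that step. A minimal sketch of the merged play for a role that includes the MariaDB and Keystone services touched in this change (task wording copied from the templates above; the ordering after `distinct()` is an assumption):

# Sketch of the value built by the AnsibleConfig resource in
# puppet/upgrade_config.yaml for one role. The step-2 deployment runs
# it with options: {tags: step2}, so only the step2 tasks execute there.
- hosts: localhost
  connection: local
  tasks:
    - name: Stop keystone service (running under httpd)
      tags: step2
      service: name=httpd state=stopped
    - name: Stop service
      tags: step2
      service: name=mariadb state=stopped
    - name: Start service
      tags: step4
      service: name=mariadb state=started
    - name: Sync keystone DB
      tags: step5
      command: keystone-manage db_sync
    - name: Start keystone service (running under httpd)
      tags: step6
      service: name=httpd state=started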
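
The new `UpgradeInitCommand` parameter in `puppet/major_upgrade_steps.j2.yaml` is appended, after the resolv.conf.save cleanup, to a script that runs on every node before the step deployments start. A hypothetical operator environment, passed alongside environments/major-upgrade-composable-steps.yaml, might use it for a repository switch; the commands below are placeholders, not something defined by this change:

# Hypothetical extra environment for a major upgrade. Only the
# UpgradeInitCommand parameter name comes from this change; the
# repo-setup command is a placeholder.
parameter_defaults:
  UpgradeInitCommand: |
    yum clean all
    yum install -y example-new-release-repos   # placeholder package

Per the UpdateIdentifier description above, re-running the stack update with a previously unused value re-triggers the Upgrade resources on all roles.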
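
The `{% if disable_constraints is not defined %}` guards added to `puppet/role.role.j2.yaml` are driven by an optional per-role flag that `tools/process-templates.py` reads from roles_data.yaml. A hypothetical role entry is sketched below; only `name` and `disable_constraints` are read directly by the script for the per-role pass, the remaining fields are illustrative:

# Hypothetical roles_data.yaml entry. With disable_constraints set,
# process-templates.py renders the role template without the
# nova.flavor / glance.image / nova.keypair custom_constraint blocks,
# which helps where those APIs are unreachable (the undercloud
# installer case noted in the script).
- name: Undercloud
  disable_constraints: True
  ServicesDefault:                     # illustrative service list
    - OS::TripleO::Services::Keystone
    - OS::TripleO::Services::MySQL

Following the filename join in process-templates.py, such a role would be rendered from puppet/role.role.j2.yaml to puppet/undercloud-role.yaml.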