heat_template_version: ocata

description: >
  OpenStack Nova Compute service configured with Puppet

parameters:
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  NovaRbdPoolName:
    default: vms
    type: string
  CephClientUserName:
    default: openstack
    type: string
  CinderEnableNfsBackend:
    default: false
    description: Whether to enable the NFS backend for Cinder
    type: boolean
  CinderEnableRbdBackend:
    default: false
    description: Whether to enable the RBD backend for Cinder
    type: boolean
  NovaEnableRbdBackend:
    default: false
    description: Whether to enable the RBD backend for Nova
    type: boolean
  NovaComputeLibvirtVifDriver:
    default: ''
    description: Libvirt VIF driver configuration for the network
    type: string
  NovaPCIPassthrough:
    description: >
      List of PCI Passthrough whitelist parameters.
      Example -
      NovaPCIPassthrough:
        - vendor_id: "8086"
          product_id: "154c"
          address: "0000:05:00.0"
          physical_network: "datacentre"
      For different formats, refer to the nova.conf documentation for
      pci_passthrough_whitelist configuration
    type: json
    default: ''
  NovaVcpuPinSet:
    description: >
      A list or range of physical CPU cores to reserve for virtual machine
      processes.
      Ex. NovaVcpuPinSet: ['4-12','^8'] will reserve cores 4-12, excluding 8.
    type: comma_delimited_list
    default: []
  NovaReservedHostMemory:
    description: >
      Reserved RAM for host processes.
    type: number
    default: 2048
    constraints:
      - range: { min: 512 }
  MonitoringSubscriptionNovaCompute:
    default: 'overcloud-nova-compute'
    type: string
  NovaComputeLoggingSource:
    type: json
    default:
      tag: openstack.nova.compute
      path: /var/log/nova/nova-compute.log
  UpgradeLevelNovaCompute:
    type: string
    description: Nova Compute upgrade level
    default: auto
  MigrationSshKey:
    type: json
    description: >
      SSH key for migration.
      Expects a dictionary with keys 'public_key' and 'private_key'.
      Values should match the contents of the corresponding SSH
      public/private key files.
    default: {}

resources:
  NovaBase:
    type: ./nova-base.yaml
    properties:
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
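# A minimal sketch (hypothetical values) of how the parameters above are
# typically set, via parameter_defaults in a Heat environment file passed at
# deploy time. The CPU ranges, PCI device, and key material shown here are
# illustrative examples, not defaults shipped with this template:
#
#   parameter_defaults:
#     NovaVcpuPinSet: ['4-12', '^8']
#     NovaReservedHostMemory: 4096
#     NovaPCIPassthrough:
#       - vendor_id: "8086"
#         product_id: "154c"
#         address: "0000:05:00.0"
#         physical_network: "datacentre"
#     MigrationSshKey:
#       public_key: 'ssh-rsa AAAA... migration'
#       private_key: |
#         -----BEGIN RSA PRIVATE KEY-----
#         ...
#         -----END RSA PRIVATE KEY-----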
outputs:
  role_data:
    description: Role data for the Nova Compute service.
    value:
      service_name: nova_compute
      monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
      logging_source: {get_param: NovaComputeLoggingSource}
      logging_groups:
        - nova
      config_settings:
        map_merge:
          - get_attr: [NovaBase, role_data, config_settings]
          - nova::compute::libvirt::manage_libvirt_services: false
            nova::compute::pci_passthrough:
              str_replace:
                template: "JSON_PARAM"
                params:
                  JSON_PARAM: {get_param: NovaPCIPassthrough}
            nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
            nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
            # we manage migration in the nova common puppet profile
            nova::compute::libvirt::migration_support: false
            tripleo::profile::base::nova::manage_migration: true
            tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
            tripleo::profile::base::nova::migration_ssh_localaddrs:
              - "%{hiera('cold_migration_ssh_inbound_addr')}"
              - "%{hiera('live_migration_ssh_inbound_addr')}"
            live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
            cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]}
            tripleo::profile::base::nova::nova_compute_enabled: true
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
            tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
            rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
            nova::compute::rbd::rbd_keyring:
              list_join:
                - '.'
                - - 'client'
                  - {get_param: CephClientUserName}
            nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
            nova::compute::instance_usage_audit: true
            nova::compute::instance_usage_audit_period: 'hour'
            nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
            # TUNNELLED mode provides a security enhancement when using shared
            # storage but is not supported when not using shared storage.
            # See https://bugzilla.redhat.com/show_bug.cgi?id=1301986#c12
            # In future versions of QEMU (2.6, mostly), danpb's native
            # encryption work will obsolete the need to use TUNNELLED transport
            # mode.
            nova::migration::live_migration_tunnelled: {get_param: NovaEnableRbdBackend}
            nova::compute::neutron::libvirt_vif_driver: {get_param: NovaComputeLibvirtVifDriver}
            # NOTE: the bind IP is found in Heat by replacing the network name
            # with the local node IP for the given network; replacement
            # examples (e.g. for internal_api):
            # internal_api -> IP
            # internal_api_uri -> [IP]
            # internal_api_subnet -> IP/CIDR
            nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
            nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
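      # A rough sketch (hypothetical addresses and endpoints) of the hieradata
      # that the settings above resolve to on a compute node, once the
      # ServiceNetMap and EndpointMap lookups have been substituted; the
      # actual values depend on the networks and endpoints of the deployment:
      #
      #   nova::compute::reserved_host_memory: 2048
      #   nova::compute::vncserver_proxyclient_address: 172.16.2.10
      #   nova::vncproxy::common::vncproxy_protocol: https
      #   nova::vncproxy::common::vncproxy_host: overcloud.example.com
      #   nova::vncproxy::common::vncproxy_port: 6080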
      step_config: |
        # TODO(emilien): figure out how to deal with the libvirt profile.
        # We'll probably treat it like we do with Neutron plugins.
        # Until then, just include it in the default nova-compute role.
        include tripleo::profile::base::nova::compute::libvirt
      service_config_settings:
        collectd:
          tripleo.collectd.plugins.nova_compute:
            - virt
          collectd::plugins::virt::connection: "qemu:///system"
      upgrade_tasks:
        - name: Stop nova-compute service
          tags: step1
          service: name=openstack-nova-compute state=stopped
        # If not already set by puppet (e.g. a pre-ocata version), set the
        # upgrade_level for compute to "auto"
        - name: Set compute upgrade level to auto
          tags: step3
          ini_file:
            str_replace:
              template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
              params:
                LEVEL: {get_param: UpgradeLevelNovaCompute}
        - name: Start nova-compute service
          tags: step6
          service: name=openstack-nova-compute state=started
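# A minimal usage sketch (hypothetical file name): to enable the RBD ephemeral
# backend configured above, an environment file such as the following could be
# passed to `openstack overcloud deploy` with -e:
#
#   # ceph-nova.yaml
#   parameter_defaults:
#     NovaEnableRbdBackend: true
#     NovaRbdPoolName: vms
#     CephClientUserName: openstack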