-rw-r--r--  ci/environments/scenario002-multinode-containers.yaml            1
-rw-r--r--  docker/services/ceph-ansible/ceph-base.yaml                      29
-rw-r--r--  docker/services/ceph-ansible/ceph-external.yaml                  66
-rw-r--r--  docker/services/ceph-ansible/ceph-rgw.yaml                       87
-rw-r--r--  docker/services/database/mongodb.yaml                            1
-rw-r--r--  docker/services/nova-libvirt.yaml                                25
-rw-r--r--  docker/services/pacemaker/ovn-dbs.yaml                           140
-rw-r--r--  environments/ceph-ansible/ceph-ansible-external.yaml             30
-rw-r--r--  environments/ceph-ansible/ceph-rgw.yaml                          5
-rw-r--r--  environments/neutron-opendaylight-dpdk.yaml                      3
-rw-r--r--  environments/neutron-opendaylight-sriov.yaml                     1
-rw-r--r--  environments/neutron-opendaylight.yaml                           1
-rw-r--r--  environments/puppet-ceph-external.yaml                           1
-rw-r--r--  environments/services-docker/neutron-opendaylight.yaml           1
-rw-r--r--  environments/services-docker/neutron-ovn-ha.yaml                 28
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh                  62
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                                  3
-rw-r--r--  overcloud.j2.yaml                                                20
-rw-r--r--  puppet/all-nodes-config.yaml                                     6
-rw-r--r--  puppet/services/ceph-base.yaml                                   7
-rw-r--r--  puppet/services/disabled/mongodb-disabled.yaml                   1
-rw-r--r--  releasenotes/notes/odl-qos-48b70c804755e3a5.yaml                 4
-rw-r--r--  releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml   1
-rw-r--r--  roles/ControllerOpenstack.yaml                                   1
-rw-r--r--  roles/Database.yaml                                              1
25 files changed, 498 insertions, 27 deletions
diff --git a/ci/environments/scenario002-multinode-containers.yaml b/ci/environments/scenario002-multinode-containers.yaml
index f1dc2683..0ca67d00 100644
--- a/ci/environments/scenario002-multinode-containers.yaml
+++ b/ci/environments/scenario002-multinode-containers.yaml
@@ -9,6 +9,7 @@ resource_registry:
OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
+ OS::TripleO::Services::MongoDb: ../../docker/services/database/mongodb.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
diff --git a/docker/services/ceph-ansible/ceph-base.yaml b/docker/services/ceph-ansible/ceph-base.yaml
index 2a592869..18d3e6a3 100644
--- a/docker/services/ceph-ansible/ceph-base.yaml
+++ b/docker/services/ceph-ansible/ceph-base.yaml
@@ -100,6 +100,14 @@ parameters:
CephClientUserName:
default: openstack
type: string
+ CephRgwClientName:
+ default: radosgw
+ type: string
+ CephRgwKey:
+ description: The cephx key for the radosgw client. Can be created
+ with ceph-authtool --gen-print-key.
+ type: string
+ hidden: true
CephPoolDefaultSize:
description: default minimum replication for RBD copies
type: number
@@ -115,6 +123,10 @@ parameters:
CephIPv6:
default: False
type: boolean
+ SwiftPassword:
+ description: The password for the swift service account
+ type: string
+ hidden: true
DockerCephDaemonImage:
description: image
type: string
@@ -244,12 +256,29 @@ outputs:
mds_cap: "allow *"
osd_cap: "allow rw"
mode: "0644"
+ - name:
+ list_join:
+ - '.'
+ - - client
+ - {get_param: CephRgwClientName}
+ key: {get_param: CephRgwKey}
+ mon_cap: "allow rw"
+ osd_cap: "allow rwx"
+ mode: "0644"
keys: *openstack_keys
pools: []
ceph_conf_overrides:
global:
osd_pool_default_size: {get_param: CephPoolDefaultSize}
osd_pool_default_pg_num: {get_param: CephPoolDefaultPgNum}
+ rgw_keystone_api_version: 3
+ rgw_keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ rgw_keystone_accepted_roles: 'Member, _member_, admin'
+ rgw_keystone_admin_domain: default
+ rgw_keystone_admin_project: service
+ rgw_keystone_admin_user: swift
+ rgw_keystone_admin_password: {get_param: SwiftPassword}
+ rgw_s3_auth_use_keystone: 'true'
ntp_service_enabled: false
generate_fsid: false
ip_version:
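
The new CephRgwKey parameter has no default and is marked hidden, so deployments that enable the RGW service must supply a key. A minimal sketch of generating one and passing it in as a parameter default, assuming a hypothetical environment file named rgw-key-env.yaml:

    # Generate a cephx key (ceph-authtool ships with ceph-common) and wrap it
    # in a small Heat environment file for the deploy command.
    RGW_KEY=$(ceph-authtool --gen-print-key)
    printf 'parameter_defaults:\n  CephRgwKey: %s\n' "$RGW_KEY" > rgw-key-env.yaml
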
diff --git a/docker/services/ceph-ansible/ceph-external.yaml b/docker/services/ceph-ansible/ceph-external.yaml
new file mode 100644
index 00000000..f93dd566
--- /dev/null
+++ b/docker/services/ceph-ansible/ceph-external.yaml
@@ -0,0 +1,66 @@
+heat_template_version: pike
+
+description: >
+ Ceph External service.
+
+parameters:
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ CephExternalMonHost:
+ default: ''
+ type: string
+ description: List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+ properties:
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceph External service.
+ value:
+ service_name: ceph_client
+ upgrade_tasks: []
+ step_config: ''
+ puppet_config:
+ config_image: ''
+ config_volume: ''
+ step_config: ''
+ docker_config: {}
+ service_workflow_tasks: {get_attr: [CephBase, role_data, service_workflow_tasks]}
+ config_settings:
+ ceph_client_ansible_vars:
+ map_merge:
+ - {get_attr: [CephBase, role_data, config_settings, ceph_common_ansible_vars]}
+ - external_cluster_mon_ips: {get_param: CephExternalMonHost}
\ No newline at end of file
diff --git a/docker/services/ceph-ansible/ceph-rgw.yaml b/docker/services/ceph-ansible/ceph-rgw.yaml
new file mode 100644
index 00000000..4bed9b46
--- /dev/null
+++ b/docker/services/ceph-ansible/ceph-rgw.yaml
@@ -0,0 +1,87 @@
+heat_template_version: pike
+
+description: >
+ Ceph RadosGW service.
+
+parameters:
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ SwiftPassword:
+ description: The password for the swift service account
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+resources:
+ CephBase:
+ type: ./ceph-base.yaml
+ properties:
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceph RadosGW service.
+ value:
+ service_name: ceph_rgw
+ upgrade_tasks: []
+ step_config: ''
+ puppet_config:
+ config_image: ''
+ config_volume: ''
+ step_config: ''
+ docker_config: {}
+ service_workflow_tasks: {get_attr: [CephBase, role_data, service_workflow_tasks]}
+ config_settings:
+ map_merge:
+ - tripleo.ceph_rgw.firewall_rules:
+ '122 ceph rgw':
+ dport: {get_param: [EndpointMap, CephRgwInternal, port]}
+ - ceph_rgw_ansible_vars:
+ map_merge:
+ - {get_attr: [CephBase, role_data, config_settings, ceph_common_ansible_vars]}
+ - radosgw_keystone: true
+ radosgw_keystone_ssl: false
+ radosgw_address_block: {get_param: [ServiceData, net_cidr_map, {get_param: [ServiceNetMap, CephRgwNetwork]}]}
+ radosgw_civetweb_port: {get_param: [EndpointMap, CephRgwInternal, port]}
+ service_config_settings:
+ keystone:
+ ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
+ ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
+ ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
+ ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
+ ceph::rgw::keystone::auth::roles: [ 'admin', 'Member', '_member_' ]
+ ceph::rgw::keystone::auth::tenant: service
+ ceph::rgw::keystone::auth::user: swift
+ ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index d6bba20b..5cf6f925 100644
--- a/docker/services/database/mongodb.yaml
+++ b/docker/services/database/mongodb.yaml
@@ -159,6 +159,7 @@ outputs:
upgrade_tasks:
- name: Check for mongodb service
stat: path=/usr/lib/systemd/system/mongod.service
+ tags: common
register: mongod_service
- name: Stop and disable mongodb service
tags: step2
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index 8f151cfe..d20c093d 100644
--- a/docker/services/nova-libvirt.yaml
+++ b/docker/services/nova-libvirt.yaml
@@ -204,6 +204,7 @@ outputs:
- /var/lib/libvirt:/var/lib/libvirt
- /var/log/libvirt/qemu:/var/log/libvirt/qemu:ro
- /var/log/containers/nova:/var/log/nova
+ - /var/lib/vhost_sockets:/var/lib/vhost_sockets
-
if:
- use_tls_for_live_migration
@@ -252,6 +253,30 @@ outputs:
- /etc/libvirt/qemu
- /var/lib/libvirt
- /var/log/containers/nova
+ # The qemu user on the host is created by the libvirt package install; ensure
+ # the qemu user is created with the same uid/gid the libvirt package uses.
+ # These specific values are required since OVS is running on the host.
+ # Once OVS with DPDK is containerized, we could change this uid/gid to
+ # match the kolla config values.
+ - name: ensure qemu group is present on the host
+ group:
+ name: qemu
+ gid: 107
+ state: present
+ - name: ensure qemu user is present on the host
+ user:
+ name: qemu
+ uid: 107
+ group: qemu
+ state: present
+ shell: /sbin/nologin
+ comment: qemu user
+ - name: create directory for vhost-user sockets with qemu ownership
+ file:
+ path: /var/lib/vhost_sockets
+ state: directory
+ owner: qemu
+ group: qemu
- name: ensure ceph configurations exist
file:
path: /etc/ceph
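
Since the host_prep_tasks above pin the qemu uid/gid to 107 and hand /var/lib/vhost_sockets to that user, a quick sanity check on a compute host is sketched below (plain inspection commands, no assumptions beyond the tasks in this diff):

    # Confirm the qemu user and group exist with the pinned uid/gid of 107
    getent passwd qemu
    getent group qemu
    # Confirm the vhost-user socket directory is owned by qemu:qemu
    stat -c '%U:%G %a %n' /var/lib/vhost_sockets
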
diff --git a/docker/services/pacemaker/ovn-dbs.yaml b/docker/services/pacemaker/ovn-dbs.yaml
new file mode 100644
index 00000000..03c5a397
--- /dev/null
+++ b/docker/services/pacemaker/ovn-dbs.yaml
@@ -0,0 +1,140 @@
+heat_template_version: pike
+
+description: >
+ OpenStack containerized OVN DBs service managed by pacemaker
+
+parameters:
+ DockerOvnDbsImage:
+ description: image
+ type: string
+ DockerOvnDbsConfigImage:
+ description: image
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceData:
+ default: {}
+ description: Dictionary packing service data
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
+ type: number
+ default: 6642
+
+resources:
+
+ ContainersCommon:
+ type: ./../containers-common.yaml
+
+ OVNDbsBase:
+ type: ../../../puppet/services/pacemaker/ovn-dbs.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ OVNNorthboundServerPort: {get_param: OVNNorthboundServerPort}
+ OVNSouthboundServerPort: {get_param: OVNSouthboundServerPort}
+
+outputs:
+ role_data:
+ description: Role data for the OVN Dbs HA role.
+ value:
+ service_name: {get_attr: [OVNDbsBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [OVNDbsBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::ovn_dbs_bundle::ovn_dbs_docker_image: {get_param: DockerOvnDbsImage}
+ - tripleo::profile::pacemaker::ovn_dbs_bundle::nb_db_port: {get_param: OVNNorthboundServerPort}
+ - tripleo::profile::pacemaker::ovn_dbs_bundle::sb_db_port: {get_param: OVNSouthboundServerPort}
+ step_config: ''
+ service_config_settings: {get_attr: [OVNDbsBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'ovn_dbs'
+ puppet_tags: 'exec'
+ step_config: ''
+ config_image: &ovn_dbs_config_image {get_param: DockerOvnDbsConfigImage}
+ kolla_config:
+ /var/lib/kolla/config_files/ovn_dbs.json:
+ command: /usr/sbin/pacemaker_remoted
+ config_files:
+ - dest: /etc/libqb/force-filesystem-sockets
+ source: /dev/null
+ owner: root
+ perm: '0644'
+ - source: "/var/lib/kolla/config_files/src/*"
+ dest: "/"
+ merge: true
+ preserve_properties: true
+ optional: true
+ docker_config:
+ step_3:
+ ovn_dbs_init_bundle:
+ start_order: 1
+ detach: false
+ net: host
+ user: root
+ config_volume: 'ovn_dbs_init_bundle'
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 3}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ CONFIG:
+ list_join:
+ - ';'
+ - - 'include ::tripleo::profile::base::pacemaker'
+ - 'include ::tripleo::profile::pacemaker::ovn_dbs_bundle'
+ image: *ovn_dbs_config_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/openvswitch
+ - /var/lib/openvswitch/ovn
+ upgrade_tasks:
+ - name: Stop and disable ovn-northd service
+ tags: step2
+ service: name=ovn-northd state=stopped enabled=no
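
Once step 3 has run, the init bundle container applies the pacemaker resource tags and the OVN DBs should appear as a pacemaker-managed bundle. A hedged post-deployment check (the exact bundle and container names come from the puppet-tripleo profile and may differ):

    # On a controller node, look for the OVN DBs bundle among pacemaker resources
    pcs status resources
    # Inspect any OVN-related containers started by the bundle
    docker ps --filter name=ovn
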
diff --git a/environments/ceph-ansible/ceph-ansible-external.yaml b/environments/ceph-ansible/ceph-ansible-external.yaml
new file mode 100644
index 00000000..ed3bedbd
--- /dev/null
+++ b/environments/ceph-ansible/ceph-ansible-external.yaml
@@ -0,0 +1,30 @@
+resource_registry:
+ OS::TripleO::Services::CephExternal: ../../docker/services/ceph-ansible/ceph-external.yaml
+ OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephClient: OS::Heat::None
+ OS::TripleO::Services::CephOSD: OS::Heat::None
+
+parameter_defaults:
+ # NOTE: These example parameters are required when using CephExternal
+ #CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
+ #CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
+ #CephExternalMonHost: '172.16.1.7, 172.16.1.8'
+
+ # the following parameters enable Ceph backends for Cinder, Glance, Gnocchi and Nova
+ NovaEnableRbdBackend: true
+ CinderEnableRbdBackend: true
+ CinderBackupBackend: ceph
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
+ # If the Ceph pools which host VMs, Volumes and Images do not match these
+ # names OR the client keyring to use is not named 'openstack', edit the
+ # following as needed.
+ NovaRbdPoolName: vms
+ CinderRbdPoolName: volumes
+ CinderBackupRbdPoolName: backups
+ GlanceRbdPoolName: images
+ GnocchiRbdPoolName: metrics
+ CephClientUserName: openstack
+
+ # finally we disable the Cinder LVM backend
+ CinderEnableIscsiBackend: false
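
To use this environment, the commented example parameters above must be given real values for the external cluster. A sketch of a deploy invocation, assuming a user-provided ceph-external-params.yaml that sets CephClusterFSID, CephClientKey and CephExternalMonHost:

    openstack overcloud deploy --templates \
      -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible-external.yaml \
      -e ceph-external-params.yaml
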
diff --git a/environments/ceph-ansible/ceph-rgw.yaml b/environments/ceph-ansible/ceph-rgw.yaml
new file mode 100644
index 00000000..4b09a703
--- /dev/null
+++ b/environments/ceph-ansible/ceph-rgw.yaml
@@ -0,0 +1,5 @@
+resource_registry:
+ OS::TripleO::Services::CephRgw: ../../docker/services/ceph-ansible/ceph-rgw.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
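
Because this environment maps the Swift services to OS::Heat::None, RGW takes over the object storage endpoints; it is meant to be stacked on top of a ceph-ansible deployment. A sketch, assuming the internal ceph-ansible environment is already in use and CephRgwKey is supplied as in the earlier sketch:

    openstack overcloud deploy --templates \
      -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible.yaml \
      -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-rgw.yaml \
      -e rgw-key-env.yaml
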
diff --git a/environments/neutron-opendaylight-dpdk.yaml b/environments/neutron-opendaylight-dpdk.yaml
index 236b2fb9..0d598980 100644
--- a/environments/neutron-opendaylight-dpdk.yaml
+++ b/environments/neutron-opendaylight-dpdk.yaml
@@ -9,8 +9,9 @@ resource_registry:
parameter_defaults:
NeutronEnableForceMetadata: true
+ NeutronPluginExtensions: 'port_security'
NeutronMechanismDrivers: 'opendaylight_v2'
- NeutronServicePlugins: 'odl-router_v2'
+ NeutronServicePlugins: 'odl-router_v2,trunk'
NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
OpenDaylightSNATMechanism: 'controller'
diff --git a/environments/neutron-opendaylight-sriov.yaml b/environments/neutron-opendaylight-sriov.yaml
index 5c0a0350..3a212ed3 100644
--- a/environments/neutron-opendaylight-sriov.yaml
+++ b/environments/neutron-opendaylight-sriov.yaml
@@ -11,6 +11,7 @@ resource_registry:
parameter_defaults:
NeutronEnableForceMetadata: true
+ NeutronPluginExtensions: 'port_security'
NeutronMechanismDrivers: ['sriovnicswitch','opendaylight_v2']
NeutronServicePlugins: 'odl-router_v2,trunk'
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index 4644725d..e9b1ac11 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -12,3 +12,4 @@ parameter_defaults:
NeutronEnableForceMetadata: true
NeutronMechanismDrivers: 'opendaylight_v2'
NeutronServicePlugins: 'odl-router_v2,trunk'
+ NeutronPluginExtensions: 'port_security'
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index 7718b821..65bbc3e4 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -27,6 +27,7 @@ parameter_defaults:
# following as needed.
NovaRbdPoolName: vms
CinderRbdPoolName: volumes
+ CinderBackupRbdPoolName: backups
GlanceRbdPoolName: images
GnocchiRbdPoolName: metrics
CephClientUserName: openstack
diff --git a/environments/services-docker/neutron-opendaylight.yaml b/environments/services-docker/neutron-opendaylight.yaml
index 31d5790e..873957ae 100644
--- a/environments/services-docker/neutron-opendaylight.yaml
+++ b/environments/services-docker/neutron-opendaylight.yaml
@@ -10,5 +10,6 @@ resource_registry:
parameter_defaults:
NeutronEnableForceMetadata: true
+ NeutronPluginExtensions: 'port_security'
NeutronMechanismDrivers: 'opendaylight_v2'
NeutronServicePlugins: 'odl-router_v2,trunk'
diff --git a/environments/services-docker/neutron-ovn-ha.yaml b/environments/services-docker/neutron-ovn-ha.yaml
new file mode 100644
index 00000000..7d3c1d19
--- /dev/null
+++ b/environments/services-docker/neutron-ovn-ha.yaml
@@ -0,0 +1,28 @@
+# A Heat environment that can be used to deploy OVN services with HA OVN DB servers.
+resource_registry:
+ OS::TripleO::Docker::NeutronMl2PluginBase: ../../puppet/services/neutron-plugin-ml2-ovn.yaml
+ OS::TripleO::Services::OVNController: ../../docker/services/ovn-controller.yaml
+ OS::TripleO::Services::OVNDBs: ../../docker/services/pacemaker/ovn-dbs.yaml
+# Disabling Neutron services that overlap with OVN
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+
+
+parameter_defaults:
+ NeutronMechanismDrivers: ovn
+ OVNVifType: ovs
+ OVNNeutronSyncMode: log
+ OVNQosDriver: ovn-qos
+ OVNTunnelEncapType: geneve
+ NeutronEnableDHCPAgent: false
+ NeutronTypeDrivers: 'geneve,vxlan,vlan,flat'
+ NeutronNetworkType: 'geneve'
+ NeutronServicePlugins: 'qos,ovn-router'
+ NeutronVniRanges: ['1:65536', ]
+ # TODO (numans) - This is temporary and needs to be handled in tripleo-common
+ DockerNeutronApiImage: 'tripleoupstream/centos-binary-neutron-server-ovn:latest'
+ DockerNeutronConfigImage: 'tripleoupstream/centos-binary-neutron-server-ovn:latest'
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 367f50d7..eb004070 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -383,3 +383,65 @@ workflow. Exiting."
exit 1
fi
}
+
+# This function tries to resolve an RPM dependency issue that can arise when
+# updating ceph packages on nodes that do not run the ceph-osd service. These
+# nodes do not require the ceph-osd package, and updates will fail if the
+# ceph-osd package cannot be updated because it's not available in any enabled
+# repo. The dependency issue is resolved by removing the ceph-osd package from
+# nodes that don't require it.
+#
+# No change is made to nodes that use the ceph-osd service (e.g. ceph storage
+# nodes, and hyperconverged nodes running ceph-osd and compute services). The
+# ceph-osd package is left in place, and the currently enabled repos will be
+# used to update all ceph packages.
+function yum_pre_update {
+ echo "Checking for ceph-osd dependency issues"
+
+ # No need to proceed if the ceph-osd package isn't installed
+ if ! rpm -q ceph-osd >/dev/null 2>&1; then
+ echo "ceph-osd package is not installed"
+ return
+ fi
+
+ # Do not proceed if there's any sign that the ceph-osd package is in use:
+ # - Are there OSD entries in /var/lib/ceph/osd?
+ # - Are any ceph-osd processes running?
+ # - Are there any ceph data disks (as identified by 'ceph-disk')
+ if [ -n "$(ls -A /var/lib/ceph/osd 2>/dev/null)" ]; then
+ echo "ceph-osd package is required (there are OSD entries in /var/lib/ceph/osd)"
+ return
+ fi
+
+ if [ "$(pgrep -xc ceph-osd)" != "0" ]; then
+ echo "ceph-osd package is required (there are ceph-osd processes running)"
+ return
+ fi
+
+ if ceph-disk list |& grep -q "ceph data"; then
+ echo "ceph-osd package is required (ceph data disks detected)"
+ return
+ fi
+
+ # Get a list of all ceph packages available from the currently enabled
+ # repos. Use "--showduplicates" to ensure the list includes installed
+ # packages that happen to be up to date.
+ local ceph_pkgs="$(yum list available --showduplicates 'ceph-*' |& awk '/^ceph/ {print $1}' | sort -u)"
+
+ # No need to proceed if no ceph packages are available from the currently
+ # enabled repos.
+ if [ -z "$ceph_pkgs" ]; then
+ echo "ceph packages are not available from any enabled repo"
+ return
+ fi
+
+ # No need to proceed if the ceph-osd package *is* available
+ if [[ $ceph_pkgs =~ ceph-osd ]]; then
+ echo "ceph-osd package is available from an enabled repo"
+ return
+ fi
+
+ echo "ceph-osd package is not required, but is preventing updates to other ceph packages"
+ echo "Removing ceph-osd package to allow updates to other ceph packages"
+ yum -y remove ceph-osd
+}
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index a2a04e8e..c0c92a60 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -85,6 +85,9 @@ fi
# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed
+# Resolve any RPM dependency issues before attempting the update
+yum_pre_update
+
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index d621244e..367ac5b6 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -545,6 +545,8 @@ resources:
ServiceConfigSettings: {get_attr: [{{role.name}}ServiceConfigSettings, value]}
ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
+ LoggingSources: {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]}
+ LoggingGroups: {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]}
ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
RoleParameters: {get_param: {{role.name}}Parameters}
@@ -636,24 +638,6 @@ resources:
{% for role in roles %}
- {get_attr: [{{role.name}}ServiceNames, value]}
{% endfor %}
- logging_groups:
- yaql:
- expression: >
- $.data.groups.flatten()
- data:
- groups:
-{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]}
-{% endfor %}
- logging_sources:
- yaql:
- expression: >
- $.data.sources.flatten()
- data:
- sources:
-{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]}
-{% endfor %}
controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
controller_names: {get_attr: [{{primary_role_name}}, hostname]}
service_ips:
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 3044fe39..37c1d4e5 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -16,10 +16,6 @@ parameters:
type: comma_delimited_list
controller_ips:
type: comma_delimited_list
- logging_groups:
- type: json
- logging_sources:
- type: json
service_ips:
type: json
service_node_names:
@@ -113,8 +109,6 @@ resources:
bootstrap_nodeid_ip: {get_input: bootstrap_nodeid_ip}
all_nodes:
map_merge:
- - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
- - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
- enabled_services:
yaql:
expression: $.data.distinct()
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index f6573f6c..8debf8c7 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -99,7 +99,6 @@ outputs:
ceph::params::packages:
- ceph-base
- ceph-mon
- - ceph-osd
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
@@ -152,3 +151,9 @@ outputs:
list_join: ['.', ['client', {get_param: CephClientUserName}]]
MANILA_CLIENT_KEY:
list_join: ['.', ['client', {get_param: ManilaCephFSNativeCephFSAuthId}]]
+ service_config_settings:
+ ceph_osd:
+ ceph::params::packages:
+ - ceph-base
+ - ceph-mon
+ - ceph-osd
diff --git a/puppet/services/disabled/mongodb-disabled.yaml b/puppet/services/disabled/mongodb-disabled.yaml
index c01a91fb..0c6e2bbb 100644
--- a/puppet/services/disabled/mongodb-disabled.yaml
+++ b/puppet/services/disabled/mongodb-disabled.yaml
@@ -39,6 +39,7 @@ outputs:
upgrade_tasks:
- name: Check for mongodb service
stat: path=/usr/lib/systemd/system/mongod.service
+ tags: common
register: mongod_service
- name: Stop and disable mongodb service on upgrade
tags: step1
diff --git a/releasenotes/notes/odl-qos-48b70c804755e3a5.yaml b/releasenotes/notes/odl-qos-48b70c804755e3a5.yaml
new file mode 100644
index 00000000..380ef7ff
--- /dev/null
+++ b/releasenotes/notes/odl-qos-48b70c804755e3a5.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Disables QoS with OpenDaylight until officially
+ supported.
diff --git a/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml b/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml
index 25fd2fbe..6da35473 100644
--- a/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml
+++ b/releasenotes/notes/ovn-container-support-3ab333fff6e90dc4.yaml
@@ -2,3 +2,4 @@
features:
- Support containerized ovn-controller
- Support containerized OVN Dbs without HA
+ - Support containerized OVN DBs with HA
diff --git a/roles/ControllerOpenstack.yaml b/roles/ControllerOpenstack.yaml
index cc497822..066962c1 100644
--- a/roles/ControllerOpenstack.yaml
+++ b/roles/ControllerOpenstack.yaml
@@ -42,7 +42,6 @@
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Congress
- - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Etcd
diff --git a/roles/Database.yaml b/roles/Database.yaml
index 689b1617..e5c6b4d2 100644
--- a/roles/Database.yaml
+++ b/roles/Database.yaml
@@ -12,6 +12,7 @@
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::Kernel