From 3f7394289faa2945e8f36d008cfb7dacf06279d6 Mon Sep 17 00:00:00 2001 From: joehuang Date: Tue, 20 Sep 2016 05:20:31 -0400 Subject: Cherry Pick the update from the master branch to stable/colorado branch These patches should be cherry picked to colorado branch: https://gerrit.opnfv.org/gerrit/#/c/20077/ https://gerrit.opnfv.org/gerrit/#/c/20635/ https://gerrit.opnfv.org/gerrit/#/c/19833/ https://gerrit.opnfv.org/gerrit/#/c/19587/ But because the structure of the folder was changed in https://gerrit.opnfv.org/gerrit/#/c/19833/ cherry pick using gerrit will lead to merge conflict, has to update the colorado branch through this patch. Change-Id: I971eb28520b47b14de52a761269518b959c6921c Signed-off-by: joehuang --- .../configuration.options.render.rst | 27 -- docs/configurationguide/index.rst | 12 - .../multisite-configuration-guide.rst | 110 ------ ...ultisite.kingbird.configuration.description.rst | 265 -------------- docs/installationprocedure/abstract.rst | 7 - docs/installationprocedure/index.rst | 5 +- .../multisite.configuration.rst | 110 ++++++ .../multisite.kingbird.configuration.rst | 264 ++++++++++++++ ...multisite.kingbird.installation.instruction.rst | 299 ---------------- .../multisite.kingbird.installation.rst | 305 ++++++++++++++++ docs/releasenotes/index.rst | 12 + docs/releasenotes/multisite.release.notes.rst | 14 + .../VNF_high_availability_across_VIM.rst | 5 +- .../multisite-identity-service-management.rst | 6 +- docs/requirements/multisite-vnf-gr-requirement.rst | 5 +- docs/userguide/index.rst | 4 +- docs/userguide/multisite-admin-user-guide.rst | 390 --------------------- docs/userguide/multisite.admin.usage.rst | 390 +++++++++++++++++++++ docs/userguide/multisite.kingbird.usage.rst | 192 ++++++++++ docs/userguide/multisite.kingbird.user.guide.rst | 193 ---------- tools/kingbird/install_kingbird.sh | 4 + 21 files changed, 1302 insertions(+), 1317 deletions(-) delete mode 100644 docs/configurationguide/configuration.options.render.rst delete mode 100644 docs/configurationguide/index.rst delete mode 100644 docs/configurationguide/multisite-configuration-guide.rst delete mode 100644 docs/configurationguide/multisite.kingbird.configuration.description.rst delete mode 100644 docs/installationprocedure/abstract.rst create mode 100644 docs/installationprocedure/multisite.configuration.rst create mode 100644 docs/installationprocedure/multisite.kingbird.configuration.rst delete mode 100644 docs/installationprocedure/multisite.kingbird.installation.instruction.rst create mode 100644 docs/installationprocedure/multisite.kingbird.installation.rst create mode 100644 docs/releasenotes/index.rst create mode 100644 docs/releasenotes/multisite.release.notes.rst delete mode 100644 docs/userguide/multisite-admin-user-guide.rst create mode 100644 docs/userguide/multisite.admin.usage.rst create mode 100644 docs/userguide/multisite.kingbird.usage.rst delete mode 100644 docs/userguide/multisite.kingbird.user.guide.rst diff --git a/docs/configurationguide/configuration.options.render.rst b/docs/configurationguide/configuration.options.render.rst deleted file mode 100644 index f1dc11a..0000000 --- a/docs/configurationguide/configuration.options.render.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. 
(c) Christopher Price (Ericsson AB) - -===================== -Configuration Options -===================== - -OPNFV provides a variety of virtual infrastructure deployments called scenarios designed to -host Virtualised Network Functions(VNFs). Each scenario provide specific capabilities and/or -components aimed to solve specific problems for the deployment of VNF's. A scenario may include -components such as OpenStack, OpenDaylight, OVS, KVM etc. where each scenario will include -different source components or configurations. - -OPNFV Scenarios -=============== - -Each OPNFV scenario provides unique features and capabilities, it is important to understand -your target platform capabilities before installing and configuring your target scenario. -This configuration guide outlines how to install and configure components in order to enable -the features you require. - -.. include:: ../scenario/scenariomatrix.rst - -This document will describe how to install and configure your target OPNFV scenarios. -Remember to check the associated validation procedures section following your installation for -details of the use cases and tests that have been run. diff --git a/docs/configurationguide/index.rst b/docs/configurationguide/index.rst deleted file mode 100644 index 791d94d..0000000 --- a/docs/configurationguide/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 - -***************************** -Multisite Configuration Guide -***************************** - -.. toctree:: - :numbered: - :maxdepth: 2 - - multisite.kingbird.configuration.description.rst diff --git a/docs/configurationguide/multisite-configuration-guide.rst b/docs/configurationguide/multisite-configuration-guide.rst deleted file mode 100644 index c005e8d..0000000 --- a/docs/configurationguide/multisite-configuration-guide.rst +++ /dev/null @@ -1,110 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 - -============================= -Multisite configuration guide -============================= - -Multisite identity service management -===================================== - -Goal ----- - -A user should, using a single authentication point be able to manage virtual -resources spread over multiple OpenStack regions. - -Before you read ---------------- - -This chapter does not intend to cover all configuration of KeyStone and other -OpenStack services to work together with KeyStone. - -This chapter focuses only on the configuration part should be taken into -account in multi-site scenario. - -Please read the configuration documentation related to identity management -of OpenStack for all configuration items. - -http://docs.openstack.org/liberty/config-reference/content/ch_configuring-openstack-identity.html - -How to configure the database cluster for synchronization or asynchrounous -repliation in multi-site scenario is out of scope of this document. The only -remainder is that for the synchronization or replication, only Keystone -database is required. If you are using MySQL, you can configure like this: - -In the master: - - .. code-block:: bash - - binlog-do-db=keystone - -In the slave: - - .. code-block:: bash - - replicate-do-db=keystone - - -Deployment options ------------------- - -For each detail description of each deployment option, please refer to the -admin-user-guide. 
- -- Distributed KeyStone service with PKI token - - In KeyStone configuration file, PKI token format should be configured - - .. code-block:: bash - - provider = pki - - or - - .. code-block:: bash - - provider = pkiz - - In the [keystone_authtoken] section of each OpenStack service configuration - file in each site, configure the identity_url and auth_uri to the address - of KeyStone service - - .. code-block:: bash - - identity_uri = https://keystone.your.com:35357/ - auth_uri = http://keystone.your.com:5000/v2.0 - - It's better to use domain name for the KeyStone service, but not to use IP - address directly, especially if you deployed KeyStone service in at least - two sites for site level high availability. - -- Distributed KeyStone service with Fernet token -- Distributed KeyStone service with Fernet token + Async replication ( - star-mode). - - In these two deployment options, the token validation is planned to be done - in local site. - - In KeyStone configuration file, Fernet token format should be configured - - .. code-block:: bash - - provider = fernet - - In the [keystone_authtoken] section of each OpenStack service configuration - file in each site, configure the identity_url and auth_uri to the address - of local KeyStone service - - .. code-block:: bash - - identity_uri = https://local-keystone.your.com:35357/ - auth_uri = http://local-keystone.your.com:5000/v2.0 - - and especially, configure the region_name to your local region name, for - example, if you are configuring services in RegionOne, and there is local - KeyStone service in RegionOne, then - - .. code-block:: bash - - region_name = RegionOne diff --git a/docs/configurationguide/multisite.kingbird.configuration.description.rst b/docs/configurationguide/multisite.kingbird.configuration.description.rst deleted file mode 100644 index d003019..0000000 --- a/docs/configurationguide/multisite.kingbird.configuration.description.rst +++ /dev/null @@ -1,265 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) OPNFV - - -Configuration of Multisite.Kingbird -=================================== - -A brief introduction to configure Multisite Kingbird service. Only the -configuration items for Kingbird will be described here. Logging, -messaging, database, keystonemiddleware etc configuration which are -generated from OpenStack OSLO libary, will not be described here, for -these configuration items are common to Nova, Cinder, Neutron. So please -refer to corresponding description from Nova or Cinder or Neutron. 
- - -Configuration in [DEFAULT] --------------------------- - -configuration items for kingbird-api -"""""""""""""""""""""""""""""""""""" - -bind_host -********* -- default value: *bind_host = 0.0.0.0* -- description: The host IP to bind for kingbird-api service - -bind_port -********* -- default value: *bind_port = 8118* -- description: The port to bind for kingbird-api service - -api_workers -*********** -- default value: *api_workers = 2* -- description: Number of kingbird-api workers - -configuration items for kingbird-engine -""""""""""""""""""""""""""""""""""""""" - -host -**** -- default value: *host = localhost* -- description: The host name kingbird-engine service is running on - -workers -******* -- default value: *workers = 1* -- description: Number of kingbird-engine workers - -report_interval -*************** -- default value: *report_interval = 60* -- description: Seconds between running periodic reporting tasks to - keep the engine alive in the DB. If the engine doesn't report its - aliveness to the DB more than two intervals, then the lock accquired - by the engine will be removed by other engines. - -common configuration items for kingbird-api and kingbird-engine -""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" - -use_default_quota_class -*********************** -- default value: *use_default_quota_class = true* -- description: Enables or disables use of default quota class with default - quota, boolean value - -Configuration in [kingbird_global_limit] ----------------------------------------- - -For quota limit, a negative value means unlimited. - -configuration items for kingbird-api and kingbird-engine -"""""""""""""""""""""""""""""""""""""""""""""""""""""""" - -quota_instances -*************** -- default value: *quota_instances = 10* -- description: Number of instances allowed per project, integer value. - -quota_cores -*********** -- default value: *quota_cores = 20* -- description: Number of instance cores allowed per project, integer value. - -quota_ram -********* -- default value: *quota_ram = 512* -- description: Megabytes of instance RAM allowed per project, integer value. - -quota_metadata_items -******************** -- default value: *quota_metadata_items = 128* -- description: Number of metadata items allowed per instance, integer value. - -quota_key_pairs -*************** -- default value: *quota_key_pairs = 10* -- description: Number of key pairs per user, integer value. - -quota_fixed_ips -*************** -- default value: *quota_fixed_ips = -1* -- description: Number of fixed IPs allowed per project, this should be at - least the number of instances allowed, integer value. - -quota_security_groups -********************* -- default value: *quota_security_groups = 10* -- description: Number of security groups per project, integer value. - -quota_floating_ips -****************** -- default value: *quota_floating_ips = 10* -- description: Number of floating IPs allowed per project, integer value. - -quota_network -*************** -- default value: *quota_network = 10* -- description: Number of networks allowed per project, integer value. - -quota_subnet -*************** -- default value: *quota_subnet = 10* -- description: Number of subnets allowed per project, integer value. - -quota_port -*************** -- default value: *quota_port = 50* -- description: Number of ports allowed per project, integer value. 
- -quota_security_group -******************** -- default value: *quota_security_group = 10* -- description: Number of security groups allowed per project, integer value. - -quota_security_group_rule -************************* -- default value: *quota_security_group_rule = 100* -- description: Number of security group rules allowed per project, integer - value. - -quota_router -************ -- default value: *quota_router = 10* -- description: Number of routers allowed per project, integer value. - -quota_floatingip -**************** -- default value: *quota_floatingip = 50* -- description: Number of floating IPs allowed per project, integer value. - -quota_volumes -*************** -- default value: *quota_volumes = 10* -- description: Number of volumes allowed per project, integer value. - -quota_snapshots -*************** -- default value: *quota_snapshots = 10* -- description: Number of snapshots allowed per project, integer value. - -quota_gigabytes -*************** -- default value: *quota_gigabytes = 1000* -- description: Total amount of storage, in gigabytes, allowed for volumes - and snapshots per project, integer value. - -quota_backups -************* -- default value: *quota_backups = 10* -- description: Number of volume backups allowed per project, integer value. - -quota_backup_gigabytes -********************** -- default value: *quota_backup_gigabytes = 1000* -- description: Total amount of storage, in gigabytes, allowed for volume - backups per project, integer value. - -Configuration in [cache] ----------------------------------------- - -The [cache] section is used by kingbird engine to access the quota -information for Nova, Cinder, Neutron in each region in order to reduce -the KeyStone load while retrieving the endpoint information each time. - -configuration items for kingbird-engine -""""""""""""""""""""""""""""""""""""""" - -auth_uri -*************** -- default value: -- description: Keystone authorization url, for example, http://127.0.0.1:5000/v3. - -admin_username -************** -- default value: -- description: Username of admin account, for example, admin. - -admin_password -************** -- default value: -- description: Password for admin account, for example, password. - -admin_tenant -************ -- default value: -- description: Tenant name of admin account, for example, admin. - -admin_user_domain_name -********************** -- default value: *admin_user_domain_name = Default* -- description: User domain name of admin account. - -admin_project_domain_name -************************* -- default value: *admin_project_domain_name = Default* -- description: Project domain name of admin account. - -Configuration in [scheduler] ----------------------------------------- - -The [scheduler] section is used by kingbird engine to periodically synchronize -and rebalance the quota for each project. - -configuration items for kingbird-engine -""""""""""""""""""""""""""""""""""""""" - -periodic_enable -*************** -- default value: *periodic_enable = True* -- description: Boolean value for enable/disable periodic tasks. - -periodic_interval -***************** -- default value: *periodic_interval = 900* -- description: Periodic time interval for automatic quota sync job, unit is - seconds. - -Configuration in [batch] ----------------------------------------- - -The [batch] section is used by kingbird engine to periodicly synchronize -and rebalance the quota for each project. 
-
-batch_size
-***************
-- default value: *batch_size = 3*
-- description: Batch size number of projects will be synced at a time.
-
-Configuration in [locks]
-----------------------------------------
-
-The [locks] section is used by kingbird engine to periodically synchronize
-and rebalance the quota for each project.
-
-lock_retry_times
-****************
-- default value: *lock_retry_times = 3*
-- description: Number of times trying to grab a lock.
-
-lock_retry_interval
-*******************
-- default value: *lock_retry_interval =10*
-- description: Number of seconds between lock retries.
diff --git a/docs/installationprocedure/abstract.rst b/docs/installationprocedure/abstract.rst
deleted file mode 100644
index 550a809..0000000
--- a/docs/installationprocedure/abstract.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c)
-
-This document will give the user instructions on how to deploy
-available scenarios verified for the Colorado release of OPNFV
-platform.
diff --git a/docs/installationprocedure/index.rst b/docs/installationprocedure/index.rst
index bc51b95..746f819 100644
--- a/docs/installationprocedure/index.rst
+++ b/docs/installationprocedure/index.rst
@@ -13,6 +13,7 @@ Colorado 1.0
    :maxdepth: 2
 
    abstract.rst
-   multisite.kingbird.installation.instruction.rst
-   scenario.release.notes.rst
+   multisite.kingbird.installation.rst
+   multisite.configuration.rst
+   multisite.kingbird.configuration.rst
 
diff --git a/docs/installationprocedure/multisite.configuration.rst b/docs/installationprocedure/multisite.configuration.rst
new file mode 100644
index 0000000..c005e8d
--- /dev/null
+++ b/docs/installationprocedure/multisite.configuration.rst
@@ -0,0 +1,110 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+=============================
+Multisite configuration guide
+=============================
+
+Multisite identity service management
+=====================================
+
+Goal
+----
+
+A user should, using a single authentication point, be able to manage virtual
+resources spread over multiple OpenStack regions.
+
+Before you read
+---------------
+
+This chapter does not intend to cover all configuration of KeyStone and other
+OpenStack services to work together with KeyStone.
+
+This chapter focuses only on the configuration that should be taken into
+account in the multi-site scenario.
+
+Please read the configuration documentation related to identity management
+of OpenStack for all configuration items.
+
+http://docs.openstack.org/liberty/config-reference/content/ch_configuring-openstack-identity.html
+
+How to configure the database cluster for synchronous or asynchronous
+replication in the multi-site scenario is out of scope of this document. The
+only reminder is that for synchronization or replication, only the Keystone
+database is required. If you are using MySQL, you can configure it like this:
+
+In the master:
+
+  .. code-block:: bash
+
+     binlog-do-db=keystone
+
+In the slave:
+
+  .. code-block:: bash
+
+     replicate-do-db=keystone
+
+
+Deployment options
+------------------
+
+For a detailed description of each deployment option, please refer to the
+admin-user-guide.
+
+- Distributed KeyStone service with PKI token
+
+  In the KeyStone configuration file, the PKI token format should be configured
+
+  .. code-block:: bash
+
+     provider = pki
+
+  or
+
+  .. code-block:: bash
+
+     provider = pkiz
+
+  In the [keystone_authtoken] section of each OpenStack service configuration
+  file in each site, configure the identity_uri and auth_uri to the address
+  of the KeyStone service
+
+  .. code-block:: bash
+
+     identity_uri = https://keystone.your.com:35357/
+     auth_uri = http://keystone.your.com:5000/v2.0
+
+  It's better to use a domain name for the KeyStone service rather than an IP
+  address directly, especially if you deploy the KeyStone service in at least
+  two sites for site-level high availability.
+
+- Distributed KeyStone service with Fernet token
+- Distributed KeyStone service with Fernet token + Async replication (
+  star-mode).
+
+  In these two deployment options, the token validation is planned to be done
+  in the local site.
+
+  In the KeyStone configuration file, the Fernet token format should be configured
+
+  .. code-block:: bash
+
+     provider = fernet
+
+  In the [keystone_authtoken] section of each OpenStack service configuration
+  file in each site, configure the identity_uri and auth_uri to the address
+  of the local KeyStone service
+
+  .. code-block:: bash
+
+     identity_uri = https://local-keystone.your.com:35357/
+     auth_uri = http://local-keystone.your.com:5000/v2.0
+
+  and especially, configure the region_name to your local region name. For
+  example, if you are configuring services in RegionOne and there is a local
+  KeyStone service in RegionOne, then
+
+  .. code-block:: bash
+
+     region_name = RegionOne
diff --git a/docs/installationprocedure/multisite.kingbird.configuration.rst b/docs/installationprocedure/multisite.kingbird.configuration.rst
new file mode 100644
index 0000000..7eb6106
--- /dev/null
+++ b/docs/installationprocedure/multisite.kingbird.configuration.rst
@@ -0,0 +1,264 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+
+Configuration of Multisite.Kingbird
+===================================
+
+A brief introduction to configuring the Multisite Kingbird service. Only the
+configuration items specific to Kingbird are described here. Logging,
+messaging, database, keystonemiddleware and similar configuration, which is
+generated from the OpenStack Oslo library, is not described here, because
+these configuration items are common to Nova, Cinder and Neutron. Please
+refer to the corresponding description from Nova, Cinder or Neutron.
+
+
+Configuration in [DEFAULT]
+--------------------------
+
+configuration items for kingbird-api
+""""""""""""""""""""""""""""""""""""
+
+bind_host
+*********
+- default value: *bind_host = 0.0.0.0*
+- description: The host IP to bind for the kingbird-api service
+
+bind_port
+*********
+- default value: *bind_port = 8118*
+- description: The port to bind for the kingbird-api service
+
+api_workers
+***********
+- default value: *api_workers = 2*
+- description: Number of kingbird-api workers
+
+configuration items for kingbird-engine
+"""""""""""""""""""""""""""""""""""""""
+
+host
+****
+- default value: *host = localhost*
+- description: The host name the kingbird-engine service is running on
+
+workers
+*******
+- default value: *workers = 1*
+- description: Number of kingbird-engine workers
+
+report_interval
+***************
+- default value: *report_interval = 60*
+- description: Seconds between running periodic reporting tasks to
+  keep the engine alive in the DB. If the engine doesn't report its
+  aliveness to the DB for more than two intervals, the lock acquired
+  by the engine will be removed by other engines.
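+
+As a quick illustration, the [DEFAULT] items described above map to the
+following snippet of /etc/kingbird/kingbird.conf. This is only a sketch
+using the documented default values; adjust them to your environment:
+
+.. code-block:: bash
+
+    [DEFAULT]
+    # kingbird-api: listening address, port and number of workers
+    bind_host = 0.0.0.0
+    bind_port = 8118
+    api_workers = 2
+
+    # kingbird-engine: host name, workers and aliveness reporting period
+    host = localhost
+    workers = 1
+    report_interval = 60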
+ +common configuration items for kingbird-api and kingbird-engine +""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +use_default_quota_class +*********************** +- default value: *use_default_quota_class = true* +- description: Enables or disables use of default quota class with default + quota, boolean value + +Configuration in [kingbird_global_limit] +---------------------------------------- + +For quota limit, a negative value means unlimited. + +configuration items for kingbird-api and kingbird-engine +"""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +quota_instances +*************** +- default value: *quota_instances = 10* +- description: Number of instances allowed per project, integer value. + +quota_cores +*********** +- default value: *quota_cores = 20* +- description: Number of instance cores allowed per project, integer value. + +quota_ram +********* +- default value: *quota_ram = 512* +- description: Megabytes of instance RAM allowed per project, integer value. + +quota_metadata_items +******************** +- default value: *quota_metadata_items = 128* +- description: Number of metadata items allowed per instance, integer value. + +quota_key_pairs +*************** +- default value: *quota_key_pairs = 10* +- description: Number of key pairs per user, integer value. + +quota_fixed_ips +*************** +- default value: *quota_fixed_ips = -1* +- description: Number of fixed IPs allowed per project, this should be at + least the number of instances allowed, integer value. + +quota_security_groups +********************* +- default value: *quota_security_groups = 10* +- description: Number of security groups per project, integer value. + +quota_floating_ips +****************** +- default value: *quota_floating_ips = 10* +- description: Number of floating IPs allowed per project, integer value. + +quota_network +*************** +- default value: *quota_network = 10* +- description: Number of networks allowed per project, integer value. + +quota_subnet +*************** +- default value: *quota_subnet = 10* +- description: Number of subnets allowed per project, integer value. + +quota_port +*************** +- default value: *quota_port = 50* +- description: Number of ports allowed per project, integer value. + +quota_security_group +******************** +- default value: *quota_security_group = 10* +- description: Number of security groups allowed per project, integer value. + +quota_security_group_rule +************************* +- default value: *quota_security_group_rule = 100* +- description: Number of security group rules allowed per project, integer + value. + +quota_router +************ +- default value: *quota_router = 10* +- description: Number of routers allowed per project, integer value. + +quota_floatingip +**************** +- default value: *quota_floatingip = 50* +- description: Number of floating IPs allowed per project, integer value. + +quota_volumes +*************** +- default value: *quota_volumes = 10* +- description: Number of volumes allowed per project, integer value. + +quota_snapshots +*************** +- default value: *quota_snapshots = 10* +- description: Number of snapshots allowed per project, integer value. + +quota_gigabytes +*************** +- default value: *quota_gigabytes = 1000* +- description: Total amount of storage, in gigabytes, allowed for volumes + and snapshots per project, integer value. 
+ +quota_backups +************* +- default value: *quota_backups = 10* +- description: Number of volume backups allowed per project, integer value. + +quota_backup_gigabytes +********************** +- default value: *quota_backup_gigabytes = 1000* +- description: Total amount of storage, in gigabytes, allowed for volume + backups per project, integer value. + +Configuration in [cache] +---------------------------------------- + +The [cache] section is used by kingbird engine to access the quota +information for Nova, Cinder, Neutron in each region in order to reduce +the KeyStone load while retrieving the endpoint information each time. + +configuration items for kingbird-engine +""""""""""""""""""""""""""""""""""""""" + +auth_uri +*************** +- default value: +- description: Keystone authorization url, for example, http://127.0.0.1:5000/v3. + +admin_username +************** +- default value: +- description: Username of admin account, for example, admin. + +admin_password +************** +- default value: +- description: Password for admin account, for example, password. + +admin_tenant +************ +- default value: +- description: Tenant name of admin account, for example, admin. + +admin_user_domain_name +********************** +- default value: *admin_user_domain_name = Default* +- description: User domain name of admin account. + +admin_project_domain_name +************************* +- default value: *admin_project_domain_name = Default* +- description: Project domain name of admin account. + +Configuration in [scheduler] +---------------------------------------- + +The [scheduler] section is used by kingbird engine to periodically synchronize +and rebalance the quota for each project. + +configuration items for kingbird-engine +""""""""""""""""""""""""""""""""""""""" + +periodic_enable +*************** +- default value: *periodic_enable = True* +- description: Boolean value for enable/disable periodic tasks. + +periodic_interval +***************** +- default value: *periodic_interval = 900* +- description: Periodic time interval for automatic quota sync job, unit is + seconds. + +Configuration in [batch] +---------------------------------------- + +The [batch] section is used by kingbird engine to periodicly synchronize +and rebalance the quota for each project. + +batch_size +*************** +- default value: *batch_size = 3* +- description: Batch size number of projects will be synced at a time. + +Configuration in [locks] +---------------------------------------- + +The [locks] section is used by kingbird engine to periodically synchronize +and rebalance the quota for each project. + +lock_retry_times +**************** +- default value: *lock_retry_times = 3* +- description: Number of times trying to grab a lock. + +lock_retry_interval +******************* +- default value: *lock_retry_interval =10* +- description: Number of seconds between lock retries. diff --git a/docs/installationprocedure/multisite.kingbird.installation.instruction.rst b/docs/installationprocedure/multisite.kingbird.installation.instruction.rst deleted file mode 100644 index 8ef3f06..0000000 --- a/docs/installationprocedure/multisite.kingbird.installation.instruction.rst +++ /dev/null @@ -1,299 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. 
(c) OPNFV - -=========================================== -Multisite.Kingbird installation instruction -=========================================== - -Preparing the installation --------------------------- -Kingbird is centralized synchronization service for multi-region OpenStack -deployments. In OPNFV Colorado release, Kingbird provides centralized quota -management feature. At least two OpenStack regions with shared KeyStone should -be installed first. - -Kingbird includes kingbird-api and kingbird-engine, kingbird-api and -kingbird-engine which talk to each other through message bus, and both -services access the database. Kingbird-api receives the RESTful -API request for quota management and forward the request to kingbird-engine -to do quota synchronization etc task. - -Therefore install Kingbird on the controller nodes of one of the OpenStack -region, these two services could be deployed in same node or different node. -Both kingbird-api and kingbird-engine can run in multiple nodes with -multi-workers mode. It's up to you how many nodes you want to deploy -kingbird-api and kingbird-engine and they can work in same node or -different nodes. - -HW requirements ---------------- -No special hardware requirements - -Installation instruction ------------------------- - -In colorado release, Kingbird is recommended to be installed in a python -virtual environment. So install and activate virtualenv first. - -.. code-block:: bash - - sudo pip install virtualenv - virtualenv venv - source venv/bin/activate - -Get the latest code of Kingbird from git repository: - -.. code-block:: bash - - git clone https://github.com/openstack/kingbird.git - cd kingbird/ - pip install -e . - - -or get the stable release from PyPI repository: - -.. code-block:: bash - - pip install kingbird - -In case of the database package are not installed, you may need to install: - -.. code-block:: bash - - pip install mysql - pip install pymysql - -In the Kingbird root folder, where you can find the source code of Kingbird, -generate the configuration sample file for Kingbird: - -.. code-block:: bash - - oslo-config-generator --config-file=./tools/config-generator.conf - -prepare the folder used for cache, log and configuration for Kingbird: - -.. code-block:: bash - - sudo rm -rf /var/cache/kingbird - sudo mkdir -p /var/cache/kingbird - sudo chown `whoami` /var/cache/kingbird - sudo rm -rf /var/log/kingbird - sudo mkdir -p /var/log/kingbird - sudo chown `whoami` /var/log/kingbird - sudo rm -rf /etc/kingbird - sudo mkdir -p /etc/kingbird - sudo chown `whoami` /etc/kingbird - -Copy the sample configuration to the configuration folder /etc/kingbird: - -.. code-block:: bash - - cp etc/kingbird/kingbird.conf.sample /etc/kingbird/kingbird.conf - -Before editing the configuration file, prepare the database info for Kingbird. - -.. code-block:: bash - - mysql -uroot -e "CREATE DATABASE $kb_db CHARACTER SET utf8;" - mysql -uroot -e "GRANT ALL PRIVILEGES ON $kb_db.* TO '$kb_db_user'@'%' IDENTIFIED BY '$kb_db_pwd';" - -For example, the following command will create database "kingbird", and grant the -privilege for the db user "kingbird" with password "password": - -.. code-block:: bash - - mysql -uroot -e "CREATE DATABASE kingbird CHARACTER SET utf8;" - mysql -uroot -e "GRANT ALL PRIVILEGES ON kingbird.* TO 'kingbird'@'%' IDENTIFIED BY 'password';" - -Create the service user in OpenStack: - -.. 
code-block:: bash - - source openrc admin admin - openstack user create --project=service --password=$kb_svc_pwd $kb_svc_user - openstack role add --user=$kb_svc_user --project=service admin - -For example, the following command will create service user "kingbird", -and grant the user "kingbird" with password "password" the role of admin -in service project: - -.. code-block:: bash - - source openrc admin admin - openstack user create --project=service --password=password kingbird - openstack role add --user=kingbird --project=service admin - - - -Then edit the configuration file for Kingbird: - -.. code-block:: bash - - vim /etc/kingbird/kingbird.conf - -By default, the bind_host of kingbird-api is local_host(127.0.0.1), and the -port for the service is 8118, you can leave it as the default if no port -conflict happened. - -To make the Kingbird work normally, you have to edit these configuration -items. The [cache] section is used by kingbird engine to access the quota -information of Nova, Cinder, Neutron in each region, replace the -auth_uri to the keystone service in your environment, -especially if the keystone service is not located in the same node, and -also for the account to access the Nova, Cinder, Neutron in each region, -in the following configuration, user "admin" with password "password" of -the tenant "admin" is configured to access other Nova, Cinder, Neutron in -each region: - -.. code-block:: bash - - [cache] - auth_uri = http://127.0.0.1:5000/v3 - admin_tenant = admin - admin_password = password - admin_username = admin - -Configure the database section with the service user "kingbird" and its -password, to access database "kingbird". For detailed database section -configuration, please refer to http://docs.openstack.org/developer/oslo.db/opts.html, -and change the following configuration accordingly based on your -environment. - -.. code-block:: bash - - [database] - connection = mysql+pymysql://$kb_db_user:$kb_db_pwd@127.0.0.1/$kb_db?charset=utf8 - -For example, if the database is "kingbird", and the db user "kingbird" with -password "password", then the configuration is as following: - -.. code-block:: bash - - [database] - connection = mysql+pymysql://kingbird:password@127.0.0.1/kingbird?charset=utf8 - -The [keystone_authtoken] section is used by keystonemiddleware for token -validation during the API request to the kingbird-api, please refer to -http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html -on how to configure the keystone_authtoken section for the keystonemiddleware -in detail, and change the following configuration accordingly based on your -environment: - -*please specify the region_name where you want the token will be validated if the -KeyStone is deployed in multiple regions* - -.. code-block:: bash - - [keystone_authtoken] - signing_dir = /var/cache/kingbird - cafile = /opt/stack/data/ca-bundle.pem - auth_uri = http://127.0.0.1:5000/v3 - project_domain_name = Default - project_name = service - user_domain_name = Default - password = $kb_svc_pwd - username = $kb_svc_user - auth_url = http://127.0.0.1:35357/v3 - auth_type = password - region_name = RegionOne - -For example, if the service user is "kingbird, and the password for the user -is "password", then the configuration will look like this: - -.. 
code-block:: bash - - [keystone_authtoken] - signing_dir = /var/cache/kingbird - cafile = /opt/stack/data/ca-bundle.pem - auth_uri = http://127.0.0.1:5000/v3 - project_domain_name = Default - project_name = service - user_domain_name = Default - password = password - username = kingbird - auth_url = http://127.0.0.1:35357/v3 - auth_type = password - region_name = RegionOne - - -And also configure the message bus connection, you can refer to the message -bus configuration in Nova, Cinder, Neutron configuration file. - -.. code-block:: bash - - [DEFAULT] - rpc_backend = rabbit - control_exchange = openstack - transport_url = None - - [oslo_messaging_rabbit] - rabbit_host = 127.0.0.1 - rabbit_port = 5671 - rabbit_userid = guest - rabbit_password = guest - rabbit_virtual_host = / - -After these basic configuration items configured, now the database schema of -"kingbird" should be created: - -.. code-block:: bash - - python kingbird/cmd/manage.py --config-file=/etc/kingbird/kingbird.conf db_sync - -And create the service and endpoint for Kingbird, please change the endpoint url -according to your cloud planning: - -.. code-block:: bash - - openstack service create --name=kingbird synchronization - openstack endpoint create --region=RegionOne \ - --publicurl=http://127.0.0.1:8118/v1.0 \ - --adminurl=http://127.0.0.1:8118/v1.0 \ - --internalurl=http://127.0.0.1:8118/v1.0 kingbird - -Now it's ready to run kingbird-api and kingbird-engine: - -.. code-block:: bash - - nohup python kingbird/cmd/api.py --config-file=/etc/kingbird/kingbird.conf & - nohup python kingbird/cmd/engine.py --config-file=/etc/kingbird/kingbird.conf & - -Run the following command to check whether kingbird-api and kingbird-engine -are running: - -.. code-block:: bash - - ps aux|grep python - - -Post-installation activities ----------------------------- - -Run the following commands to check whether kingbird-api is serving, please -replace $token to the token you get from "openstack token issue": - -.. code-block:: bash - - openstack token issue - curl -H "Content-Type: application/json" -H "X-Auth-Token: $token" \ - http://127.0.0.1:8118/ - -If the response looks like following: {"versions": [{"status": "CURRENT", -"updated": "2016-03-07", "id": "v1.0", "links": [{"href": -"http://127.0.0.1:8118/v1.0/", "rel": "self"}]}]}, -then that means the kingbird-api is working normally. - -Run the following commands to check whether kingbird-engine is serving, please -replace $token to the token you get from "openstack token issue", and the -$admin_project_id to the admin project id in your environment: - -.. code-block:: bash - - curl -H "Content-Type: application/json" -H "X-Auth-Token: $token" \ - -H "X_ROLE: admin" -X PUT \ - http://127.0.0.1:8118/v1.0/$admin_project_id/os-quota-sets/$admin_project_id/sync - -If the response looks like following: "triggered quota sync for -0320065092b14f388af54c5bd18ab5da", then that means the kingbird-engine -is working normally. diff --git a/docs/installationprocedure/multisite.kingbird.installation.rst b/docs/installationprocedure/multisite.kingbird.installation.rst new file mode 100644 index 0000000..11a516a --- /dev/null +++ b/docs/installationprocedure/multisite.kingbird.installation.rst @@ -0,0 +1,305 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. 
http://creativecommons.org/licenses/by/4.0
+
+===========================================
+Multisite Kingbird installation instruction
+===========================================
+
+Abstract
+--------
+This document gives the user instructions on how to deploy the
+available scenarios verified for the Colorado release of the OPNFV
+platform.
+
+
+Preparing the installation
+--------------------------
+Kingbird is a centralized synchronization service for multi-region OpenStack
+deployments. In the OPNFV Colorado release, Kingbird provides a centralized
+quota management feature. At least two OpenStack regions with a shared
+KeyStone should be installed first.
+
+Kingbird consists of two services, kingbird-api and
+kingbird-engine, which talk to each other through the message bus; both
+services access the database. Kingbird-api receives the RESTful
+API requests for quota management and forwards them to kingbird-engine
+to perform tasks such as quota synchronization.
+
+Therefore, install Kingbird on the controller nodes of one of the OpenStack
+regions; the two services can be deployed on the same node or on different
+nodes. Both kingbird-api and kingbird-engine can run on multiple nodes in
+multi-worker mode. It is up to you on how many nodes you want to deploy
+kingbird-api and kingbird-engine, and they can work on the same node or
+on different nodes.
+
+HW requirements
+---------------
+No special hardware requirements
+
+Installation instruction
+------------------------
+
+In the Colorado release, Kingbird is recommended to be installed in a Python
+virtual environment, so install and activate virtualenv first.
+
+.. code-block:: bash
+
+    sudo pip install virtualenv
+    virtualenv venv
+    source venv/bin/activate
+
+Get the latest code of Kingbird from the git repository:
+
+.. code-block:: bash
+
+    git clone https://github.com/openstack/kingbird.git
+    cd kingbird/
+    pip install -e .
+
+
+or get the stable release from the PyPI repository:
+
+.. code-block:: bash
+
+    pip install kingbird
+
+In case the database packages are not installed, you may need to install them:
+
+.. code-block:: bash
+
+    pip install mysql
+    pip install pymysql
+
+In the Kingbird root folder, where you can find the source code of Kingbird,
+generate the configuration sample file for Kingbird:
+
+.. code-block:: bash
+
+    oslo-config-generator --config-file=./tools/config-generator.conf
+
+Prepare the folders used for cache, logs and configuration for Kingbird:
+
+.. code-block:: bash
+
+    sudo rm -rf /var/cache/kingbird
+    sudo mkdir -p /var/cache/kingbird
+    sudo chown `whoami` /var/cache/kingbird
+    sudo rm -rf /var/log/kingbird
+    sudo mkdir -p /var/log/kingbird
+    sudo chown `whoami` /var/log/kingbird
+    sudo rm -rf /etc/kingbird
+    sudo mkdir -p /etc/kingbird
+    sudo chown `whoami` /etc/kingbird
+
+Copy the sample configuration to the configuration folder /etc/kingbird:
+
+.. code-block:: bash
+
+    cp etc/kingbird/kingbird.conf.sample /etc/kingbird/kingbird.conf
+
+Before editing the configuration file, prepare the database info for Kingbird.
+
+.. code-block:: bash
+
+    mysql -uroot -e "CREATE DATABASE $kb_db CHARACTER SET utf8;"
+    mysql -uroot -e "GRANT ALL PRIVILEGES ON $kb_db.* TO '$kb_db_user'@'%' IDENTIFIED BY '$kb_db_pwd';"
+
+For example, the following commands will create the database "kingbird", and
+grant the privilege for the db user "kingbird" with password "password":
+
+.. 
code-block:: bash + + mysql -uroot -e "CREATE DATABASE kingbird CHARACTER SET utf8;" + mysql -uroot -e "GRANT ALL PRIVILEGES ON kingbird.* TO 'kingbird'@'%' IDENTIFIED BY 'password';" + +Create the service user in OpenStack: + +.. code-block:: bash + + source openrc admin admin + openstack user create --project=service --password=$kb_svc_pwd $kb_svc_user + openstack role add --user=$kb_svc_user --project=service admin + +For example, the following command will create service user "kingbird", +and grant the user "kingbird" with password "password" the role of admin +in service project: + +.. code-block:: bash + + source openrc admin admin + openstack user create --project=service --password=password kingbird + openstack role add --user=kingbird --project=service admin + + + +Then edit the configuration file for Kingbird: + +.. code-block:: bash + + vim /etc/kingbird/kingbird.conf + +By default, the bind_host of kingbird-api is local_host(127.0.0.1), and the +port for the service is 8118, you can leave it as the default if no port +conflict happened. + +To make the Kingbird work normally, you have to edit these configuration +items. The [cache] section is used by kingbird engine to access the quota +information of Nova, Cinder, Neutron in each region, replace the +auth_uri to the keystone service in your environment, +especially if the keystone service is not located in the same node, and +also for the account to access the Nova, Cinder, Neutron in each region, +in the following configuration, user "admin" with password "password" of +the tenant "admin" is configured to access other Nova, Cinder, Neutron in +each region: + +.. code-block:: bash + + [cache] + auth_uri = http://127.0.0.1:5000/v3 + admin_tenant = admin + admin_password = password + admin_username = admin + +Configure the database section with the service user "kingbird" and its +password, to access database "kingbird". For detailed database section +configuration, please refer to http://docs.openstack.org/developer/oslo.db/opts.html, +and change the following configuration accordingly based on your +environment. + +.. code-block:: bash + + [database] + connection = mysql+pymysql://$kb_db_user:$kb_db_pwd@127.0.0.1/$kb_db?charset=utf8 + +For example, if the database is "kingbird", and the db user "kingbird" with +password "password", then the configuration is as following: + +.. code-block:: bash + + [database] + connection = mysql+pymysql://kingbird:password@127.0.0.1/kingbird?charset=utf8 + +The [keystone_authtoken] section is used by keystonemiddleware for token +validation during the API request to the kingbird-api, please refer to +http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html +on how to configure the keystone_authtoken section for the keystonemiddleware +in detail, and change the following configuration accordingly based on your +environment: + +*please specify the region_name where you want the token will be validated if the +KeyStone is deployed in multiple regions* + +.. 
code-block:: bash
+
+    [keystone_authtoken]
+    signing_dir = /var/cache/kingbird
+    cafile = /opt/stack/data/ca-bundle.pem
+    auth_uri = http://127.0.0.1:5000/v3
+    project_domain_name = Default
+    project_name = service
+    user_domain_name = Default
+    password = $kb_svc_pwd
+    username = $kb_svc_user
+    auth_url = http://127.0.0.1:35357/v3
+    auth_type = password
+    region_name = RegionOne
+
+For example, if the service user is "kingbird", and the password for the user
+is "password", then the configuration will look like this:
+
+.. code-block:: bash
+
+    [keystone_authtoken]
+    signing_dir = /var/cache/kingbird
+    cafile = /opt/stack/data/ca-bundle.pem
+    auth_uri = http://127.0.0.1:5000/v3
+    project_domain_name = Default
+    project_name = service
+    user_domain_name = Default
+    password = password
+    username = kingbird
+    auth_url = http://127.0.0.1:35357/v3
+    auth_type = password
+    region_name = RegionOne
+
+
+Also configure the message bus connection; you can refer to the message
+bus configuration in the Nova, Cinder or Neutron configuration files.
+
+.. code-block:: bash
+
+    [DEFAULT]
+    rpc_backend = rabbit
+    control_exchange = openstack
+    transport_url = None
+
+    [oslo_messaging_rabbit]
+    rabbit_host = 127.0.0.1
+    rabbit_port = 5671
+    rabbit_userid = guest
+    rabbit_password = guest
+    rabbit_virtual_host = /
+
+After these basic configuration items are configured, the database schema of
+"kingbird" should be created:
+
+.. code-block:: bash
+
+    python kingbird/cmd/manage.py --config-file=/etc/kingbird/kingbird.conf db_sync
+
+Then create the service and endpoint for Kingbird; please change the endpoint
+url according to your cloud planning:
+
+.. code-block:: bash
+
+    openstack service create --name=kingbird synchronization
+    openstack endpoint create --region=RegionOne \
+        --publicurl=http://127.0.0.1:8118/v1.0 \
+        --adminurl=http://127.0.0.1:8118/v1.0 \
+        --internalurl=http://127.0.0.1:8118/v1.0 kingbird
+
+Now you are ready to run kingbird-api and kingbird-engine:
+
+.. code-block:: bash
+
+    nohup python kingbird/cmd/api.py --config-file=/etc/kingbird/kingbird.conf &
+    nohup python kingbird/cmd/engine.py --config-file=/etc/kingbird/kingbird.conf &
+
+Run the following command to check whether kingbird-api and kingbird-engine
+are running:
+
+.. code-block:: bash
+
+    ps aux|grep python
+
+
+Post-installation activities
+----------------------------
+
+Run the following commands to check whether kingbird-api is serving; please
+replace $token with the token you get from "openstack token issue":
+
+.. code-block:: bash
+
+    openstack token issue
+    curl -H "Content-Type: application/json" -H "X-Auth-Token: $token" \
+        http://127.0.0.1:8118/
+
+If the response looks like the following: {"versions": [{"status": "CURRENT",
+"updated": "2016-03-07", "id": "v1.0", "links": [{"href":
+"http://127.0.0.1:8118/v1.0/", "rel": "self"}]}]},
+then the kingbird-api is working normally.
+
+Run the following commands to check whether kingbird-engine is serving; please
+replace $token with the token you get from "openstack token issue", and
+$admin_project_id with the admin project id in your environment:
+
+.. code-block:: bash
+
+    curl -H "Content-Type: application/json" -H "X-Auth-Token: $token" \
+        -H "X_ROLE: admin" -X PUT \
+        http://127.0.0.1:8118/v1.0/$admin_project_id/os-quota-sets/$admin_project_id/sync
+
+If the response looks like the following: "triggered quota sync for
+0320065092b14f388af54c5bd18ab5da", then the kingbird-engine
+is working normally.
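+
+The two checks above can also be combined into a small smoke-test script.
+This is only an illustrative sketch; the endpoint address and the admin
+project name are assumptions, not part of the installation itself:
+
+.. code-block:: bash
+
+    #!/bin/bash
+    # Fetch a token and the admin project id via the OpenStack CLI.
+    KB_URL=http://127.0.0.1:8118
+    TOKEN=$(openstack token issue -f value -c id)
+    ADMIN_PROJECT_ID=$(openstack project show admin -f value -c id)
+
+    # kingbird-api liveness: should return the version document.
+    curl -s -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" \
+        $KB_URL/
+
+    # kingbird-engine liveness: should answer "triggered quota sync for ...".
+    curl -s -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" \
+        -H "X_ROLE: admin" -X PUT \
+        $KB_URL/v1.0/$ADMIN_PROJECT_ID/os-quota-sets/$ADMIN_PROJECT_ID/sync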
diff --git a/docs/releasenotes/index.rst b/docs/releasenotes/index.rst
new file mode 100644
index 0000000..df1e186
--- /dev/null
+++ b/docs/releasenotes/index.rst
@@ -0,0 +1,12 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+**************************
+Multisite Release Notes
+**************************
+
+.. toctree::
+   :numbered:
+   :maxdepth: 4
+
+   multisite.release.notes.rst
diff --git a/docs/releasenotes/multisite.release.notes.rst b/docs/releasenotes/multisite.release.notes.rst
new file mode 100644
index 0000000..d90a064
--- /dev/null
+++ b/docs/releasenotes/multisite.release.notes.rst
@@ -0,0 +1,14 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+Release Notes of Multisite project
+==================================
+
+The Multisite project identifies the requirements and gaps for the
+VIM (OpenStack) to support a multi-site NFV cloud.
+
+Documentation of the requirements, installation, configuration and usage
+guides for multi-site and Kingbird is provided.
+
+This is the first release of the Kingbird service; known bugs are
+registered at https://bugs.launchpad.net/kingbird.
diff --git a/docs/requirements/VNF_high_availability_across_VIM.rst b/docs/requirements/VNF_high_availability_across_VIM.rst
index 1a7d41b..6c2e9f1 100644
--- a/docs/requirements/VNF_high_availability_across_VIM.rst
+++ b/docs/requirements/VNF_high_availability_across_VIM.rst
@@ -1,6 +1,5 @@
-This work is licensed under a Creative Commons Attribution 3.0 Unported License.
-http://creativecommons.org/licenses/by/3.0/legalcode
-
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
 
 =======================================
 VNF high availability across VIM
diff --git a/docs/requirements/multisite-identity-service-management.rst b/docs/requirements/multisite-identity-service-management.rst
index b411c28..ad2cea1 100644
--- a/docs/requirements/multisite-identity-service-management.rst
+++ b/docs/requirements/multisite-identity-service-management.rst
@@ -1,7 +1,5 @@
-This work is licensed under a Creative Commons Attribution 3.0 Unported
-License.
-http://creativecommons.org/licenses/by/3.0/legalcode
-
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
 
 =======================================
 Multisite identity service management
diff --git a/docs/requirements/multisite-vnf-gr-requirement.rst b/docs/requirements/multisite-vnf-gr-requirement.rst
index 7e67cd0..e88ed9c 100644
--- a/docs/requirements/multisite-vnf-gr-requirement.rst
+++ b/docs/requirements/multisite-vnf-gr-requirement.rst
@@ -1,6 +1,5 @@
-This work is licensed under a Creative Commons Attribution 3.0 Unported License.
-http://creativecommons.org/licenses/by/3.0/legalcode
-
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. 
http://creativecommons.org/licenses/by/4.0 ========================================= Multisite VNF Geo site disaster recovery diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst index 1429b33..25de482 100644 --- a/docs/userguide/index.rst +++ b/docs/userguide/index.rst @@ -9,5 +9,5 @@ Multisite Admin User Guide :numbered: :maxdepth: 4 - multisite-admin-user-guide.rst - multisite.kingbird.user.guide.rst + multisite.admin.usage.rst + multisite.kingbird.usage.rst diff --git a/docs/userguide/multisite-admin-user-guide.rst b/docs/userguide/multisite-admin-user-guide.rst deleted file mode 100644 index 41f23c0..0000000 --- a/docs/userguide/multisite-admin-user-guide.rst +++ /dev/null @@ -1,390 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 - -========================== -Multisite admin user guide -========================== - -Multisite identity service management -===================================== - -Goal ----- - -A user should, using a single authentication point be able to manage virtual -resources spread over multiple OpenStack regions. - -Token Format ------------- - -There are 3 types of token format supported by OpenStack KeyStone - - * **UUID** - * **PKI/PKIZ** - * **FERNET** - -It's very important to understand these token format before we begin the -mutltisite identity service management. Please refer to the OpenStack -official site for the identity management. -http://docs.openstack.org/admin-guide-cloud/identity_management.html - -Key consideration in multisite scenario ---------------------------------------- - -A user is provided with a single authentication URL to the Identity (Keystone) -service. Using that URL, the user authenticates with Keystone by -requesting a token typically using username/password credentials. Keystone -server validates the credentials, possibly with an external LDAP/AD server and -returns a token to the user. The user sends a request to a service in a -selected region including the token. Now the service in the region, say Nova -needs to validate the token. The service uses its configured keystone endpoint -and service credentials to request token validation from Keystone. After the -token is validated by KeyStone, the user is authorized to use the service. - -The key considerations for token validation in multisite scenario are: - * Site level failure: impact on authN and authZ shoulde be as minimal as - possible - * Scalable: as more and more sites added, no bottleneck in token validation - * Amount of inter region traffic: should be kept as little as possible - -Hence, Keystone token validation should preferably be done in the same -region as the service itself. - -The challenge to distribute KeyStone service into each region is the KeyStone -backend. Different token format has different data persisted in the backend. - -* UUID: UUID tokens have a fixed size. Tokens are persistently stored and - create a lot of database traffic, the persistence of token is for the revoke - purpose. UUID tokens are validated online by Keystone, call to service will - request keystone for token validation. Keystone can become a - bottleneck in a large system. Due to this, UUID token type is not suitable - for use in multi region clouds, no matter the Keystone database - replicates or not. 
- -* PKI: Tokens are non persistent cryptographic based tokens and validated - offline (not by the Keystone service) by Keystone middleware which is part - of other services such as Nova. Since PKI tokens include endpoint for all - services in all regions, the token size can become big. There are - several ways to reduce the token size such as no catalog policy, endpoint - filter to make a project binding with limited endpoints, and compressed PKI - token - PKIZ, but the size of token is still unpredictable, making it difficult - to manage. If catalog is not applied, that means the user can access all - regions, in some scenario, it's not allowed to do like this. Centralized - Keystone with PKI token to reduce inter region backend synchronization traffic. - PKI tokens do produce Keystone traffic for revocation lists. - -* Fernet: Tokens are non persistent cryptographic based tokens and validated - online by the Keystone service. Fernet tokens are more lightweight - than PKI tokens and have a fixed size. Fernet tokens require Keystone - deployed in a distributed manner, again to avoid inter region traffic. The - data synchronization cost for the Keystone backend is smaller due to the non- - persisted token. - -Cryptographic tokens bring new (compared to UUID tokens) issues/use-cases -like key rotation, certificate revocation. Key management is out of scope for -this use case. - -Database deployment as the backend for KeyStone service ------------------------------------------------------- - -Database replication: - - Master/slave asynchronous: supported by the database server itself - (mysql/mariadb etc), works over WAN, it's more scalable. But only master will - provide write functionality, domain/project/role provisioning. - - Multi master synchronous: Galera(others like percona), not so scalable, - for multi-master writing, and need more parameter tunning for WAN latency.It - can provide the capability for limited multi-sites multi-write - function for distributed KeyStone service. - - Symmetrical/asymmetrical: data replicated to all regions or a subset, - in the latter case it means some regions needs to access Keystone in another - region. - -Database server sharing: -In an OpenStack controller, normally many databases from different -services are provided from the same database server instance. For HA reasons, -the database server is usually synchronously replicated to a few other nodes -(controllers) to form a cluster. Note that _all_ database are replicated in -this case, for example when Galera sync repl is used. - -Only the Keystone database can be replicated to other sites. Replicating -databases for other services will cause those services to get of out sync and -malfunction. - -Since only the Keystone database is to be sync or replicated to another -region/site, it's better to deploy Keystone database into its own -database server with extra networking requirement, cluster or replication -configuration. How to support this by installer is out of scope. - -The database server can be shared when async master/slave replication is -used, if global transaction identifiers GTID is enabled. - -Deployment options ------------------- - -**Distributed KeyStone service with PKI token** - -Deploy KeyStone service in two sites with database replication. If site -level failure impact is not considered, then KeyStone service can only be -deployed into one site. 
- -The PKI token has one great advantage is that the token validation can be -done locally, without sending token validation request to KeyStone server. -The drawback of PKI token is -the endpoint list size in the token. If a project will be only spread in -very limited site number(region number), then we can use the endpoint -filter to reduce the token size, make it workable even a lot of sites -in the cloud. -KeyStone middleware(which is co-located in the service like -Nova-API/xxx-API) will have to send the request to the KeyStone server -frequently for the revoke-list, in order to reject some malicious API -request, for example, a user has to be deactivated, but use an old token -to access OpenStack service. - -For this option, needs to leverage database replication to provide -KeyStone Active-Active mode across sites to reduce the impact of site failure. -And the revoke-list request is very frequently asked, so the performance of the -KeyStone server needs also to be taken care. - -Site level keystone load balance is required to provide site level -redundancy, otherwise the KeyStone middleware will not switch request to the -healthy KeyStone server in time. - -And also the cert distribution/revoke to each site / API server for token -validation is required. - -This option can be used for some scenario where there are very limited -sites, especially if each project only spreads into limited sites ( regions ). - -**Distributed KeyStone service with Fernet token** - -Fernet token is a very new format, and just introduced recently,the biggest -gain for this token format is :1) lightweight, size is small to be carried in -the API request, not like PKI token( as the sites increased, the endpoint-list -will grows and the token size is too long to carry in the API request) 2) no -token persistence, this also make the DB not changed too much and with light -weight data size (just project, Role, domain, endpoint etc). The drawback for -the Fernet token is that token has to be validated by KeyStone for each API -request. - -This makes that the DB of KeyStone can work as a cluster in multisite (for -example, using MySQL galera cluster). That means install KeyStone API server in -each site, but share the same the backend DB cluster.Because the DB cluster -will synchronize data in real time to multisite, all KeyStone server can see -the same data. - -Because each site with KeyStone installed, and all data kept same, -therefore all token validation could be done locally in the same site. - -The challenge for this solution is how many sites the DB cluster can -support. Question is aksed to MySQL galera developers, their answer is that no -number/distance/network latency limitation in the code. But in the practice, -they have seen a case to use MySQL cluster in 5 data centers, each data centers -with 3 nodes. - -This solution will be very good for limited sites which the DB cluster can -cover very well. - -**Distributed KeyStone service with Fernet token + Async replication (star-mode)** - -One master KeyStone cluster with Fernet token in two sites (for site level -high availability purpose), other sites will be installed with at least 2 slave -nodes where the node is configured with DB async replication from the master -cluster members, and one slave’s mater node in site1, another slave’s master -node in site 2. - -Only the master cluster nodes are allowed to write, other slave nodes -waiting for replication from the master cluster member( very little delay). 
- -Pros: - * Deploy database cluster in the master sites is to provide more master - nodes, in order to provide more slaves could be done with async. replication - in parallel. Two sites for the master cluster is to provide higher - reliability (site level) for writing request, but reduce the maintaince - challenge at the same time by limiting the cluster spreading over too many - sites. - * Multi-slaves in other sites is because of the slave has no knowledge of - other slaves, so easy to manage multi-slaves in one site than a cluster, and - multi-slaves work independently but provide multi-instance redundancy(like a - cluster, but independent). - -Cons: - * Need to be aware of the chanllenge of key distribution and rotation - for Fernet token. - -Note: PKI token will be deprecated soon, so Fernet token is encouraged. - -Multisite VNF Geo site disaster recovery -======================================== - -Goal ----- - -A VNF (telecom application) should, be able to restore in another site for -catastrophic failures happened. - -Key consideration in multisite scenario ---------------------------------------- - -Geo site disaster recovery is to deal with more catastrophic failures -(flood, earthquake, propagating software fault), and that loss of calls, or -even temporary loss of service, is acceptable. It is also seems more common -to accept/expect manual / administrator intervene into drive the process, not -least because you don’t want to trigger the transfer by mistake. - -In terms of coordination/replication or backup/restore between geographic -sites, discussion often (but not always) seems to focus on limited application -level data/config replication, as opposed to replication backup/restore between -of cloud infrastructure between different sites. - -And finally, the lack of a requirement to do fast media transfer (without -resignalling) generally removes the need for special networking behavior, with -slower DNS-style redirection being acceptable. - -Here is more concerns about cloud infrastructure level capability to -support VNF geo site disaster recovery - -Option1, Consistency application backup ---------------------------------------- - -The disater recovery process will work like this: - -1) DR(Geo site disaster recovery )software get the volumes for each VM - in the VNF from Nova -2) DR software call Nova quiesce API to quarantee quiecing VMs in desired order -3) DR software takes snapshots of these volumes in Cinder (NOTE: Because - storage often provides fast snapshot, so the duration between quiece and - unquiece is a short interval) -4) DR software call Nova unquiece API to unquiece VMs of the VNF in reverse order -5) DR software create volumes from the snapshots just taken in Cinder -6) DR software create backup (incremental) for these volumes to remote - backup storage ( swift or ceph, or.. ) in Cinder -7) If this site failed, - 1) DR software restore these backup volumes in remote Cinder in the backup site. - 2) DR software boot VMs from bootable volumes from the remote Cinder in - the backup site and attach the regarding data volumes. - -Note: Quiesce/Unquiesce spec was approved in Mitaka, but code not get merged in -time, https://blueprints.launchpad.net/nova/+spec/expose-quiesce-unquiesce-api -The spec was rejected in Newton when it was reproposed: -https://review.openstack.org/#/c/295595/. So this option will not work any more. 
- -Option2, Vitrual Machine Snapshot ---------------------------------- -1) DR software create VM snapshot in Nova -2) Nova quiece the VM internally - (NOTE: The upper level application or DR software should take care of - avoiding infra level outage induced VNF outage) -3) Nova create image in Glance -4) Nova create a snapshot of the VM, including volumes -5) If the VM is volume backed VM, then create volume snapshot in Cinder -5) No image uploaded to glance, but add the snapshot in the meta data of the - image in Glance -6) DR software to get the snapshot information from the Glance -7) DR software create volumes from these snapshots -9) DR software create backup (incremental) for these volumes to backup storage - ( swift or ceph, or.. ) in Cinder -10) If this site failed, - 1) DR software restore these backup volumes to Cinder in the backup site. - 2) DR software boot vm from bootable volume from Cinder in the backup site - and attach the data volumes. - -This option only provides single VM level consistency disaster recovery. - -This feature is already available in current OPNFV release. - -Option3, Consistency volume replication ---------------------------------------- -1) DR software creates datastore (Block/Cinder, Object/Swift, App Custom - storage) with replication enabled at the relevant scope, for use to - selectively backup/replicate desire data to GR backup site -2) DR software get the reference of storage in the remote site storage -3) If primary site failed, - 1) DR software managing recovery in backup site gets references to relevant - storage and passes to new software instances - 2) Software attaches (or has attached) replicated storage, in the case of - volumes promoting to writable. - -Pros: - * Replication will be done in the storage level automatically, no need to - create backup regularly, for example, daily. - * Application selection of limited amount of data to replicate reduces - risk of replicating failed state and generates less overhear. - * Type of replication and model (active/backup, active/active, etc) can - be tailored to application needs - -Cons: - * Applications need to be designed with support in mind, including both - selection of data to be replicated and consideration of consistency - * "Standard" support in Openstack for Disaster Recovery currently fairly - limited, though active work in this area. - -Note: Volume replication v2.1 support project level replication. - - -VNF high availability across VIM -================================ - -Goal ----- - -A VNF (telecom application) should, be able to realize high availability -deloyment across OpenStack instances. - -Key consideration in multisite scenario ---------------------------------------- - -Most of telecom applications have already been designed as -Active-Standby/Active-Active/N-Way to achieve high availability -(99.999%, corresponds to 5.26 minutes of unplanned downtime in a year), -typically state replication or heart beat between -Active-Active/Active-Active/N-Way (directly or via replicated database -services, or via private designed message format) are required. - -We have to accept the currently limited availability ( 99.99%) of a -given OpenStack instance, and intend to provide the availability of the -telecom application by spreading its function across multiple OpenStack -instances.To help with this, many people appear willing to provide multiple -“independent” OpenStack instances in a single geographic site, with special -networking (L2/L3) between clouds in that physical site. 
-
-The telecom application often has different networking plane for different
-purpose:
-
-1) external network plane: using for communication with other telecom
-   application.
-
-2) components inter-communication plane: one VNF often consisted of several
-   components, this plane is designed for components inter-communication with
-   each other
-
-3) backup plane: this plane is used for the heart beat or state replication
-   between the component's active/standby or active/active or N-way cluster.
-
-4) management plane: this plane is mainly for the management purpose, like
-   configuration
-
-Generally these planes are separated with each other. And for legacy telecom
-application, each internal plane will have its fixed or flexible IP addressing
-plane. There are some interesting/hard requirements on the networking (L2/L3)
-between OpenStack instances, at lease the backup plane across different
-OpenStack instances:
-
-1) Overlay L2 networking is prefered as the backup plane for heartbeat or state
-   replication, the reason is:
-
-   a) Support legacy compatibility: Some telecom app with built-in internal L2
-   network, for easy to move these app to virtualized telecom application, it
-   would be better to provide L2 network.
-
-   b) Support IP overlapping: multiple telecom applications may have
-   overlapping IP address for cross OpenStack instance networking.
-   Therefore over L2 networking across Neutron feature is required
-   in OpenStack.
-
-2) L3 networking cross OpenStack instance for heartbeat or state replication.
-   Can leverage FIP or vRouter inter-connected with overlay L2 network to
-   establish overlay L3 networking.
-
-Note: L2 border gateway spec was merged in L2GW project:
-https://review.openstack.org/#/c/270786/. Code will be availabe in later
-release.
diff --git a/docs/userguide/multisite.admin.usage.rst b/docs/userguide/multisite.admin.usage.rst
new file mode 100644
index 0000000..41f23c0
--- /dev/null
+++ b/docs/userguide/multisite.admin.usage.rst
@@ -0,0 +1,390 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+==========================
+Multisite admin user guide
+==========================
+
+Multisite identity service management
+=====================================
+
+Goal
+----
+
+A user should, using a single authentication point, be able to manage virtual
+resources spread over multiple OpenStack regions.
+
+Token Format
+------------
+
+There are three token formats supported by OpenStack KeyStone:
+
+  * **UUID**
+  * **PKI/PKIZ**
+  * **FERNET**
+
+It is very important to understand these token formats before we begin
+multisite identity service management. Please refer to the OpenStack
+official site for identity management:
+http://docs.openstack.org/admin-guide-cloud/identity_management.html
+
+Key consideration in multisite scenario
+---------------------------------------
+
+A user is provided with a single authentication URL to the Identity (Keystone)
+service. Using that URL, the user authenticates with Keystone by requesting a
+token, typically using username/password credentials. The Keystone server
+validates the credentials, possibly with an external LDAP/AD server, and
+returns a token to the user. The user then sends a request, including the
+token, to a service in a selected region. Now the service in that region, say
+Nova, needs to validate the token. The service uses its configured keystone
+endpoint and service credentials to request token validation from Keystone.
After the
+token is validated by KeyStone, the user is authorized to use the service.
+
+The key considerations for token validation in a multisite scenario are:
+  * Site-level failure: the impact on authN and authZ should be as minimal
+    as possible
+  * Scalability: as more and more sites are added, token validation must not
+    become a bottleneck
+  * Amount of inter-region traffic: should be kept as low as possible
+
+Hence, Keystone token validation should preferably be done in the same
+region as the service itself.
+
+The challenge in distributing the KeyStone service into each region is the
+KeyStone backend: different token formats persist different data in the
+backend.
+
+* UUID: UUID tokens have a fixed size. Tokens are persistently stored and
+  create a lot of database traffic; tokens are persisted so that they can be
+  revoked. UUID tokens are validated online by Keystone: every call to a
+  service triggers a token validation request to Keystone, so Keystone can
+  become a bottleneck in a large system. Due to this, the UUID token type is
+  not suitable for multi-region clouds, whether or not the Keystone database
+  is replicated.
+
+* PKI: Tokens are non-persistent, cryptography-based tokens validated offline
+  (not by the Keystone service) by the Keystone middleware, which is part of
+  other services such as Nova. Since PKI tokens include the endpoints of all
+  services in all regions, the token size can become large. There are several
+  ways to reduce the token size, such as a no-catalog policy, an endpoint
+  filter binding a project to a limited set of endpoints, and the compressed
+  PKI token, PKIZ, but the token size is still unpredictable, making it
+  difficult to manage. If no catalog is included, the user can access all
+  regions, which is not acceptable in some scenarios. A centralized Keystone
+  with PKI tokens reduces inter-region backend synchronization traffic, but
+  PKI tokens still produce Keystone traffic for revocation lists.
+
+* Fernet: Tokens are non-persistent, cryptography-based tokens validated
+  online by the Keystone service. Fernet tokens are more lightweight than PKI
+  tokens and have a fixed size. Fernet tokens require Keystone to be deployed
+  in a distributed manner, again to avoid inter-region traffic. The data
+  synchronization cost for the Keystone backend is smaller because tokens are
+  not persisted.
+
+Cryptographic tokens bring new issues/use-cases (compared to UUID tokens)
+such as key rotation and certificate revocation. Key management is out of
+scope for this use case.
+
+Database deployment as the backend for KeyStone service
+-------------------------------------------------------
+
+Database replication:
+  - Master/slave asynchronous: supported by the database server itself
+    (MySQL/MariaDB etc.), works over WAN and is more scalable, but only the
+    master provides write functionality (domain/project/role provisioning).
+  - Multi-master synchronous: Galera (and others such as Percona), not so
+    scalable, allows multi-master writing but needs more parameter tuning for
+    WAN latency. It can provide a limited multi-site multi-write capability
+    for a distributed KeyStone service.
+  - Symmetrical/asymmetrical: data is replicated to all regions or to a
+    subset; in the latter case some regions need to access Keystone in
+    another region.
+
+Database server sharing:
+in an OpenStack controller, databases of many different services are normally
+served from the same database server instance.
For HA reasons,
+the database server is usually synchronously replicated to a few other nodes
+(controllers) to form a cluster. Note that _all_ databases are replicated in
+this case, for example when Galera synchronous replication is used.
+
+Only the Keystone database can be replicated to other sites. Replicating
+databases of other services would cause those services to get out of sync
+and malfunction.
+
+Since only the Keystone database is to be synchronized or replicated to
+another region/site, it is better to deploy the Keystone database on its own
+database server, with the extra networking, cluster or replication
+configuration this requires. How an installer supports this is out of scope.
+
+The database server can be shared when asynchronous master/slave replication
+is used, if global transaction identifiers (GTID) are enabled.
+
+Deployment options
+------------------
+
+**Distributed KeyStone service with PKI token**
+
+Deploy the KeyStone service in two sites with database replication. If the
+impact of a site-level failure is not a concern, the KeyStone service can be
+deployed in one site only.
+
+The great advantage of the PKI token is that token validation can be done
+locally, without sending a token validation request to the KeyStone server.
+The drawback of the PKI token is the size of the endpoint list carried in
+the token. If a project is spread over only a very limited number of sites
+(regions), the endpoint filter can be used to reduce the token size, making
+this option workable even with many sites in the cloud.
+The KeyStone middleware (which is co-located with services such as
+Nova-API) has to request the revocation list from the KeyStone server
+frequently in order to reject malicious API requests, for example a
+deactivated user presenting an old token to access an OpenStack service.
+
+This option needs to leverage database replication to provide a KeyStone
+active-active mode across sites and reduce the impact of a site failure.
+Since the revocation list is requested very frequently, the performance of
+the KeyStone server also needs attention.
+
+Site-level KeyStone load balancing is required to provide site-level
+redundancy, otherwise the KeyStone middleware will not switch requests to a
+healthy KeyStone server in time.
+
+Certificate distribution/revocation to each site/API server is also required
+for token validation.
+
+This option can be used in scenarios with a very limited number of sites,
+especially if each project is spread over only a few sites (regions).
+
+**Distributed KeyStone service with Fernet token**
+
+The Fernet token is a new format, introduced only recently. The biggest gains
+of this token format are: 1) it is lightweight, small enough to be carried in
+API requests, unlike the PKI token (as sites are added, the endpoint list
+grows and the token becomes too long to carry in an API request); 2) there is
+no token persistence, which keeps the database small and lightweight (just
+projects, roles, domains, endpoints etc.). The drawback of the Fernet token
+is that it has to be validated by KeyStone for each API request.
+
+This allows the KeyStone database to work as a cluster across sites (for
+example, using a MySQL Galera cluster): a KeyStone API server is installed in
+each site, but all servers share the same backend database cluster. Because
+the cluster synchronizes data to all sites in real time, every KeyStone
+server sees the same data.
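+
+As a minimal illustration of this deployment option, the Galera-related
+settings on each KeyStone database node could look roughly as follows. This
+is a sketch only: the node names, file path and cluster name are
+hypothetical, and the exact options depend on the MySQL/MariaDB and Galera
+versions in use.
+
+.. code-block:: bash
+
+    # my.cnf on every KeyStone DB node (illustrative values)
+    [mysqld]
+    # load the Galera replication provider (path varies per distribution)
+    wsrep_provider=/usr/lib/galera/libgalera_smm.so
+    # all KeyStone DB nodes across the sites form one synchronous cluster
+    wsrep_cluster_address=gcomm://site1-db,site2-db,site3-db
+    wsrep_cluster_name=keystone_cluster
+    # settings Galera requires for correct multi-master replication
+    binlog_format=ROW
+    default_storage_engine=InnoDB
+    innodb_autoinc_lock_mode=2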
+
+Because each site has KeyStone installed and all data is kept the same, all
+token validation can be done locally within the site.
+
+The challenge for this solution is how many sites the database cluster can
+support. This question was asked of the MySQL Galera developers; their answer
+is that the code imposes no limit on node count, distance or network latency.
+In practice, however, they have seen a MySQL cluster deployed over five data
+centers, each data center with three nodes.
+
+This solution works very well for a limited number of sites that the
+database cluster can cover.
+
+**Distributed KeyStone service with Fernet token + Async replication (star-mode)**
+
+One master KeyStone cluster with Fernet tokens spans two sites (for
+site-level high availability); every other site is installed with at least
+two slave nodes, each configured with asynchronous database replication from
+the master cluster members: one slave's master node is in site 1, the other
+slave's master node is in site 2.
+
+Only the master cluster nodes are allowed to write; the slave nodes wait for
+replication from the master cluster members (with very little delay).
+
+Pros:
+  * Deploying a database cluster in the master sites provides more master
+    nodes, so that more slaves can be fed by asynchronous replication in
+    parallel. Using two sites for the master cluster provides higher
+    (site-level) reliability for write requests, while limiting the cluster
+    to few sites reduces the maintenance challenge.
+  * Multiple slaves are used in the other sites because a slave has no
+    knowledge of other slaves: multiple slaves in one site are easier to
+    manage than a cluster, and they work independently while still providing
+    multi-instance redundancy (like a cluster, but independent).
+
+Cons:
+  * Be aware of the challenge of key distribution and rotation for Fernet
+    tokens.
+
+Note: the PKI token will be deprecated soon, so the Fernet token is
+encouraged.
+
+Multisite VNF Geo site disaster recovery
+========================================
+
+Goal
+----
+
+A VNF (telecom application) should be able to be restored in another site
+after a catastrophic failure.
+
+Key consideration in multisite scenario
+---------------------------------------
+
+Geo-site disaster recovery deals with the more catastrophic failures (flood,
+earthquake, propagating software fault), where loss of calls, or even
+temporary loss of service, is acceptable. It also seems more common to
+accept/expect manual/administrator intervention to drive the process, not
+least because you don’t want to trigger the transfer by mistake.
+
+In terms of coordination/replication or backup/restore between geographic
+sites, discussion often (but not always) seems to focus on limited
+application-level data/config replication, as opposed to backup/restore of
+the cloud infrastructure itself between different sites.
+
+And finally, the lack of a requirement for fast media transfer (without
+resignalling) generally removes the need for special networking behavior,
+with slower DNS-style redirection being acceptable.
+
+Below are the main concerns about the cloud infrastructure level
+capabilities needed to support VNF geo-site disaster recovery.
+
+Option 1: Consistent application backup
+----------------------------------------
+
+The disaster recovery process will work like this:
+
+1) DR (geo-site disaster recovery) software gets the volumes of each VM in
+   the VNF from Nova
+2) DR software calls the Nova quiesce API to guarantee quiescing the VMs in
+   the desired order
+3) DR software takes snapshots of these volumes in Cinder (NOTE: because
+   storage often provides fast snapshots, the interval between quiesce and
+   unquiesce is short)
+4) DR software calls the Nova unquiesce API to unquiesce the VMs of the VNF
+   in reverse order
+5) DR software creates volumes in Cinder from the snapshots just taken
+6) DR software creates (incremental) backups of these volumes in Cinder to
+   remote backup storage (Swift, Ceph, etc.)
+7) If this site fails,
+   1) DR software restores these backup volumes in the remote Cinder in the
+      backup site.
+   2) DR software boots VMs from the bootable volumes in the remote Cinder
+      in the backup site and attaches the corresponding data volumes.
+
+Note: the Quiesce/Unquiesce spec was approved in Mitaka, but the code did not
+get merged in time
+(https://blueprints.launchpad.net/nova/+spec/expose-quiesce-unquiesce-api),
+and the spec was rejected in Newton when it was re-proposed:
+https://review.openstack.org/#/c/295595/. So this option no longer works.
+
+Option 2: Virtual Machine Snapshot
+-----------------------------------
+1) DR software creates a VM snapshot in Nova
+2) Nova quiesces the VM internally
+   (NOTE: the upper-level application or the DR software should take care of
+   avoiding a VNF outage induced by an infrastructure-level outage)
+3) Nova creates an image in Glance
+4) Nova creates a snapshot of the VM, including its volumes
+5) If the VM is a volume-backed VM, a volume snapshot is created in Cinder
+6) No image data is uploaded to Glance; instead the snapshot is referenced
+   in the metadata of the image in Glance
+7) DR software gets the snapshot information from Glance
+8) DR software creates volumes from these snapshots
+9) DR software creates (incremental) backups of these volumes in Cinder to
+   backup storage (Swift, Ceph, etc.)
+10) If this site fails,
+    1) DR software restores these backup volumes to Cinder in the backup
+       site.
+    2) DR software boots VMs from the bootable volumes in Cinder in the
+       backup site and attaches the data volumes.
+
+This option only provides disaster recovery with single-VM-level
+consistency.
+
+This feature is already available in the current OPNFV release.
+
+Option 3: Consistent volume replication
+----------------------------------------
+1) DR software creates a datastore (block/Cinder, object/Swift, or
+   application-custom storage) with replication enabled at the relevant
+   scope, used to selectively backup/replicate the desired data to the GR
+   backup site
+2) DR software gets the reference of the storage in the remote site
+3) If the primary site fails,
+   1) the DR software managing recovery in the backup site gets references
+      to the relevant storage and passes them to the new software instances
+   2) the software attaches (or has attached) the replicated storage, in the
+      case of volumes promoting them to writable.
+
+Pros:
+  * Replication is done automatically at the storage level; there is no need
+    to create backups regularly, for example daily.
+  * Application selection of a limited amount of data to replicate reduces
+    the risk of replicating failed state and generates less overhead.
+  * The type and model of replication (active/backup, active/active, etc.)
+    can be tailored to application needs
+
+Cons:
+  * Applications need to be designed with this support in mind, including
+    both the selection of data to be replicated and consideration of
+    consistency
+  * "Standard" support in OpenStack for disaster recovery is currently
+    fairly limited, though there is active work in this area.
+
+Note: volume replication v2.1 supports project-level replication.
+
+
+VNF high availability across VIM
+================================
+
+Goal
+----
+
+A VNF (telecom application) should be able to realize a high-availability
+deployment across OpenStack instances.
+
+Key consideration in multisite scenario
+---------------------------------------
+
+Most telecom applications have already been designed as
+active-standby/active-active/N-way to achieve high availability (99.999%,
+corresponding to 5.26 minutes of unplanned downtime per year); typically,
+state replication or heartbeat between the
+active-standby/active-active/N-way instances (directly, via replicated
+database services, or via a privately designed message format) is required.
+
+We have to accept the currently limited availability (99.99%) of a given
+OpenStack instance, and intend to provide the availability of the telecom
+application by spreading its functions across multiple OpenStack instances.
+To help with this, many people appear willing to provide multiple
+“independent” OpenStack instances in a single geographic site, with special
+networking (L2/L3) between the clouds in that physical site.
+
+The telecom application often has different networking planes for different
+purposes:
+
+1) external network plane: used for communication with other telecom
+   applications.
+
+2) components inter-communication plane: one VNF often consists of several
+   components; this plane is designed for the components to communicate with
+   each other.
+
+3) backup plane: this plane is used for heartbeat or state replication
+   between the components of an active/standby, active/active or N-way
+   cluster.
+
+4) management plane: this plane is mainly for management purposes, such as
+   configuration.
+
+Generally these planes are separated from each other, and for a legacy
+telecom application each internal plane will have its own fixed or flexible
+IP addressing plan. There are some interesting/hard requirements on the
+networking (L2/L3) between OpenStack instances, at least for the backup
+plane across different OpenStack instances:
+
+1) Overlay L2 networking is preferred as the backup plane for heartbeat or
+   state replication. The reasons are:
+
+   a) Legacy compatibility: some telecom applications have a built-in
+   internal L2 network; to ease moving these applications to virtualized
+   telecom applications, it is better to provide an L2 network.
+
+   b) IP overlapping: multiple telecom applications may have overlapping IP
+   addresses for cross-OpenStack-instance networking. Therefore an L2
+   networking capability spanning Neutron instances is required in
+   OpenStack.
+
+2) L3 networking across OpenStack instances for heartbeat or state
+   replication can leverage floating IPs, or vRouters interconnected by an
+   overlay L2 network, to establish overlay L3 networking.
+
+Note: the L2 border gateway spec was merged in the L2GW project
+(https://review.openstack.org/#/c/270786/); code will be available in a
+later release.
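+
+For orientation, the networking-l2gw project exposes CLI commands along the
+following lines. This is a sketch only, assuming the L2GW service plugin and
+agent are installed; the gateway, device, interface and network names are
+hypothetical.
+
+.. code-block:: bash
+
+    # declare a gateway device known to the L2GW plugin
+    neutron l2-gateway-create gw-site1 \
+        --device name=tor-switch1,interface_names=eth3
+
+    # bind a tenant network (e.g. the backup plane) to that gateway,
+    # stitching its overlay L2 segment to segmentation id 100
+    neutron l2-gateway-connection-create gw-site1 backup-net \
+        --default-segmentation-id 100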
diff --git a/docs/userguide/multisite.kingbird.usage.rst b/docs/userguide/multisite.kingbird.usage.rst
new file mode 100644
index 0000000..c5bb3eb
--- /dev/null
+++ b/docs/userguide/multisite.kingbird.usage.rst
@@ -0,0 +1,192 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+=============================
+Multisite.Kingbird user guide
+=============================
+
+Quota management for OpenStack multi-region deployments
+-------------------------------------------------------
+Kingbird is a centralized synchronization service for multi-region OpenStack
+deployments. In the OPNFV Colorado release, Kingbird provides a centralized
+quota management feature. An administrator can set per-project quotas in
+Kingbird and synchronize the quota limits to the multi-region OpenStack
+deployment periodically or on demand. A tenant can check the total quota
+limits and usage for all regions from Kingbird. The administrator can also
+manage the default quotas via quota class settings.
+
+The following quota items can be managed in Kingbird:
+
+- **instances**: Number of instances allowed per project.
+- **cores**: Number of instance cores allowed per project.
+- **ram**: Megabytes of instance RAM allowed per project.
+- **metadata_items**: Number of metadata items allowed per instance.
+- **key_pairs**: Number of key pairs per user.
+- **fixed_ips**: Number of fixed IPs allowed per project,
+  valid if Nova Network is used.
+- **security_groups**: Number of security groups per project,
+  valid if Nova Network is used.
+- **floating_ips**: Number of floating IPs allowed per project,
+  valid if Nova Network is used.
+- **network**: Number of networks allowed per project,
+  valid if Neutron is used.
+- **subnet**: Number of subnets allowed per project,
+  valid if Neutron is used.
+- **port**: Number of ports allowed per project,
+  valid if Neutron is used.
+- **security_group**: Number of security groups allowed per project,
+  valid if Neutron is used.
+- **security_group_rule**: Number of security group rules allowed per project,
+  valid if Neutron is used.
+- **router**: Number of routers allowed per project,
+  valid if Neutron is used.
+- **floatingip**: Number of floating IPs allowed per project,
+  valid if Neutron is used.
+- **volumes**: Number of volumes allowed per project.
+- **snapshots**: Number of snapshots allowed per project.
+- **gigabytes**: Total amount of storage, in gigabytes, allowed for volumes
+  and snapshots per project.
+- **backups**: Number of volume backups allowed per project.
+- **backup_gigabytes**: Total amount of storage, in gigabytes, allowed for
+  volume backups per project.
+
+Only RESTful APIs are provided for Kingbird in the Colorado release, so curl
+or another HTTP client can be used to call the Kingbird API.
+
+Before using the following commands, first obtain a token, the project IDs
+and the Kingbird service endpoint. Below, $kb_token represents the token,
+$admin_tenant_id the administrator's project_id, $tenant_id the target
+project_id for quota management, and $kb_ip_addr the IP address of the
+Kingbird service endpoint.
+
+Note:
+To view all tenants (projects), run:
+
+.. code-block:: bash
+
+    openstack project list
+
+To get a token, run:
+
+.. code-block:: bash
+
+    openstack token issue
+
+To get the Kingbird service endpoint, run:
+
+.. code-block:: bash
+
+    openstack endpoint list
+
+Quota Management API
+--------------------
+
+1.
Update global limit for a tenant
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    -X PUT \
+    -d '{"quota_set":{"cores": 10,"ram": 51200, "metadata_items": 100,"key_pairs": 100, "network":20,"security_group": 20,"security_group_rule": 20}}' \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id
+
+2. Get global limit for a tenant
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id
+
+3. A tenant can also get its own global limit
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    http://$kb_ip_addr:8118/v1.0/$tenant_id/os-quota-sets/$tenant_id
+
+4. Get default limits
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/defaults
+
+5. Get total usage for a tenant
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    -X GET \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id/detail
+
+6. A tenant can also get its own total usage
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -X GET \
+    http://$kb_ip_addr:8118/v1.0/$tenant_id/os-quota-sets/$tenant_id/detail
+
+7. On-demand quota sync
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    -X PUT \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id/sync
+
+
+8. Delete specific global limits for a tenant
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    -X DELETE \
+    -d '{"quota_set": [ "cores", "ram"]}' \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id
+
+9. Delete all Kingbird global limits for a tenant
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    -X DELETE \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id
+
+
+Quota Class API
+---------------
+
+1. Update default quota class
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    -X PUT \
+    -d '{"quota_class_set":{"cores": 100, "network":50,"security_group": 50,"security_group_rule": 50}}' \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-class-sets/default
+
+2. Get default quota class
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-class-sets/default
+
+3. Delete default quota class
+
+  curl \
+    -H "Content-Type: application/json" \
+    -H "X-Auth-Token: $kb_token" \
+    -H "X_ROLE: admin" \
+    -X DELETE \
+    http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-class-sets/default
+
diff --git a/docs/userguide/multisite.kingbird.user.guide.rst b/docs/userguide/multisite.kingbird.user.guide.rst
deleted file mode 100644
index 6ae3881..0000000
--- a/docs/userguide/multisite.kingbird.user.guide.rst
+++ /dev/null
@@ -1,193 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-..
(c) OPNFV - -============================= -Multisite.Kingbird user guide -============================= - -Quota management for OpenStack multi-region deployments -------------------------------------------------------- -Kingbird is centralized synchronization service for multi-region OpenStack -deployments. In OPNFV Colorado release, Kingbird provides centralized quota -management feature. Administrator can set quota per project based in Kingbird -and sync the quota limit to multi-region OpenStack periodiclly or on-demand. -The tenant can check the total quota limit and usage from Kingbird for all -regions. Administrator can aslo manage the default quota by quota class -setting. - -Following quota items are supported to be managed in Kingbird: - -- **instances**: Number of instances allowed per project. -- **cores**: Number of instance cores allowed per project. -- **ram**: Megabytes of instance RAM allowed per project. -- **metadata_items**: Number of metadata items allowed per instance. -- **key_pairs**: Number of key pairs per user. -- **fixed_ips**: Number of fixed IPs allowed per project, - valid if Nova Network is used. -- **security_groups**: Number of security groups per project, - valid if Nova Network is used. -- **floating_ips**: Number of floating IPs allowed per project, - valid if Nova Network is used. -- **network**: Number of networks allowed per project, - valid if Neutron is used. -- **subnet**: Number of subnets allowed per project, - valid if Neutron is used. -- **port**: Number of ports allowed per project, - valid if Neutron is used. -- **security_group**: Number of security groups allowed per project, - valid if Neutron is used. -- **security_group_rule**: Number of security group rules allowed per project, - valid if Neutron is used. -- **router**: Number of routers allowed per project, - valid if Neutron is used. -- **floatingip**: Number of floating IPs allowed per project, - valid if Neutron is used. -- **volumes**: Number of volumes allowed per project. -- **snapshots**: Number of snapshots allowed per project. -- **gigabytes**: Total amount of storage, in gigabytes, allowed for volumes - and snapshots per project. -- **backups**: Number of volume backups allowed per project. -- **backup_gigabytes**: Total amount of storage, in gigabytes, allowed for volume - backups per project. - -Only restful APIs are provided for Kingbird in Colorado release, so curl or -other http client can be used to call Kingbird API. - -Before use the following command, get token, project id, and kingbird service -endpoint first. Use $kb_token to repesent the token, and $admin_tenant_id as -administrator project_id, and $tenant_id as the target project_id for quota -management and $kb_ip_addr for the kingbird service endpoint ip address. - -Note: -To view all tenants (projects), run: - -.. code-block:: bash - - openstack project list - -To get token, run: - -.. code-block:: bash - - openstack token issue - -To get Kingbird service endpoint, run: - -.. code-block:: bash - - openstack endpoint list - -Quota Management API --------------------- - -1. Update global limit for a tenant - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "ROLE: dmin" \ - -X PUT \ - -d '{"quota_set":{"cores": 10,"ram": 51200, "metadata_items": 100,"key_pairs": 100, "network":20,"security_group": 20,"security_group_rule": 20}}' \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id - -2. 
Get global limit for a tenant - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "X_ROLE: admin" \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id - -3. A tenant can also get the global limit by himself - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - http://$kb_ip_addr:8118/v1.0/$tenant_id/os-quota-sets/$tenant_id - -4. Get defaults limits - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "X_ROLE: admin" \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/defaults - -5. Get total usage for a tenant - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "X_ROLE: admin" \ - -X GET \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id/detail - -6. A tenant can also get the total usage by himself - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -X GET \ - http://$kb_ip_addr:8118/v1.0/$tenant_id/os-quota-sets/$tenant_id/detail - -7. On demand quota sync - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "X_ROLE: admin" \ - -X PUT \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id/sync - - -8. Delete specific global limit for a tenant - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "X_ROLE: admin" \ - -X DELETE \ - -d '{"quota_set": [ "cores", "ram"]}' \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id - -9. Delete all kingbird global limit for a tenant - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "X_ROLE: admin" \ - -X DELETE \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-sets/$tenant_id - - -Quota Class API ---------------- - -1. Update default quota class - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "ROLE: dmin" \ - -X PUT \ - -d '{"quota_class_set":{"cores": 100, "network":50,"security_group": 50,"security_group_rule": 50}}' \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-class-sets/default - -2. Get default quota class - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "X_ROLE: admin" \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-class-sets/default - -3. Delete default quota class - - curl \ - -H "Content-Type: application/json" \ - -H "X-Auth-Token: $kb_token" \ - -H "ROLE: dmin" \ - -X DELETE \ - http://$kb_ip_addr:8118/v1.0/$admin_tenant_id/os-quota-class-sets/default - diff --git a/tools/kingbird/install_kingbird.sh b/tools/kingbird/install_kingbird.sh index 96f13c3..8619ff3 100644 --- a/tools/kingbird/install_kingbird.sh +++ b/tools/kingbird/install_kingbird.sh @@ -89,6 +89,8 @@ mysql -uroot -e "DROP DATABASE IF EXISTS $mysql_db;" mysql -uroot -e "CREATE DATABASE $mysql_db CHARACTER SET utf8;" mysql -uroot -e "GRANT ALL PRIVILEGES ON $mysql_db.* TO '$mysql_user'@'$mysql_host' IDENTIFIED BY '$mysql_pass';" +set +e + #Configure kingbird user openstack user show kingbird 2>/dev/null if [ $? -eq 0 ]; then @@ -113,6 +115,8 @@ else --region ${OS_REGION_NAME} fi +set -e + # Cleanup the folder before making a fresh clone rm -rf kingbird/ -- cgit 1.2.3-korg