author    Justin chi <chigang@huawei.com>    2016-11-09 07:26:38 +0000
committer Gerrit Code Review <gerrit@opnfv.org>    2016-11-09 07:26:38 +0000
commit    5f7e61eb5419681359af3ff3e37fac2b771a5c08 (patch)
tree      11356f5a44b0bedbe3f48652628aec298db2670b
parent    92c05616a3612dca02ccbf88dcf4c73929f15dad (diff)
parent    14c337344987857a4648ff08365b8b128a553ef8 (diff)
Merge "Update the API version for Openstack Newton"
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2 | 2
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2 | 4
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2 | 2
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2 | 4
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml | 3
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2 | 12
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf | 85
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf | 82
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml | 106
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/local_settings.py.j2 | 326
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml | 50
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf | 93
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf | 64
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml | 2
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2 | 36
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml | 101
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_create.yml | 93
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml | 1
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml | 30
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh | 18
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh | 17
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf | 60
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml | 37
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml | 9
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf | 113
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2 | 2
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf | 420
-rw-r--r--  deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf | 109
-rw-r--r--  deploy/adapters/ansible/roles/ha/templates/haproxy.cfg | 2
-rw-r--r--  deploy/adapters/ansible/roles/tacker/templates/tacker.j2 | 2
30 files changed, 1372 insertions, 513 deletions
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2
index d4d232be..752dd0f0 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/aodh/templates/aodh.conf.j2
@@ -21,7 +21,7 @@ rabbit_password = {{ RABBIT_PASS }}
connection = mysql://aodh:{{ AODH_DBPASS }}@{{ db_host }}/aodh
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
auth_url = http://{{ internal_vip.ip }}:35357
identity_uri = http://{{ internal_vip.ip }}:35357
auth_plugin = password
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2
index f1d9125b..4f186b67 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2
@@ -337,8 +337,8 @@ password = console
username = tacker
auth_url = http://{{ internal_vip.ip }}:35357
auth_plugin = password
-identity_uri = http://{{ internal_vip.ip }}:5000
-auth_uri = http://{{ internal_vip.ip }}:5000
+identity_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
[database]
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2
index d4d232be..752dd0f0 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/aodh/templates/aodh.conf.j2
@@ -21,7 +21,7 @@ rabbit_password = {{ RABBIT_PASS }}
connection = mysql://aodh:{{ AODH_DBPASS }}@{{ db_host }}/aodh
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
auth_url = http://{{ internal_vip.ip }}:35357
identity_uri = http://{{ internal_vip.ip }}:35357
auth_plugin = password
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2
index f1d9125b..4f186b67 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/tacker/templates/tacker.j2
@@ -337,8 +337,8 @@ password = console
username = tacker
auth_url = http://{{ internal_vip.ip }}:35357
auth_plugin = password
-identity_uri = http://{{ internal_vip.ip }}:5000
-auth_uri = http://{{ internal_vip.ip }}:5000
+identity_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
[database]
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml
index 9be0c384..9fb2ee19 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml
@@ -71,8 +71,7 @@
- glance
- neutron-common
- neutron-network
- - ceilometer_controller
-# - ext-network
+# - ceilometer_controller
- dashboard
- heat
# - aodh
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2
index d4d232be..b580d78c 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2
@@ -35,12 +35,22 @@ token_cache_time = 300
revocation_cache_time = 60
[service_credentials]
-os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0
+os_auth_url = http://{{ internal_vip.ip }}:5000/v3
os_username = aodh
os_tenant_name = service
os_password = {{ AODH_PASS }}
os_endpoint_type = internalURL
os_region_name = RegionOne
+auth_type = password
+auth_url = http://{{ internal_vip.ip }}:5000/v3
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = aodh
+password = {{ AODH_PASS }}
+interface = internalURL
+region_name = RegionOne
+
[api]
host = {{ internal_ip }}
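
The new [service_credentials] keys above switch aodh's service auth from the v2.0 endpoint to Keystone v3 password auth. As a rough client-side illustration only (not part of this commit), the same set of options maps onto a keystoneauth1 v3 session; the address and password below are placeholders for internal_vip.ip and AODH_PASS:

# Illustrative sketch: the v3 session that the new [service_credentials]
# options describe. Host and password are placeholders, not values from
# this repository.
from keystoneauth1.identity import v3
from keystoneauth1 import session

auth = v3.Password(
    auth_url='http://10.1.0.50:5000/v3',   # internal_vip.ip placeholder
    username='aodh',
    password='AODH_PASS-placeholder',
    project_name='service',
    user_domain_name='default',
    project_domain_name='default',
)
sess = session.Session(auth=auth)
# A scoped token proves the credentials and URLs line up.
print(sess.get_token())
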
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf
new file mode 100644
index 00000000..d428a078
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-controller/templates/cinder.conf
@@ -0,0 +1,85 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = storage-volumes
+verbose = {{ VERBOSE }}
+debug = {{ DEBUG }}
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+notification_driver = cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+log_file = /var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ internal_vip.ip }}
+glance_port = 9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes = 1000
+quota_driver = cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes = 10000
+
+volume_clear = zero
+volume_clear_size = 10
+
+iscsi_ip_address = {{ storage_controller_host }}
+iscsi_port = 3260
+iscsi_helper = tgtadm
+
+volumes_dir = /var/lib/cinder/volumes
+volume_driver = cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = cinder
+password = {{ CINDER_PASS }}
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[keymgr]
+encryption_auth_url=http://{{ internal_vip.ip }}:5000/v3
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[oslo_concurrency]
+lock_path = /var/lib/cinder/tmp
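
The memcached_servers header at the top of this template builds a comma-separated host:port list from haproxy_hosts. A quick way to check the pattern outside a deployment is to render it with the jinja2 library directly; the haproxy_hosts values below are made up for the sketch:

# Standalone render of the memcached_servers pattern used at the top of
# this template. The haproxy_hosts mapping is a made-up example.
from jinja2 import Template

snippet = (
    "{% set memcached_servers = [] %}"
    "{% for host in haproxy_hosts.values() %}"
    "{% set _ = memcached_servers.append('%s:11211' % host) %}"
    "{% endfor %}"
    "{{ memcached_servers | join(',') }}"
)
print(Template(snippet).render(
    haproxy_hosts={'host1': '10.1.0.50', 'host2': '10.1.0.51'}))
# -> 10.1.0.50:11211,10.1.0.51:11211
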
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf
new file mode 100644
index 00000000..e4f98e82
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/cinder-volume/templates/cinder.conf
@@ -0,0 +1,82 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = storage-volumes
+verbose = True
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lib/cinder/tmp
+notification_driver=cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+log_file=/var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ internal_vip.ip }}
+glance_port = 9292
+glance_api_servers = http://{{ internal_vip.ip }}:9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes = 1000
+quota_driver = cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes = 10000
+
+volume_clear = zero
+volume_clear_size = 10
+
+iscsi_ip_address = {{ storage_controller_host }}
+iscsi_port=3260
+iscsi_helper=tgtadm
+
+volumes_dir=/var/lib/cinder/volumes
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = cinder
+password = {{ CINDER_PASS }}
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[lvm]
+volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
+volume_group = cinder-volumes
+iscsi_protocol = iscsi
+iscsi_helper = tgtadm
+
+[oslo_concurrency]
+lock_path = /var/lib/cinder/tmp
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml
new file mode 100644
index 00000000..9be6fd6c
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/tasks/main.yml
@@ -0,0 +1,106 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install dashboard packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: remove ubuntu theme
+ action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
+ when: ansible_os_family == 'Debian' and not enable_ubuntu_theme
+ notify:
+ - restart dashboard services
+
+- name: remove default apache2 config
+ file:
+ path: '{{ item }}'
+ state: absent
+ when: ansible_os_family == 'Debian'
+ with_items:
+ - '{{ apache_config_dir }}/conf-available/openstack-dashboard.conf'
+ - '{{ apache_config_dir }}/conf-enabled/openstack-dashboard.conf'
+ - '{{ apache_config_dir }}/sites-available/000-default.conf'
+ - '{{ apache_config_dir }}/sites-enabled/000-default.conf'
+ notify:
+ - restart dashboard services
+
+- name: update apache2 configs
+ template:
+ src: openstack-dashboard.conf.j2
+ dest: '{{ apache_config_dir }}/sites-available/openstack-dashboard.conf'
+ when: ansible_os_family == 'Debian'
+ notify:
+ - restart dashboard services
+
+- name: update apache2 configs redhat
+ template:
+ src: openstack-dashboard-redhat.conf.j2
+ dest: '{{ apache_config_dir }}/conf.d/openstack-dashboard.conf'
+ when: ansible_os_family == 'RedHat'
+ notify:
+ - restart dashboard services
+
+- name: enable dashboard
+ file:
+ src: "/etc/apache2/sites-available/openstack-dashboard.conf"
+ dest: "/etc/apache2/sites-enabled/openstack-dashboard.conf"
+ state: "link"
+ when: ansible_os_family == 'Debian'
+ notify:
+ - restart dashboard services
+
+- name: update ubuntu horizon settings
+ template:
+ src: local_settings.py.j2
+ dest: "/etc/openstack-dashboard/local_settings.py"
+ when: ansible_os_family == 'Debian'
+ notify:
+ - restart dashboard services
+
+- name: precompile horizon css
+ shell: /usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force
+ ignore_errors: True
+ when: ansible_os_family == 'Debian'
+ notify:
+ - restart dashboard services
+
+- name: update redhat version horizon settings
+ lineinfile:
+ dest: /etc/openstack-dashboard/local_settings
+ regexp: '{{ item.regexp }}'
+ line: '{{ item.line }}'
+ with_items:
+ - regexp: '^WEBROOT[ \t]*=.*'
+ line: 'WEBROOT = "/horizon"'
+ - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
+ line: 'COMPRESS_OFFLINE=False'
+ - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
+ line: 'ALLOWED_HOSTS = ["*"]'
+ - regexp: '^OPENSTACK_HOST[ \t]*=.*'
+ line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
+ when: ansible_os_family == 'RedHat'
+ notify:
+ - restart dashboard services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/local_settings.py.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/local_settings.py.j2
new file mode 100644
index 00000000..7278d5c2
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/templates/local_settings.py.j2
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from horizon.utils import secret_key
+
+from openstack_dashboard import exceptions
+from openstack_dashboard.settings import HORIZON_CONFIG
+
+DEBUG = False
+
+WEBROOT = '/'
+
+LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+SECRET_KEY = secret_key.generate_or_read_from_file('/var/lib/openstack-dashboard/secret_key')
+
+SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+
+CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION': '{{ internal_vip.ip }}:11211',
+ },
+}
+
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+
+OPENSTACK_HOST = "{{ internal_ip }}"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
+
+OPENSTACK_API_VERSIONS = {
+ "identity": 3,
+ "image": 2,
+ "volume": 2,
+}
+
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True,
+ 'can_edit_group': True,
+ 'can_edit_project': True,
+ 'can_edit_domain': True,
+ 'can_edit_role': True,
+}
+
+OPENSTACK_HYPERVISOR_FEATURES = {
+ 'can_set_mount_point': False,
+ 'can_set_password': False,
+ 'requires_keypair': False,
+ 'enable_quotas': True
+}
+
+OPENSTACK_CINDER_FEATURES = {
+ 'enable_backup': False,
+}
+
+OPENSTACK_NEUTRON_NETWORK = {
+ 'enable_router': True,
+ 'enable_quotas': True,
+ 'enable_ipv6': True,
+ 'enable_distributed_router': False,
+ 'enable_ha_router': False,
+ 'enable_lb': True,
+ 'enable_firewall': True,
+ 'enable_vpn': True,
+ 'enable_fip_topology_check': True,
+ 'profile_support': None,
+ 'supported_vnic_types': ['*'],
+}
+
+OPENSTACK_HEAT_STACK = {
+ 'enable_user_pass': True,
+}
+
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+ "architecture": _("Architecture"),
+ "kernel_id": _("Kernel ID"),
+ "ramdisk_id": _("Ramdisk ID"),
+ "image_state": _("Euca2ools state"),
+ "project_id": _("Project ID"),
+ "image_type": _("Image Type"),
+}
+
+IMAGE_RESERVED_CUSTOM_PROPERTIES = []
+
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
+INSTANCE_LOG_LENGTH = 35
+DROPDOWN_MAX_ITEMS = 30
+
+TIME_ZONE = "UTC"
+
+LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'operation': {
+ 'format': '%(asctime)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'null': {
+ 'level': 'DEBUG',
+ 'class': 'logging.NullHandler',
+ },
+ 'console': {
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ },
+ 'operation': {
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'operation',
+ },
+ },
+ 'loggers': {
+ 'django.db.backends': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'requests': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'horizon': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'horizon.operation_log': {
+ 'handlers': ['operation'],
+ 'level': 'INFO',
+ 'propagate': False,
+ },
+ 'openstack_dashboard': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'novaclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'cinderclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'keystoneclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'glanceclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'neutronclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'heatclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'ceilometerclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'swiftclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_auth': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'nose.plugins.manager': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'iso8601': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'scss': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ },
+}
+
+SECURITY_GROUP_RULES = {
+ 'all_tcp': {
+ 'name': _('All TCP'),
+ 'ip_protocol': 'tcp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_udp': {
+ 'name': _('All UDP'),
+ 'ip_protocol': 'udp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_icmp': {
+ 'name': _('All ICMP'),
+ 'ip_protocol': 'icmp',
+ 'from_port': '-1',
+ 'to_port': '-1',
+ },
+ 'ssh': {
+ 'name': 'SSH',
+ 'ip_protocol': 'tcp',
+ 'from_port': '22',
+ 'to_port': '22',
+ },
+ 'smtp': {
+ 'name': 'SMTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '25',
+ 'to_port': '25',
+ },
+ 'dns': {
+ 'name': 'DNS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '53',
+ 'to_port': '53',
+ },
+ 'http': {
+ 'name': 'HTTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '80',
+ 'to_port': '80',
+ },
+ 'pop3': {
+ 'name': 'POP3',
+ 'ip_protocol': 'tcp',
+ 'from_port': '110',
+ 'to_port': '110',
+ },
+ 'imap': {
+ 'name': 'IMAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '143',
+ 'to_port': '143',
+ },
+ 'ldap': {
+ 'name': 'LDAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '389',
+ 'to_port': '389',
+ },
+ 'https': {
+ 'name': 'HTTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '443',
+ 'to_port': '443',
+ },
+ 'smtps': {
+ 'name': 'SMTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '465',
+ 'to_port': '465',
+ },
+ 'imaps': {
+ 'name': 'IMAPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '993',
+ 'to_port': '993',
+ },
+ 'pop3s': {
+ 'name': 'POP3S',
+ 'ip_protocol': 'tcp',
+ 'from_port': '995',
+ 'to_port': '995',
+ },
+ 'ms_sql': {
+ 'name': 'MS SQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1433',
+ 'to_port': '1433',
+ },
+ 'mysql': {
+ 'name': 'MYSQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3306',
+ 'to_port': '3306',
+ },
+ 'rdp': {
+ 'name': 'RDP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3389',
+ 'to_port': '3389',
+ },
+}
+
+REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
+ 'LAUNCH_INSTANCE_DEFAULTS',
+ 'OPENSTACK_IMAGE_FORMATS']
+
+DEFAULT_THEME = 'ubuntu'
+WEBROOT='/horizon/'
+ALLOWED_HOSTS = ['*',]
+COMPRESS_OFFLINE = True
+ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
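
The LOGGING dict in this settings template follows Python's standard logging dictConfig schema, so it can be sanity-checked outside of Django. A minimal standard-library check against a trimmed copy of the same schema (not the full template contents):

# Sanity-check a dictConfig-style LOGGING mapping like the one above,
# using only the standard library.
import logging
import logging.config

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {'level': 'INFO', 'class': 'logging.StreamHandler'},
    },
    'loggers': {
        'horizon': {'handlers': ['console'], 'level': 'DEBUG',
                    'propagate': False},
    },
}

logging.config.dictConfig(LOGGING)  # raises ValueError on a bad schema
logging.getLogger('horizon').info('logging config accepted')
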
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml
index a8bce16e..2c61ff66 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml
@@ -18,37 +18,27 @@
- name: restart neutron server
service: name=neutron-server state=restarted enabled=yes
+- name: wait for neutron ready
+ wait_for: port=9696 delay=10 timeout=30 host={{ internal_vip.ip }}
+
- name: create external net
- neutron_network:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.network }}"
- provider_network_type: "{{ public_net_info.type }}"
- provider_physical_network: "{{ public_net_info.provider_network }}"
- provider_segmentation_id: "{{ public_net_info.segment_id}}"
- shared: false
- router_external: yes
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
+ shell:
+ . /opt/admin-openrc.sh;
+ neutron net-create \
+ {{ public_net_info.network }} \
+ --provider:network_type {{ public_net_info.type }} \
+ --provider:physical_network {{ public_net_info.provider_network }} \
+ --router:external True
+ when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
- name: create external subnet
- neutron_subnet:
- login_username: ADMIN
- login_password: "{{ ADMIN_PASS }}"
- login_tenant_name: admin
- auth_url: "http://{{ internal_vip.ip }}:35357/v2.0"
- name: "{{ public_net_info.subnet }}"
- network_name: "{{ public_net_info.network }}"
- cidr: "{{ public_net_info.floating_ip_cidr }}"
- enable_dhcp: "{{ public_net_info.enable_dhcp }}"
- no_gateway: "{{ public_net_info.no_gateway }}"
- gateway_ip: "{{ public_net_info.external_gw }}"
- allocation_pool_start: "{{ public_net_info.floating_ip_start }}"
- allocation_pool_end: "{{ public_net_info.floating_ip_end }}"
- state: present
- run_once: true
- when: 'public_net_info.enable == True'
+ shell:
+ . /opt/admin-openrc.sh;
+ neutron subnet-create \
+ --name {{ public_net_info.subnet }} \
+ --gateway {{ public_net_info.external_gw }} \
+ --allocation-pool \
+ start={{ public_net_info.floating_ip_start }},end={{ public_net_info.floating_ip_end }} \
+ {{ public_net_info.network }} {{ public_net_info.floating_ip_cidr }}
+ when: public_net_info.enable == True and inventory_hostname == groups['controller'][0]
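
These replacement tasks drop the neutron_network/neutron_subnet modules in favor of sourcing admin-openrc.sh and calling the neutron CLI on the first controller only. For comparison, not what the playbook runs: the same external network could be created through the openstacksdk library. All connection values and provider settings below are placeholders:

# openstacksdk sketch of the same
# "neutron net-create ... --router:external True" call.
import openstack

conn = openstack.connect(
    auth_url='http://10.1.0.50:5000/v3',   # internal_vip.ip placeholder
    username='admin', password='ADMIN_PASS-placeholder',
    project_name='admin',
    user_domain_name='default', project_domain_name='default',
)
net = conn.network.create_network(
    name='ext-net',                        # public_net_info.network
    provider_network_type='flat',          # public_net_info.type
    provider_physical_network='physnet1',  # public_net_info.provider_network
    is_router_external=True,
)
print(net.id)
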
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf
new file mode 100644
index 00000000..241f04ce
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-api.conf
@@ -0,0 +1,93 @@
+{% set workers = ansible_processor_vcpus // 2 %}
+{% set workers = workers if workers else 1 %}
+{% set memcached_servers = [] %}
+{% set rabbitmq_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
+
+[DEFAULT]
+verbose = {{ VERBOSE }}
+debug = {{ DEBUG }}
+log_file = /var/log/glance/api.log
+bind_host = {{ image_host }}
+bind_port = 9292
+backlog = 4096
+workers = {{ workers }}
+registry_host = {{ internal_ip }}
+registry_port = 9191
+registry_client_protocol = http
+cinder_catalog_info = volume:cinder:internalURL
+
+enable_v1_api = True
+enable_v1_registry = True
+enable_v2_api = True
+enable_v2_registry = True
+
+notification_driver = messagingv2
+rpc_backend = rabbit
+
+delayed_delete = False
+scrubber_datadir = /var/lib/glance/scrubber
+scrub_time = 43200
+image_cache_dir = /var/lib/glance/image-cache/
+show_image_direct_url = True
+
+[database]
+backend = sqlalchemy
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8
+idle_timeout = 30
+sqlite_db = /var/lib/glance/glance.sqlite
+
+[task]
+task_executor = taskflow
+
+[glance_store]
+default_store = file
+stores = file,http,cinder,rbd
+filesystem_store_datadir = /var/lib/glance/images/
+
+[image_format]
+disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,root-tar
+
+[profiler]
+enabled = True
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = glance
+password = {{ GLANCE_PASS }}
+token_cache_time = 300
+revocation_cache_time = 60
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+flavor = keystone
+
+[oslo_messaging_amqp]
+idle_timeout = 7200
+
+[oslo_messaging_rabbit]
+rabbit_hosts = {{ rabbitmq_servers }}
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_virtual_host = /
+default_notification_exchange = glance
+
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf
new file mode 100644
index 00000000..ccd8f1bb
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/templates/glance-registry.conf
@@ -0,0 +1,64 @@
+{% set workers = ansible_processor_vcpus // 2 %}
+{% set workers = workers if workers else 1 %}
+{% set memcached_servers = [] %}
+{% set rabbitmq_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
+
+[DEFAULT]
+verbose = {{ VERBOSE }}
+debug = {{ DEBUG }}
+log_file = /var/log/glance/api.log
+bind_host = {{ image_host }}
+bind_port = 9191
+backlog = 4096
+workers = {{ workers }}
+
+notification_driver = messagingv2
+rpc_backend = rabbit
+
+[database]
+backend = sqlalchemy
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance?charset=utf8
+idle_timeout = 30
+
+[profiler]
+enabled = True
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = glance
+password = {{ GLANCE_PASS }}
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+token_cache_time = 300
+revocation_cache_time = 60
+
+[paste_deploy]
+flavor = keystone
+
+[oslo_messaging_amqp]
+idle_timeout = 7200
+
+[oslo_messaging_rabbit]
+rabbit_hosts = {{ rabbitmq_servers }}
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml
index b90e6402..6a0f1c73 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml
@@ -21,7 +21,7 @@
- name: create heat user domain
shell: >
- . /opt/admin-openrc-v3.sh;
+ . /opt/admin-openrc.sh;
openstack domain create --description "Stack projects and users" heat;
openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2
index 62df9fd9..72d4b61e 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2
@@ -1,10 +1,13 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
log_dir = /var/log/heat
stack_domain_admin = heat_domain_admin
stack_domain_admin_password = {{ HEAT_PASS }}
@@ -17,12 +20,35 @@ use_db_reconnect = True
pool_timeout = 10
[ec2authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+
+[clients_keystone]
+auth_uri = http://{{ internal_vip.ip }}:35357
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = heat
+password = {{ HEAT_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = heat
admin_password = {{ HEAT_PASS }}
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+[trustee]
+auth_type = password
+auth_url = http://{{ internal_vip.ip }}:35357
+username = heat
+password = {{ HEAT_PASS }}
+user_domain_name = default
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml
new file mode 100644
index 00000000..35c84ce8
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_config.yml
@@ -0,0 +1,101 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: keystone-manage db-sync
+ shell: su -s /bin/sh -c 'keystone-manage db_sync' keystone
+
+- name: Check if fernet keys already exist
+ stat:
+ path: "/etc/keystone/fernet-keys/0"
+ register: fernet_keys_0
+
+- name: Create fernet keys for Keystone
+ command:
+ keystone-manage fernet_setup
+ --keystone-user keystone
+ --keystone-group keystone
+ when: not fernet_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Rotate fernet keys for Keystone
+ command:
+ keystone-manage fernet_rotate
+ --keystone-user keystone
+ --keystone-group keystone
+ when: fernet_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Distribute the fernet key repository
+ shell: rsync -e 'ssh -o StrictHostKeyChecking=no' \
+ -avz \
+ --delete \
+ /etc/keystone/fernet-keys \
+ root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
+ with_items: groups['controller'][1:]
+ notify:
+ - restart keystone services
+
+- name: Check if credential keys already exist
+ stat:
+ path: "/etc/keystone/credential-keys/0"
+ register: credential_keys_0
+
+- name: Create credential keys for Keystone
+ command:
+ keystone-manage credential_setup
+ --keystone-user keystone
+ --keystone-group keystone
+ when: not credential_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Rotate credential keys for Keystone
+ command:
+ keystone-manage credential_rotate
+ --keystone-user keystone
+ --keystone-group keystone
+ when: credential_keys_0.stat.exists
+ notify:
+ - restart keystone services
+
+- name: Distribute the credential key repository
+ shell: rsync -e 'ssh -o StrictHostKeyChecking=no' \
+ -avz \
+ --delete \
+ /etc/keystone/credential-keys \
+ root@{{ hostvars[ item ].ansible_eth0.ipv4.address }}:/etc/keystone/
+ with_items: groups['controller'][1:]
+ notify:
+ - restart keystone services
+
+- name: Bootstrap the Identity service
+ shell:
+ keystone-manage bootstrap \
+ --bootstrap-password {{ ADMIN_PASS }} \
+ --bootstrap-admin-url http://{{ internal_ip }}:35357/v3/ \
+ --bootstrap-internal-url http://{{ internal_ip }}:35357/v3/ \
+    --bootstrap-public-url http://{{ internal_ip }}:5000/v3/ \
+    --bootstrap-region-id RegionOne
+ notify:
+ - restart keystone services
+
+- meta: flush_handlers
+
+- name: wait for keystone ready
+ wait_for: port=35357 delay=3 timeout=30 host={{ internal_vip.ip }}
+
+- name: cron job to purge expired tokens hourly
+ cron:
+ name: 'purge expired tokens'
+ special_time: hourly
+ job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1'
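
The fernet tasks above implement a setup-on-first-run, rotate-afterwards policy keyed on whether key 0 exists, and the credential-key tasks mirror it. A compact Python restatement of that decision, shelling out to keystone-manage (illustrative, not what the role executes):

# Illustrative restatement of the fernet-key tasks: create the repository
# on first run, rotate it on later runs. Mirrors the playbook's check on
# /etc/keystone/fernet-keys/0.
import os
import subprocess

KEY0 = '/etc/keystone/fernet-keys/0'
action = 'fernet_rotate' if os.path.exists(KEY0) else 'fernet_setup'
subprocess.check_call([
    'keystone-manage', action,
    '--keystone-user', 'keystone',
    '--keystone-group', 'keystone',
])
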
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_create.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_create.yml
new file mode 100644
index 00000000..53077776
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_create.yml
@@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: set keystone endpoint
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack endpoint set \
+ --interface public \
+ --url {{ item.publicurl }} \
+ $(openstack endpoint list | grep keystone | grep public | awk '{print $2}');
+ openstack endpoint set \
+ --interface internal \
+ --url {{ item.internalurl }} \
+ $(openstack endpoint list | grep keystone | grep internal | awk '{print $2}');
+ openstack endpoint set \
+ --interface admin \
+ --url {{ item.adminurl }} \
+ $(openstack endpoint list | grep keystone | grep admin | awk '{print $2}');
+ with_items: "{{ os_services[0:1] }}"
+
+- name: add service
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack service create \
+    --name "{{ item.name }}" \
+ --description "{{ item.description }}" \
+ {{ item.type }}
+ with_items: "{{ os_services[1:] }}"
+
+- name: add project
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack project create --description "Service Project" service;
+ openstack project create --domain default --description "Demo Project" demo;
+
+- name: set admin user
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack user set \
+ --email "{{ item.email }}" \
+ --project "{{ item.tenant }}" \
+ --description "{{ item.tenant_description }}" \
+ --password "{{ item.password }}" \
+ {{ item.user }}
+ with_items: "{{ os_users }}"
+ when: item["user"] == "admin"
+
+- name: add user
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack user create \
+ --email "{{ item.email }}" \
+ --project "{{ item.tenant }}" \
+ --description "{{ item.tenant_description }}" \
+ --password "{{ item.password }}" \
+ {{ item.user }}
+ with_items: "{{ os_users[1:] }}"
+
+- name: add roles
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack role create {{ item.role }}
+ with_items: "{{ os_users }}"
+ when: item["user"] == "demo"
+
+- name: grant roles
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack role add \
+ --project "{{ item.tenant }}" \
+ --user "{{ item.user }}" \
+ {{ item.role }}
+ with_items: "{{ os_users }}"
+
+- name: add endpoints
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack endpoint create \
+ --region {{ item.region }} \
+ {{ item.name }} public {{ item.publicurl }};
+ openstack endpoint create \
+ --region {{ item.region }} \
+ {{ item.name }} internal {{ item.internalurl }};
+ openstack endpoint create \
+ --region {{ item.region }} \
+ {{ item.name }} admin {{ item.adminurl }};
+ with_items: "{{ os_services[1:] }}"
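
Each non-identity service above gets one "service create" plus three "endpoint create" calls (public, internal, admin), which is the Keystone v3 endpoint model. A sketch of the same pairing through openstacksdk, with placeholder URLs and a hypothetical clouds.yaml entry named mycloud:

# Sketch of the service + three-endpoint pattern the tasks above drive
# through the openstack CLI. URLs and the cloud name are placeholders.
import openstack

conn = openstack.connect(cloud='mycloud')  # hypothetical clouds.yaml entry
svc = conn.identity.create_service(
    name='glance', type='image', description='OpenStack Image')
for interface, url in [('public', 'http://192.0.2.10:9292'),
                       ('internal', 'http://10.1.0.50:9292'),
                       ('admin', 'http://10.1.0.50:9292')]:
    conn.identity.create_endpoint(
        service_id=svc.id, interface=interface,
        url=url, region_id='RegionOne')
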
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml
index 8ff087ce..e9a36d42 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml
@@ -93,6 +93,5 @@
with_items:
- admin-openrc.sh
- demo-openrc.sh
- - admin-openrc-v3.sh
- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml
new file mode 100644
index 00000000..ad619d40
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/main.yml
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- include: keystone_install.yml
+ tags:
+ - install
+ - keystone_install
+ - keystone
+
+- include: keystone_config.yml
+ when: inventory_hostname == groups['controller'][0]
+ tags:
+ - config
+ - keystone_config
+ - keystone
+
+- include: keystone_create.yml
+ when: inventory_hostname == groups['controller'][0]
+ tags:
+ - config
+ - keystone_create
+ - keystone
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh
new file mode 100644
index 00000000..94d5850f
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/admin-openrc.sh
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Verify the Identity Service installation
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_USER_DOMAIN_NAME=default
+export OS_TENANT_NAME=admin
+export OS_PROJECT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
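
This openrc exports the standard v3 OS_* variables. A client can pick them up straight from the environment; a minimal keystoneauth1 sketch, assuming the shell that launched Python has sourced admin-openrc.sh:

# Build a v3 session from the OS_* variables this openrc exports.
# Assumes admin-openrc.sh was sourced in the parent shell.
import os
from keystoneauth1.identity import v3
from keystoneauth1 import session

env = os.environ
auth = v3.Password(
    auth_url=env['OS_AUTH_URL'],
    username=env['OS_USERNAME'],
    password=env['OS_PASSWORD'],
    project_name=env['OS_PROJECT_NAME'],
    user_domain_name=env['OS_USER_DOMAIN_NAME'],
    project_domain_name=env['OS_PROJECT_DOMAIN_NAME'],
)
print(session.Session(auth=auth).get_token())
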
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh
new file mode 100644
index 00000000..920f42ed
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/demo-openrc.sh
@@ -0,0 +1,17 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+export OS_PROJECT_DOMAIN_NAME=default
+export OS_USER_DOMAIN_NAME=default
+export OS_TENANT_NAME=demo
+export OS_PROJECT_NAME=demo
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_AUTH_URL=http://{{ internal_vip.ip }}:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf
new file mode 100644
index 00000000..919be344
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/templates/keystone.conf
@@ -0,0 +1,60 @@
+{% set memcached_servers = [] %}
+{% set rabbitmq_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% set _ = rabbitmq_servers.append('%s:5672'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+{% set rabbitmq_servers = rabbitmq_servers|join(',') %}
+[DEFAULT]
+debug={{ DEBUG }}
+log_dir = /var/log/keystone
+
+[cache]
+backend = keystone.cache.memcache_pool
+memcache_servers = {{ memcached_servers}}
+enabled=true
+
+[revoke]
+driver = sql
+expiration_buffer = 3600
+caching = true
+
+[database]
+connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8
+idle_timeout = 30
+min_pool_size = 5
+max_pool_size = 120
+pool_timeout = 30
+
+[fernet_tokens]
+key_repository = /etc/keystone/fernet-keys/
+
+[identity]
+default_domain_id = default
+driver = sql
+
+[assignment]
+driver = sql
+
+[resource]
+driver = sql
+caching = true
+cache_time = 3600
+
+[token]
+enforce_token_bind = permissive
+expiration = 43200
+provider = fernet
+driver = sql
+caching = true
+cache_time = 3600
+
+[eventlet_server]
+public_bind_host = {{ identity_host }}
+admin_bind_host = {{ identity_host }}
+
+[oslo_messaging_rabbit]
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_hosts = {{ rabbitmq_servers }}
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml
index 79ed06fe..90977372 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml
@@ -17,9 +17,9 @@ os_services:
type: identity
region: RegionOne
description: "OpenStack Identity"
- publicurl: "http://{{ public_vip.ip }}:5000/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:5000/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:35357/v2.0"
+ publicurl: "http://{{ public_vip.ip }}:5000/v3"
+ internalurl: "http://{{ internal_vip.ip }}:5000/v3"
+ adminurl: "http://{{ internal_vip.ip }}:35357/v3"
- name: glance
type: image
@@ -33,9 +33,9 @@ os_services:
type: compute
region: RegionOne
description: "OpenStack Compute"
- publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8774/v2.1/%\\(tenant_id\\)s"
- name: neutron
type: network
@@ -65,25 +65,25 @@ os_services:
type: volume
region: RegionOne
description: "OpenStack Block Storage"
- publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v1/%\\(tenant_id\\)s"
- name: cinderv2
type: volumev2
region: RegionOne
description: "OpenStack Block Storage v2"
- publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8776/v2/%\\(tenant_id\\)s"
- name: heat
type: orchestration
region: RegionOne
description: "OpenStack Orchestration"
- publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s"
- internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
- adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s"
+ publicurl: "http://{{ public_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
+ internalurl: "http://{{ internal_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
+ adminurl: "http://{{ internal_vip.ip }}:8004/v1/%\\(tenant_id\\)s"
- name: heat-cfn
type: cloudformation
@@ -97,9 +97,9 @@ os_services:
# type: object-store
# region: RegionOne
# description: "OpenStack Object Storage"
-# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
-# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s"
+# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
+# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%\\(tenant_id\\)s"
os_users:
- user: admin
@@ -178,3 +178,4 @@ os_users:
# role: admin
# tenant: service
# tenant_description: "Service Tenant"
+
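
The escaping change in this file exists because %(tenant_id)s is a Python %-style placeholder that the service substitutes per request, so the stored endpoint URL must survive YAML and Jinja processing unexpanded. The substitution itself is plain Python:

# What the service does with the stored endpoint URL at request time:
# %-style mapping substitution. The string must reach the service with
# "%(tenant_id)s" intact, hence the extra escaping in the YAML above.
template = 'http://10.1.0.50:8774/v2.1/%(tenant_id)s'
print(template % {'tenant_id': 'f3a1b2c4d5e6'})
# -> http://10.1.0.50:8774/v2.1/f3a1b2c4d5e6
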
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml
index 0a30af7a..917a8356 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-controller/tasks/neutron_install.yml
@@ -31,12 +31,9 @@
with_items: services | union(services_noarch)
- name: get tenant id to fill neutron.conf
- shell: openstack project show \
- --os-username=admin \
- --os-password=console \
- --os-auth-url=http://{{ internal_vip.ip }}:35357/v2.0 \
- --os-tenant-name=admin \
- service | grep id | awk '{print $4}'
+ shell:
+ . /opt/admin-openrc.sh;
+ openstack project show service | grep id | sed -n "2,1p" | awk '{print $4}'
register: NOVA_ADMIN_TENANT_ID
- name: update neutron conf
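
The rewritten task scrapes the service project's id out of "openstack project show" with grep/sed/awk. For comparison only, the same lookup via openstacksdk avoids the text parsing entirely; the cloud name is a placeholder as before:

# Alternative (not in this commit) to grepping "openstack project show":
# look the project up through the SDK and read its id directly.
import openstack

conn = openstack.connect(cloud='mycloud')  # hypothetical clouds.yaml entry
project = conn.identity.find_project('service')
print(project.id if project else 'service project not found')
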
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf
new file mode 100644
index 00000000..5f8fb887
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova.conf
@@ -0,0 +1,113 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+log-dir=/var/log/nova
+state_path=/var/lib/nova
+force_dhcp_release=True
+verbose={{ VERBOSE }}
+ec2_private_dns_show_ip=True
+enabled_apis=osapi_compute,metadata
+
+auth_strategy = keystone
+my_ip = {{ internal_ip }}
+use_neutron = True
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+transport_url = rabbit://openstack:{{ RABBIT_PASS }}@{{ rabbit_host }}
+default_floating_pool={{ public_net_info.network }}
+metadata_listen={{ internal_ip }}
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+debug={{ DEBUG }}
+volumes_path=/var/lib/nova/volumes
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+osapi_compute_listen={{ internal_ip }}
+network_api_class = nova.network.neutronv2.api.API
+security_group_api = neutron
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+memcached_servers = {{ memcached_servers }}
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+idle_timeout = 30
+pool_timeout = 10
+use_db_reconnect = True
+
+[api_database]
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
+idle_timeout = 30
+pool_timeout = 10
+use_db_reconnect = True
+
+[oslo_concurrency]
+lock_path=/var/lib/nova/tmp
+
+[libvirt]
+use_virtio_for_bridges=True
+
+[wsgi]
+api_paste_config=/etc/nova/api-paste.ini
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[vnc]
+enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+[glance]
+api_servers = http://{{ internal_vip.ip }}:9292
+host = {{ internal_vip.ip }}
+
+[neutron]
+url = http://{{ internal_vip.ip }}:9696
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+region_name = RegionOne
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+service_metadata_proxy = True
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
+
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2
index f1d9125b..ae0f644a 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2
@@ -286,7 +286,7 @@ notification_driver = tacker.openstack.common.notifier.rpc_notifier
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v2
+# nova_url = http://127.0.0.1:8774/v3
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf b/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf
index 33231ed5..49caa879 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf
+++ b/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf
@@ -1,403 +1,72 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
state_path = /var/lib/neutron
-
-# Where to store lock files
lock_path = $state_path/lock
-
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
bind_host = {{ network_server_host }}
-
-# Port the bind the API server to
bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
-
-# Paste configuration file
api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
allow_overlapping_ips = True
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-
-# Size of RPC thread pool
rpc_thread_pool_size = 240
-# Size of RPC connection pool
rpc_conn_pool_size = 100
-# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = neutron
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# Port where RabbitMQ server is running/listening
-rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-rabbit_userid = {{ RABBIT_USER }}
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-# QPID
-# rpc_backend=neutron.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=neutron.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = neutron.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = neutron.openstack.common.notifier.log_notifier
-# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ internal_vip.ip }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
+nova_url = http://{{ internal_vip.ip }}:8774/v3
nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-{% if NOVA_ADMIN_TENANT_ID|default('') %}
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-{% endif %}
-# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
+nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
send_events_interval = 2
-# ======== end of neutron nova interactions ==========
-
[quotas]
-# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
quota_security_group_rule = 1000
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
-# =========== end of items for agent management extension =====
-
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = neutron
@@ -405,69 +74,21 @@ admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
idle_timeout = 30
use_db_reconnect = True
-
-# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
{% if enable_fwaas %}
[fwaas]
@@ -484,3 +105,8 @@ project_name = service
username = nova
password = {{ NOVA_PASS }}
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
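Editor's note: the hunk above moves the rabbit_* settings out of [DEFAULT] into [oslo_messaging_rabbit]. A quick stdlib probe of the rendered file (the path is the usual install location; strict=False tolerates the notify_nova_* keys that now appear twice in [DEFAULT]):

    # Verify where the RabbitMQ options landed in the rendered config.
    import configparser

    cfg = configparser.ConfigParser(strict=False, interpolation=None)
    cfg.read("/etc/neutron/neutron.conf")
    print(cfg.has_option("oslo_messaging_rabbit", "rabbit_host"))  # expect True
    print(cfg.has_option("DEFAULT", "rabbit_host"))                # expect False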
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf b/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf
index 3a5735cf..4a7bb0a2 100644
--- a/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf
+++ b/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf
@@ -7,90 +7,113 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
+log-dir=/var/log/nova
state_path=/var/lib/nova
-lock_path=/var/lib/nova/tmp
force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE}}
-debug={{ DEBUG }}
+verbose={{ VERBOSE }}
ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
enabled_apis=osapi_compute,metadata
-default_floating_pool={{ public_net_info.network }}
auth_strategy = keystone
+my_ip = {{ internal_ip }}
+use_neutron = True
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+transport_url = rabbit://{{ RABBIT_USER }}:{{ RABBIT_PASS }}@{{ rabbit_host }}
+default_floating_pool={{ public_net_info.network }}
+metadata_listen={{ internal_ip }}
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+iscsi_helper=tgtadm
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+debug={{ DEBUG }}
+volumes_path=/var/lib/nova/volumes
rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
-
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
-
memcached_servers = {{ memcached_servers }}
[database]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
idle_timeout = 30
-use_db_reconnect = True
pool_timeout = 10
+use_db_reconnect = True
[api_database]
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api
idle_timeout = 30
-use_db_reconnect = True
pool_timeout = 10
+use_db_reconnect = True
+
+[cinder]
+os_region_name = RegionOne
+
+[oslo_concurrency]
+lock_path=/var/lib/nova/tmp
+
+[libvirt]
+use_virtio_for_bridges=True
+
+[wsgi]
+api_paste_config=/etc/nova/api-paste.ini
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+memcached_servers = {{ memcached_servers }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = service
+username = nova
+password = {{ NOVA_PASS }}
+
identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
-memcached_servers = {{ memcached_servers }}
+
+[vnc]
+enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
[glance]
+api_servers = http://{{ internal_vip.ip }}:9292
host = {{ internal_vip.ip }}
[neutron]
url = http://{{ internal_vip.ip }}:9696
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+region_name = RegionOne
+project_name = service
+username = neutron
+password = {{ NEUTRON_PASS }}
+service_metadata_proxy = True
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0
-service_metadata_proxy = True
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-auth_type = password
-auth_url = http://{{ internal_vip.ip }}:35357
-password = {{ NEUTRON_PASS }}
-username = neutron
-project_domain_name = default
-user_domain_name = default
+admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
+
+[oslo_messaging_rabbit]
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+
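Editor's note: the new transport_url line folds the separate rabbit_host / rabbit_userid / rabbit_password settings into a single oslo.messaging URL. A sketch of assembling and picking one apart with the stdlib (all credentials are placeholders):

    # Build and parse an oslo.messaging-style transport URL.
    from urllib.parse import urlsplit

    transport_url = "rabbit://{user}:{password}@{host}".format(
        user="openstack",         # hypothetical RABBIT_USER
        password="example-pass",  # hypothetical RABBIT_PASS
        host="10.1.0.50",         # hypothetical rabbit_host
    )
    parts = urlsplit(transport_url)
    print(parts.scheme, parts.username, parts.hostname)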
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index 222b5561..a6876da7 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -196,7 +196,7 @@ listen proxy-dashboard
listen stats
mode http
- bind 0.0.0.0:9999
+ bind 0.0.0.0:9998
stats enable
stats refresh 30s
stats uri /
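Editor's note: after HAProxy reloads with this change, the stats page should answer on 9998 and no longer on 9999. A small socket check (the VIP address is a placeholder):

    # Probe the old and new HAProxy stats ports.
    import socket

    for port in (9998, 9999):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2)
        state = "open" if sock.connect_ex(("10.1.0.50", port)) == 0 else "closed"
        sock.close()
        print(port, state)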
diff --git a/deploy/adapters/ansible/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
index 5bc23473..d7311f62 100644
--- a/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
+++ b/deploy/adapters/ansible/roles/tacker/templates/tacker.j2
@@ -10,7 +10,7 @@ state_path = /var/lib/tacker
[keystone_authtoken]
password = console
-auth_uri = http://{{ internal_vip.ip }}:5000
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
auth_url = http://{{ internal_vip.ip }}:35357
project_name = service