Diffstat (limited to 'deploy/compass_conf')
 deploy/compass_conf/adapter/ansible_kubernetes.conf                                             |   7
 deploy/compass_conf/distributed_system/kubernetes.conf                                          |   3
 deploy/compass_conf/flavor/kubernetes.conf                                                      |  11
 deploy/compass_conf/flavor_mapping/kubernetes.conf                                              |  28
 deploy/compass_conf/flavor_metadata/ansible-kubernetes.conf                                     |  19
 deploy/compass_conf/package_installer/ansible-kubernetes.conf                                   |  15
 deploy/compass_conf/package_metadata/kubernetes.conf                                            | 536
 deploy/compass_conf/role/kubernetes_ansible.conf                                                | 115
 deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl  |  11
 deploy/compass_conf/templates/ansible_installer/kubernetes/hosts/ansible-kubernetes.tmpl        |  31
 deploy/compass_conf/templates/ansible_installer/kubernetes/inventories/ansible-kubernetes.tmpl  |  75
 deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl         | 198
12 files changed, 1049 insertions, 0 deletions
diff --git a/deploy/compass_conf/adapter/ansible_kubernetes.conf b/deploy/compass_conf/adapter/ansible_kubernetes.conf new file mode 100644 index 00000000..7b8023c2 --- /dev/null +++ b/deploy/compass_conf/adapter/ansible_kubernetes.conf @@ -0,0 +1,7 @@ +NAME = 'kubernetes' +DISPLAY_NAME = 'Kubernetes' +PARENT = 'general' +PACKAGE_INSTALLER = 'ansible_installer_kubernetes' +OS_INSTALLER = 'cobbler' +SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-16\.04', '(?i)CentOS-7.*16.*'] +DEPLOYABLE = True diff --git a/deploy/compass_conf/distributed_system/kubernetes.conf b/deploy/compass_conf/distributed_system/kubernetes.conf new file mode 100644 index 00000000..8050ec18 --- /dev/null +++ b/deploy/compass_conf/distributed_system/kubernetes.conf @@ -0,0 +1,3 @@ +NAME ='kubernetes' +PARENT = 'general' +DEPLOYABLE = True diff --git a/deploy/compass_conf/flavor/kubernetes.conf b/deploy/compass_conf/flavor/kubernetes.conf new file mode 100644 index 00000000..96b5f95f --- /dev/null +++ b/deploy/compass_conf/flavor/kubernetes.conf @@ -0,0 +1,11 @@ +ADAPTER_NAME = 'kubernetes' +FLAVORS = [{ + 'flavor': 'ansible-kubernetes', + 'display_name': 'ansible-kubernetes', + 'template': 'ansible-kubernetes.tmpl', + 'roles': [ + 'controller', 'compute', 'ha', 'odl', 'onos', 'opencontrail', 'ceph', 'ceph-adm', 'ceph-mon', 'ceph-osd', 'sec-patch', 'ceph-osd-node' + ], +}] + + diff --git a/deploy/compass_conf/flavor_mapping/kubernetes.conf b/deploy/compass_conf/flavor_mapping/kubernetes.conf new file mode 100644 index 00000000..e569ea46 --- /dev/null +++ b/deploy/compass_conf/flavor_mapping/kubernetes.conf @@ -0,0 +1,28 @@ +ADAPTER = 'kubernetes' +FLAVOR = 'ansible-kubernetes' +CONFIG_MAPPING = { + "mapped_name": "flavor_config", + "mapped_children": [{ + "security": { + "accordion_heading": "Kubernetes Database and Queue Credentials", + "category": "service_credentials", + "data_structure": "table", + "action": "true", + "modifiable_data": ["username", "password"], + "table_display_header": ["Service", "UserName", "Password", "Action"], + "config": { + } + } + },{ + "security": { + "accordion_heading": "Kubernetes User Credentials", + "category": "console_credentials", + "data_structure": "table", + "action": "true", + "modifiable_data": ["username", "password"], + "table_display_header": ["Service", "UserName", "Password", "Action"], + "config":{ + } + } + }] +} diff --git a/deploy/compass_conf/flavor_metadata/ansible-kubernetes.conf b/deploy/compass_conf/flavor_metadata/ansible-kubernetes.conf new file mode 100644 index 00000000..f878d58c --- /dev/null +++ b/deploy/compass_conf/flavor_metadata/ansible-kubernetes.conf @@ -0,0 +1,19 @@ +ADAPTER = 'kubernetes' +FLAVOR = 'ansible-kubernetes' +METADATA = { + 'ha_proxy': { + '_self': { + }, + 'vip': { + '_self': { + 'is_required': True, + 'field': 'general', + 'mapping_to': 'ha_vip' + } + }, + 'test': { + '_self': { + }, + } + } +} diff --git a/deploy/compass_conf/package_installer/ansible-kubernetes.conf b/deploy/compass_conf/package_installer/ansible-kubernetes.conf new file mode 100644 index 00000000..c706ccb2 --- /dev/null +++ b/deploy/compass_conf/package_installer/ansible-kubernetes.conf @@ -0,0 +1,15 @@ +NAME = 'ansible_installer' +INSTANCE_NAME = 'ansible_installer_kubernetes' +SETTINGS = { + 'ansible_dir': '/var/ansible', + 'ansible_run_dir': '/var/ansible/run', + 'ansible_config': 'ansible.cfg', + 'playbook_file': 'site.yml', + 'inventory_file': 'inventory.py', + 'inventory_json_file': 'inventory.json', + 'inventory_group': ['controller', 'compute', 'ha', 'odl', 'onos', 
'opencontrail', 'ceph_adm', 'ceph_mon', 'ceph_osd', 'moon'], + 'group_variable': 'all', + 'etc_hosts_path': 'roles/pre-k8s/templates/hosts', + 'runner_dirs': ['roles','kubernetes/roles'] +} + diff --git a/deploy/compass_conf/package_metadata/kubernetes.conf b/deploy/compass_conf/package_metadata/kubernetes.conf new file mode 100755 index 00000000..d5e9a50f --- /dev/null +++ b/deploy/compass_conf/package_metadata/kubernetes.conf @@ -0,0 +1,536 @@ +ADAPTER = 'kubernetes' +METADATA = { + 'security': { + '_self': { + 'required_in_whole_config': True, + }, + 'service_credentials': { + '_self': { + 'required_in_whole_config': True, + 'key_extensions': { + '$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'network', 'rabbitmq', 'volume', 'mysql', 'heat', 'alarming', 'policy'] + }, + 'mapping_to': 'service_credentials' + }, + '$service': { + '_self': { + 'required_in_whole_config': True, + 'mapping_to': '$service' + }, + 'username': { + '_self': { + 'is_required': True, + 'field': 'username', + 'mapping_to': 'username' + } + }, + 'password': { + '_self': { + 'is_required': True, + 'field': 'password', + 'mapping_to': 'password' + } + } + } + }, + 'console_credentials': { + '_self': { + 'required_in_whole_config': True, + 'key_extensions': { + '$console': ['admin', 'demo', 'compute', 'dashboard', 'image', 'identity', 'metering', 'network', 'object-store', 'volume', 'heat', 'alarming', 'policy'] + }, + 'mapping_to': 'console_credentials' + }, + '$console': { + '_self': { + 'required_in_whole_config': True, + 'mapping_to': '$console' + }, + 'username': { + '_self': { + 'is_required': True, + 'field': 'username', + 'mapping_to': 'username' + } + }, + 'password': { + '_self': { + 'is_required': True, + 'field': 'password', + 'mapping_to': 'password' + } + } + } + } + }, + + 'compass_ip': { + '_self': { + 'mapping_to': 'compass_ip', + 'field': 'anytype', + 'is_required':False, + 'default_value': '10.1.0.1' + } + }, + + 'enable_secgroup': { + '_self': { + 'mapping_to': 'enable_secgroup', + 'field': 'anytype', + 'is_required':False, + 'default_value': True + } + }, + 'enable_fwaas': { + '_self': { + 'mapping_to': 'enable_fwaas', + 'field': 'anytype', + 'is_required':False, + 'default_value': True + } + }, + 'enable_vpnaas': { + '_self': { + 'mapping_to': 'enable_vpnaas', + 'field': 'anytype', + 'is_required':False, + 'default_value': True + } + }, + 'odl_l3_agent': { + '_self': { + 'mapping_to': 'odl_l3_agent', + 'field': 'anytype', + 'is_required':False, + 'default_value': 'Disable' + } + }, + 'onos_sfc': { + '_self': { + 'mapping_to': 'onos_sfc', + 'field': 'anytype', + 'is_required':False, + 'default_value': 'Disable' + } + }, + 'odl_sfc': { + '_self': { + 'mapping_to': 'odl_sfc', + 'field': 'anytype', + 'is_required':False, + 'default_value': 'Disable' + } + }, + 'plugins': { + '_self': { + 'mapping_to': 'plugins', + 'field': 'general_list', + 'is_required':False, + 'default_value': '[]' + } + }, + 'ha_network': { + '_self': { + 'mapping_to': 'ha_network', + 'field': 'anytype', + 'is_required':False, + 'default_value': 'Disable' + } + }, + 'offline_repo_port': { + '_self': { + 'mapping_to': 'offline_repo_port', + 'field': 'anytype', + 'is_required':False, + 'default_value': '5151' + } + }, + 'offline_deployment': { + '_self': { + 'mapping_to': 'offline_deployment', + 'field': 'anytype', + 'is_required':False, + 'default_value': 'Disable' + } + }, + 'network_cfg': { + '_self': { + 'mapping_to': 'network_cfg' + }, + + 'nic_mappings': { + '_self': { + 'mapping_to': 'nic_mappings', + 
'field': 'general_list' + } + }, + + 'bond_mappings': { + '_self': { + 'mapping_to': 'bond_mappings', + 'field': 'general_list' + } + }, + + 'sys_intf_mappings': { + '_self': { + 'mapping_to': 'sys_intf_mappings', + 'field': 'general_list' + } + }, + + 'ip_settings': { + '_self': { + 'mapping_to': 'ip_settings', + 'field': 'general_list' + } + }, + + 'provider_net_mappings': { + '_self': { + 'mapping_to': 'provider_net_mappings', + 'field': 'general_list' + } + }, + + 'ceph_disk': { + '_self': { + 'mapping_to': 'ceph_disk', + 'field': 'general', + 'is_required':False + } + }, + + 'public_vip': { + '_self': { + 'mapping_to': 'public_vip', + 'is_required': False + }, + + 'ip': { + '_self': { + 'mapping_to': 'ip', + 'is_required': True, + 'field': 'general', + } + }, + 'netmask': { + '_self': { + 'mapping_to': 'netmask', + 'is_required': True, + 'field': 'general', + } + }, + 'interface': { + '_self': { + 'mapping_to': 'interface', + 'is_required': True, + 'field': 'general', + } + } + }, + + 'internal_vip': { + '_self': { + 'mapping_to': 'internal_vip', + 'is_required': False + }, + + 'ip': { + '_self': { + 'mapping_to': 'ip', + 'is_required': True, + 'field': 'general', + } + }, + 'netmask': { + '_self': { + 'mapping_to': 'netmask', + 'is_required': True, + 'field': 'general', + } + }, + 'interface': { + '_self': { + 'mapping_to': 'interface', + 'is_required': True, + 'field': 'general', + } + } + }, + + 'onos_nic': { + '_self': { + 'mapping_to': 'onos_nic', + 'is_required': False, + 'field': 'general', + 'default_value': 'eth2' + } + }, + + 'tenant_net_info': { + '_self': { + 'mapping_to': 'tenant_net_info', + 'is_required': True + }, + + 'type': { + '_self': { + 'mapping_to': 'type', + 'is_required': True, + 'field': 'general', + 'options': ['vxlan', 'vlan'], + } + }, + 'range': { + '_self': { + 'mapping_to': 'range', + 'is_required': True, + 'field': 'general', + } + }, + 'provider_network': { + '_self': { + 'mapping_to': 'provider_network', + 'is_required': True, + 'field': 'general', + } + } + }, + + 'public_net_info': { + '_self': { + 'mapping_to': 'public_net_info' + }, + + 'enable': { + '_self': { + 'mapping_to': 'enable', + 'is_required': False, + 'field': 'anytype', + 'default_value': True + } + }, + + 'network': { + '_self': { + 'mapping_to': 'network', + 'is_required': True, + 'field': 'general', + 'default_value': 'ext-net' + } + }, + + 'type': { + '_self': { + 'mapping_to': 'type', + 'is_required': True, + 'field': 'general', + 'options': ['flat', 'vlan'], + 'default_value': 'vlan' + } + }, + + 'segment_id': { + '_self': { + 'mapping_to': 'segment_id', + 'is_required': False, + 'field': 'anytype' + } + }, + + 'subnet': { + '_self': { + 'mapping_to': 'subnet', + 'is_required': True, + 'field': 'general', + 'default_value': 'ext-subnet' + } + }, + + 'provider_network': { + '_self': { + 'mapping_to': 'provider_network', + 'is_required': True, + 'field': 'general', + 'default_value': 'physnet' + } + }, + + 'router': { + '_self': { + 'mapping_to': 'router', + 'is_required': True, + 'field': 'general', + 'default_value': 'ext-router' + } + }, + + 'enable_dhcp': { + '_self': { + 'mapping_to': 'enable_dhcp', + 'is_required': True, + 'field': 'anytype' + } + }, + + 'no_gateway': { + '_self': { + 'mapping_to': 'no_gateway', + 'is_required': True, + 'field': 'anytype' + } + }, + + 'external_gw': { + '_self': { + 'mapping_to': 'external_gw', + 'is_required': False, + 'field': 'general' + } + }, + + 'floating_ip_cidr': { + '_self': { + 'mapping_to': 'floating_ip_cidr', + 'is_required': 
True, + 'field': 'general' + } + }, + + 'floating_ip_start': { + '_self': { + 'mapping_to': 'floating_ip_start', + 'is_required': True, + 'field': 'general' + } + }, + + 'floating_ip_end': { + '_self': { + 'mapping_to': 'floating_ip_end', + 'is_required': True, + 'field': 'general' + } + } + }, + }, + 'neutron_config': { + '_self': { + 'mapping_to': 'neutron_config' + }, + 'openvswitch': { + '_self': { + 'mapping_to': 'openvswitch', + 'required_in_whole_config': True + }, + 'tenant_network_type': { + '_self': { + 'mapping_to': 'tenant_network_type', + 'is_required': True, + 'field': 'general', + 'options': ['gre', 'vlan'], + 'default_value': 'gre' + } + }, + 'network_vlan_ranges': { + '_self': { + 'mapping_to': 'vlan_ranges', + 'is_required': False, + 'field': 'general_list', + 'default_value': ['physnet1:2700:2999'] + } + }, + 'bridge_mappings': { + '_self': { + 'mapping_to': 'bridge_mappings', + 'is_required': False, + 'field': 'general_list', + 'default_value': ['physnet1:br-eth1'] + } + }, + 'tunnel_id_ranges': { + '_self': { + 'mapping_to': 'tunnel_id_ranges', + 'is_required': False, + 'field': 'general_list', + 'default_value': ['1:1000'] + } + } + } + }, + 'network_mapping': { + '_self': { + 'required_in_whole_config': True, + 'key_extensions': { + '$interface_type': ['install'] + } + }, + '$interface_type': { + '_self': { + 'required_in_whole_config': True, + 'field': 'anytype', + 'autofill_callback': autofill_network_mapping, + 'mapping_to': '$interface_type' + }, + 'interface': { + '_self': { + 'is_required': True, + 'field': 'general', + } + }, + 'subnet': { + '_self': { + 'is_required': False, + 'field': 'general' + } + } + } + }, + 'moon_cfg': { + '_self': { + 'required_in_whole_config': False, + 'mapping_to': 'moon_cfg' + }, + 'master': { + '_self': { + 'required_in_whole_config': False, + 'mapping_to': 'master' + }, + 'flag': { + '_self': { + 'is_required': False, + 'field': 'general', + 'mapping_to': 'flag' + } + }, + }, + 'slave': { + '_self': { + 'required_in_whole_config': False, + 'mapping_to': 'slave' + }, + 'flag': { + '_self': { + 'is_required': False, + 'field': 'general', + 'mapping_to': 'flag' + } + }, + 'name': { + '_self': { + 'is_required': False, + 'field': 'general', + 'mapping_to': 'name' + } + }, + 'master_ip': { + '_self': { + 'is_required': False, + 'field': 'general', + 'mapping_to': 'master_ip' + } + }, + } + } +} diff --git a/deploy/compass_conf/role/kubernetes_ansible.conf b/deploy/compass_conf/role/kubernetes_ansible.conf new file mode 100644 index 00000000..89c03d94 --- /dev/null +++ b/deploy/compass_conf/role/kubernetes_ansible.conf @@ -0,0 +1,115 @@ +ADAPTER_NAME = 'kubernetes' +ROLES = [{ + 'role': 'allinone-compute', + 'display_name': 'all in one', + 'description': 'All in One' +}, { + 'role': 'controller', + 'display_name': 'controller node', + 'description': 'Controller Node' +}, { + 'role': 'compute', + 'display_name': 'compute node', + 'description': 'Compute Node' +}, { + 'role': 'storage', + 'display_name': 'storage node', + 'description': 'Storage Node' +}, { + 'role': 'network', + 'display_name': 'network node', + 'description': 'Network Node' +}, { + 'role': 'compute-worker', + 'display_name': 'Compute worker node', + 'description': 'Compute worker node' +}, { + 'role': 'compute-controller', + 'display_name': 'Compute controller node', + 'description': 'Compute controller node' +}, { + 'role': 'network-server', + 'display_name': 'Network server node', + 'description': 'Network server node' +}, { + 'role': 'database', + 'display_name': 
'Database node', + 'description': 'Database node' +}, { + 'role': 'messaging', + 'display_name': 'Messaging queue node', + 'description': 'Messaging queue node' +}, { + 'role': 'image', + 'display': 'Image node', + 'description': 'Image node' +}, { + 'role': 'dashboard', + 'display': 'Dashboard node', + 'description': 'Dashboard node' +}, { + 'role': 'identity', + 'display': 'Identity node', + 'description': 'Identity node' +}, { + 'role': 'storage-controller', + 'display': 'Storage controller node', + 'description': 'Storage controller node' +}, { + 'role': 'storage-volume', + 'display': 'Storage volume node', + 'description': 'Storage volume node' +}, { + 'role': 'network-worker', + 'display': 'Network worker node', + 'description': 'Network worker node' +}, { + 'role': 'odl', + 'display': 'open day light', + 'description': 'odl node', + 'optional': True +}, { + 'role': 'onos', + 'display': 'open network operating system', + 'description': 'onos node', + 'optional': True +}, { + 'role': 'opencontrail', + 'display': 'open contrail', + 'description': 'opencontrail node', + 'optional': True +}, { + 'role': 'ha', + 'display': 'Cluster with HA', + 'description': 'Cluster with HA node' +}, { + 'role': 'ceph-adm', + 'display': 'Ceph Admin Node', + 'description': 'Ceph Admin Node', + 'optional': True +}, { + 'role': 'ceph-mon', + 'display': 'Ceph Monitor Node', + 'description': 'Ceph Monitor Node', + 'optional': True +}, { + 'role': 'ceph-osd', + 'display': 'Ceph Storage Node', + 'description': 'Ceph Storage Node', + 'optional': True +}, { + 'role': 'ceph-osd-node', + 'display': 'Ceph osd install from node', + 'description': '', + 'optional': True +}, { + 'role': 'ceph', + 'display': 'ceph node', + 'description': 'ceph node', + 'optional': True +}, { + 'role': 'sec-patch', + 'display': 'sec-patch node', + 'description': 'Security Patch Node', + 'optional': True +}] diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl new file mode 100644 index 00000000..f09fa9c8 --- /dev/null +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/ansible_cfg/ansible-kubernetes.tmpl @@ -0,0 +1,11 @@ +#set cluster_name = $getVar('name', '') +[defaults] +log_path = /var/ansible/run/kubernetes-$cluster_name/ansible.log +host_key_checking = False +callback_whitelist = playbook_done, status_callback +callback_plugins = /opt/ansible_callbacks +forks=100 + +[ssh_connection] +pipelining=True +retries = 5 diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/hosts/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/hosts/ansible-kubernetes.tmpl new file mode 100644 index 00000000..9d628b5e --- /dev/null +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/hosts/ansible-kubernetes.tmpl @@ -0,0 +1,31 @@ +#set ip_settings={} +#for k,v in $getVar('ip_settings', {}).items() +#set host_ip_settings={} +#for intf in v +#set $host_ip_settings[$intf["alias"]]=intf +#end for +#set $ip_settings[$k]=$host_ip_settings +#end for + +# localhost +127.0.0.1 localhost +#set controllers = $getVar('controller', []) +#set computes = $getVar('compute', []) +#if not $isinstance($controllers, list) + #set controllers = [$controllers] +#end if +#if not $isinstance($compute, list) + #set computes = [$computes] +#end if +# controller +#for worker in $controllers + #set worker_hostname = $worker.hostname + #set worker_ip = 
$ip_settings[$worker_hostname].mgmt.ip +$worker_ip $worker_hostname +#end for +# compute +#for worker in $computes + #set worker_hostname = $worker.hostname + #set worker_ip = $ip_settings[$worker_hostname].mgmt.ip +$worker_ip $worker_hostname +#end for diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/inventories/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/inventories/ansible-kubernetes.tmpl new file mode 100644 index 00000000..4c363a61 --- /dev/null +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/inventories/ansible-kubernetes.tmpl @@ -0,0 +1,75 @@ +#set inventory_json = $getVar('inventory_json', []) +#!/usr/bin/env python + +import os +import sys +import copy +import argparse + +try: + import json +except ImportError: + import simplejson as json + +local_inventory='$inventory_json' + +def _byteify(data, ignore_dicts = False): + if isinstance(data, unicode): + return data.encode('utf-8') + if isinstance(data, list): + return [ _byteify(item, ignore_dicts=True) for item in data ] + if isinstance(data, dict) and not ignore_dicts: + return { + _byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True) + for key, value in data.iteritems() + } + return data + +def merge_dict(ldict, rdict, overwrite=True): + if not (ldict and rdict): + return + + if not isinstance(ldict, dict): + raise TypeError('ldict type is %s not dict' % type(ldict)) + + if not isinstance(rdict, dict): + raise TypeError('rdict type is %s not dict' % type(rdict)) + + for key, value in rdict.items(): + if isinstance(value, dict) and key in ldict and isinstance(ldict[key], + dict): + merge_dict(ldict[key], value, overwrite) + else: + if overwrite or key not in ldict: + ldict[key] = copy.deepcopy(value) + +def load_inventory(inventory): + if not os.path.exists(inventory): + raise RuntimeError('file: %s not exist' % inventory) + with open(inventory, 'r') as fd: + return json.load(fd, object_hook=_byteify) + +def dump_inventory(inventory, data): + with open(inventory, 'w') as fd: + json.dump(data, fd, indent=4) + +def merge_inventory(linv, rinv): + ldata = load_inventory(linv) + rdata = load_inventory(rinv) + merge_dict(ldata, rdata, overwrite=True) + dump_inventory(linv, ldata) + +def read_cli_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--list', action = 'store_true') + parser.add_argument('--merge', action = 'store') + return parser.parse_args() + +if __name__ == '__main__': + get_args = read_cli_args() + new_inventory = get_args.merge + if get_args.list: + print load_inventory(local_inventory) + elif new_inventory: + merge_inventory(local_inventory, new_inventory) + diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl new file mode 100644 index 00000000..27211e06 --- /dev/null +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl @@ -0,0 +1,198 @@ +#from random import randint +#set cluster_name = $getVar('name', '') +#set network_cfg = $getVar('network_cfg', {}) +#set ntp_server = $getVar('ntp_server', "") +#set ceph_disk = $getVar('ceph_disk',"") +#set $sys_intf_mappings= {} +#for $intf_info in $network_cfg.sys_intf_mappings +#set $sys_intf_mappings[$intf_info["name"]] = $intf_info +#end for + +#set ip_settings={} +#for k,v in $getVar('ip_settings', {}).items() +#set host_ip_settings={} +#for intf in v +#set 
$host_ip_settings[$intf["alias"]]=intf +#end for +#set $ip_settings[$k]=$host_ip_settings +#end for + +#set has = $getVar('ha', []) +#set ha_vip = $getVar('ha_vip', []) + +#set controllers = $getVar('controller', []) +#set computers = $getVar('compute', []) + +enable_secgroup: $getVar('enable_secgroup', True) +enable_fwaas: $getVar('enable_fwaas', True) +enable_vpnaas: $getVar('enable_vpnaas', True) +odl_l3_agent: $getVar('odl_l3_agent', 'Disable') +moon: $getVar('moon', 'Disable') +ha_network: $getVar('ha_network', 'Disable') +onos_nic: $getVar('onos_nic', 'eth2') +ip_settings: $ip_settings +network_cfg: $network_cfg +sys_intf_mappings: $sys_intf_mappings +deploy_type: $getVar('deploy_type', 'virtual') + +public_cidr: $computers[0]['install']['subnet'] +storage_cidr: "{{ ip_settings[inventory_hostname]['storage']['cidr'] }}" +mgmt_cidr: "{{ ip_settings[inventory_hostname]['mgmt']['cidr'] }}" + +public_net_info: "{{ network_cfg.public_net_info }}" +host_ip_settings: "{{ ip_settings[inventory_hostname] }}" + +ntp_server: $ntp_server +internal_vip: + ip: $network_cfg["internal_vip"]["ip"] + netmask: $network_cfg["internal_vip"]["netmask"] +#if "vlan_tag" in $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]] + interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["name"] +#else + interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["interface"] +#end if + +public_vip: + ip: $network_cfg["public_vip"]["ip"] + netmask: $network_cfg["public_vip"]["netmask"] +#if "vlan_tag" in $sys_intf_mappings[$network_cfg["public_vip"]["interface"]] + interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["name"] +#else + interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["interface"] +#end if + +db_host: "{{ internal_vip.ip }}" +rabbit_host: "{{ internal_vip.ip }}" + +internal_ip: "{{ ip_settings[inventory_hostname]['mgmt']['ip'] }}" +internal_nic: mgmt + +#set random_id = randint(1, 255) +vrouter_id_internal: $random_id +vrouter_id_public: $random_id + +identity_host: "{{ internal_ip }}" +controllers_host: "{{ internal_ip }}" +storage_controller_host: "{{ internal_ip }}" +compute_controller_host: "{{ internal_ip }}" +image_host: "{{ internal_ip }}" +network_server_host: "{{ internal_ip }}" +dashboard_host: "{{ internal_ip }}" + +haproxy_hosts: +#for $item in $has +#set $hostname=$item["hostname"] + $hostname: $ip_settings[$hostname]["mgmt"]["ip"] +#end for + +host_index: +#for $index, $item in enumerate($has) + $item["hostname"]: $index +#end for + +ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD +#set credentials = $getVar('service_credentials', {}) +#set console_credentials = $getVar('console_credentials', {}) +#set rabbit_username = $credentials.rabbitmq.username +#set rabbit_password = $credentials.rabbitmq.password +#set keystone_dbpass = $credentials.identity.password +#set keystone_pass = $console_credentials.identity.password +#set glance_dbpass = $credentials.image.password +#set glance_pass = $console_credentials.image.password +#set nova_dbpass = $credentials.compute.password +#set nova_pass = $console_credentials.compute.password +#set dash_dbpass = $credentials.dashboard.password +#set cinder_dbpass = $credentials.volume.password +#set cinder_pass = $console_credentials.volume.password +#set heat_dbpass = $credentials.heat.password +#set heat_pass = $console_credentials.heat.password +#set neutron_dbpass = $credentials.network.password +#set neutron_pass = $console_credentials.network.password +#set ceilometer_dbpass = 
$credentials.metering.password +#set ceilometer_pass = $console_credentials.metering.password +#set aodh_dbpass = $credentials.alarming.password +#set aodh_pass = $console_credentials.alarming.password +#set congress_dbpass = $credentials.policy.password +#set congress_pass = $console_credentials.policy.password +#set admin_pass = $console_credentials.admin.password +#set demo_pass = $console_credentials.demo.password + +cluster_name: $cluster_name + +odl_controller: 11.1.0.15 + +DEBUG: true +VERBOSE: true +NTP_SERVER_LOCAL: "{{ controllers_host }}" +DB_HOST: "{{ db_host }}" +MQ_BROKER: rabbitmq + +OPENSTACK_REPO: cloudarchive-newton.list +newton_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial-updates/newton main +ADMIN_TOKEN: admin +CEILOMETER_TOKEN: c095d479023a0fd58a54 +erlang.cookie: DJJVECFMCJPVYQTJTDWG + +RABBIT_USER: $rabbit_username +RABBIT_PASS: $rabbit_password +KEYSTONE_DBPASS: $keystone_dbpass +KEYSTONE_PASS: $keystone_pass +CEILOMETER_DBPASS: $ceilometer_dbpass +CEILOMETER_PASS: $ceilometer_pass +AODH_DBPASS: $aodh_dbpass +AODH_PASS: $aodh_pass +GLANCE_DBPASS: $glance_dbpass +GLANCE_PASS: $glance_pass +NOVA_DBPASS: $nova_dbpass +NOVA_PASS: $nova_pass +DASH_DBPASS: $dash_dbpass +CINDER_DBPASS: $cinder_dbpass +CINDER_PASS: $cinder_pass +NEUTRON_DBPASS: $neutron_dbpass +NEUTRON_PASS: $neutron_pass +HEAT_DBPASS: $heat_dbpass +HEAT_PASS: $heat_pass +CONGRESS_DBPASS: $congress_dbpass +CONGRESS_PASS: $congress_pass +DEMO_PASS: $demo_pass +ADMIN_PASS: $admin_pass + +#set neutron_service_plugins=['router'] + +#if $getVar('enable_fwaas', True) +#neutron_service_plugins.append('firewall') +#end if + +#if $getVar('enable_vpnaas', True) +#neutron_service_plugins.append('vpnaas') +#end if + +NEUTRON_SERVICE_PLUGINS: $neutron_service_plugins +NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan', 'vlan'] + +#NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] +NEUTRON_MECHANISM_DRIVERS: ['openvswitch'] +NEUTRON_TUNNEL_TYPES: ['vxlan'] +METADATA_SECRET: metadata_secret +WSREP_SST_USER: wsrep_sst +WSREP_SST_PASS: wsrep_sst_sercet + +INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: "{{ internal_ip }}" + +#build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img +build_in_image: http://192.168.121.12:9999/img/cirros-0.3.3-x86_64-disk.img +build_in_image_name: cirros-0.3.3-x86_64-disk.img + +physical_device: /dev/sdb + +odl_username: admin +odl_password: admin +odl_api_port: 8080 + +odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz +odl_pkg_name: karaf.tar.gz +odl_home: "/opt/opendaylight-0.2.2/" +odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all'] +odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi'] +odl_features: "{{ odl_base_features + odl_extra_features }}" +odl_api_port: 8080 |
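A note on the format of the *.conf files added above: they are Python snippets (assignments, dicts, booleans) rather than INI files, and package_metadata/kubernetes.conf even references a bare callback name, autofill_network_mapping. The sketch below shows one way such a file could be evaluated into a settings dict; it assumes the loader simply exec's the file with that callback pre-seeded in the namespace (the no-op stand-in here is not Compass's real helper, and Compass's actual loader may differ).

```python
# Minimal sketch: evaluate a Compass-style Python-literal .conf file into a dict.
# Assumption: the real loader provides callbacks (e.g. autofill_network_mapping)
# in the evaluation namespace; a no-op stand-in is used here for illustration.


def autofill_network_mapping(*args, **kwargs):
    """Placeholder for the callback referenced by package_metadata/kubernetes.conf."""
    return None


def load_conf(path):
    """Exec a .conf file and return its upper-case settings as a dict."""
    namespace = {'autofill_network_mapping': autofill_network_mapping}
    with open(path) as conf:
        exec(conf.read(), namespace)
    return {key: value for key, value in namespace.items() if key.isupper()}


if __name__ == '__main__':
    settings = load_conf('deploy/compass_conf/adapter/ansible_kubernetes.conf')
    print(settings['NAME'], settings['SUPPORTED_OS_PATTERNS'])
```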
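The hosts and vars templates both open with Cheetah loops that re-key ip_settings from hostname -> list of interface dicts into hostname -> {alias: interface}, which is what makes later lookups like $ip_settings[$worker_hostname].mgmt.ip possible. A minimal Python sketch of that same transformation, using hypothetical sample data:

```python
# Sketch of the transformation done by the "#for k,v in $getVar('ip_settings', {}).items()"
# loops in hosts/ansible-kubernetes.tmpl and vars/ansible-kubernetes.tmpl.
# The sample entries below are hypothetical; real ones come from Compass's network_cfg.

raw_ip_settings = {
    'host1': [
        {'alias': 'mgmt', 'ip': '10.1.0.50', 'cidr': '10.1.0.0/24'},
        {'alias': 'storage', 'ip': '172.16.2.50', 'cidr': '172.16.2.0/24'},
    ],
}

# hostname -> list of interface dicts  ==>  hostname -> {alias: interface dict}
ip_settings = {
    host: {intf['alias']: intf for intf in interfaces}
    for host, interfaces in raw_ip_settings.items()
}

# Equivalent of "$ip_settings[$worker_hostname].mgmt.ip" in the hosts template:
print(ip_settings['host1']['mgmt']['ip'])   # -> 10.1.0.50
```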
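The inventories template renders inventory.py, the dynamic inventory that Compass points Ansible at via the 'inventory_file' setting in package_installer/ansible-kubernetes.conf. The generated script targets Python 2 (print statement, unicode, dict.iteritems()), so it needs a python2 interpreter; --list prints the baked-in inventory and --merge <file> deep-merges another inventory JSON into it. Below is a small Python 3 sketch of that merge behaviour, with illustrative group names and host data:

```python
# Demonstrates the merge semantics of the generated inventory.py: the right-hand
# inventory is deep-merged into the local one, overwriting values on conflict
# (merge_dict(..., overwrite=True)). Group names and hosts below are illustrative.
import copy
import json


def merge_dict(ldict, rdict, overwrite=True):
    # Same recursive merge as in the template, minus the Python 2 _byteify step.
    for key, value in rdict.items():
        if isinstance(value, dict) and isinstance(ldict.get(key), dict):
            merge_dict(ldict[key], value, overwrite)
        elif overwrite or key not in ldict:
            ldict[key] = copy.deepcopy(value)


local = {'controller': {'hosts': ['host1']},
         '_meta': {'hostvars': {'host1': {'ansible_ssh_host': '10.1.0.50'}}}}
extra = {'compute': {'hosts': ['host2']},
         '_meta': {'hostvars': {'host1': {'ansible_ssh_host': '10.1.0.51'}}}}

merge_dict(local, extra)
print(json.dumps(local, indent=2))
# host1's address is overwritten and the 'compute' group is added, mirroring what
# "python2 inventory.py --merge <other>.json" does to the inventory file it wraps.
```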