Diffstat (limited to 'deploy/adapters/ansible')
26 files changed, 446 insertions, 49 deletions
diff --git a/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
index dff7d00b..3c7032d1 100755
--- a/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/openstack_liberty/roles/odl_cluster/tasks/odl_controller.yml
@@ -237,7 +237,9 @@
   shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
   when: odl_l3_agent == "Enable"
-
+- name: configure metadata for l3 configuration
+  shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
+  when: odl_l3_agent == "Enable"
 
 - name: drop and recreate neutron database
   shell: mysql -e "drop database if exists neutron;";
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
index 4d2afc24..b52b9178 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/ext-network/tasks/main.yml
@@ -17,6 +17,9 @@
 - name: restart neutron server
   service: name=neutron-server state=restarted enabled=yes
 
+- name: wait for neutron time
+  shell: "sleep 10"
+
 - name: create external net
   neutron_network:
     login_username: ADMIN
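The fixed "sleep 10" added above simply gives neutron-server time to finish restarting before the external net is created. Purely as an illustration (not part of this changeset), the same wait could be expressed against the neutron API port with Ansible's wait_for module; port 9696 and the internal_vip.ip endpoint are assumptions here:

    - name: wait for neutron server to come up
      wait_for: host={{ internal_vip.ip }} port=9696 delay=5 timeout=60
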
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/Debian.yml
deleted file mode 100644
index b8d8e7c2..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/vars/Debian.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-cron_path: "/var/spool/cron/crontabs"
-
-packages:
-  - keystone
-  - python-openstackclient
-
-services:
-  - apache2
-
-apache_config_dir: /etc/apache2
-http_service_name: apache2
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py
new file mode 100644
index 00000000..fffeb589
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/log.py
@@ -0,0 +1,41 @@
+import logging
+import os
+loggers = {}
+log_dir="/var/log/setup_network"
+try:
+    os.makedirs(log_dir)
+except:
+    pass
+
+def getLogger(name):
+    if name in loggers:
+        return loggers[name]
+
+    logger = logging.getLogger(name)
+    logger.setLevel(logging.DEBUG)
+
+    # create file handler which logs even debug messages
+    log_file = "%s/%s.log" % (log_dir, name)
+    try:
+        os.remove(log_file)
+    except:
+        pass
+
+    fh = logging.FileHandler(log_file)
+    fh.setLevel(logging.DEBUG)
+
+    # create console handler with a higher log level
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.ERROR)
+
+    # create formatter and add it to the handlers
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    ch.setFormatter(formatter)
+    fh.setFormatter(formatter)
+
+    # add the handlers to logger
+    logger.addHandler(ch)
+    logger.addHandler(fh)
+
+    loggers[name] = logger
+    return logger
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init
new file mode 100755
index 00000000..c27a8bf8
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/net_init
@@ -0,0 +1,20 @@
+#!/bin/bash
+## BEGIN INIT INFO
+# Provides: anamon.init
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 4 6
+# Required-Start: $network
+# Short-Description: Starts the cobbler anamon boot notification program
+# Description: anamon runs the first time a machine is booted after
+#              installation.
+## END INIT INFO
+
+#
+# anamon.init: Starts the cobbler post-install boot notification program
+#
+# chkconfig: 35 0 6
+#
+# description: anamon runs the first time a machine is booted after
+#              installation.
+#
+python /opt/setup_networks/setup_networks.py
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py
new file mode 100644
index 00000000..e58d6c72
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/files/setup_networks/setup_networks.py
@@ -0,0 +1,73 @@
+import yaml
+import netaddr
+import os
+import log as logging
+
+LOG = logging.getLogger("net-init")
+config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
+
+def setup_bondings(bond_mappings):
+    print bond_mappings
+
+def add_vlan_link(interface, ifname, vlan_id):
+    LOG.info("add_vlan_link enter")
+    cmd = "ip link add link %s name %s type vlan id %s; " % (ifname, interface, vlan_id)
+    cmd += "ip link set %s up; ip link set %s up" % (interface, ifname)
+    LOG.info("add_vlan_link: cmd=%s" % cmd)
+    os.system(cmd)
+
+def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
+    LOG.info("add_ovs_port enter")
+    cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
+    if vlan_id:
+        cmd += " tag=%s" % vlan_id
+    cmd += " -- set Interface %s type=internal;" % ifname
+    cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" \
+           % (ifname, uplink)
+    cmd += "ip link set %s up;" % ifname
+    LOG.info("add_ovs_port: cmd=%s" % cmd)
+    os.system(cmd)
+
+def setup_intfs(sys_intf_mappings, uplink_map):
+    LOG.info("setup_intfs enter")
+    for intf_name, intf_info in sys_intf_mappings.items():
+        if intf_info["type"] == "vlan":
+            add_vlan_link(intf_name, intf_info["interface"], intf_info["vlan_tag"])
+        elif intf_info["type"] == "ovs":
+            add_ovs_port(
+                intf_info["interface"],
+                intf_name,
+                uplink_map[intf_info["interface"]],
+                vlan_id=intf_info.get("vlan_tag"))
+        else:
+            pass
+
+def setup_ips(ip_settings, sys_intf_mappings):
+    LOG.info("setup_ips enter")
+    for intf_info in ip_settings.values():
+        network = netaddr.IPNetwork(intf_info["cidr"])
+        if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
+            intf_name = intf_info["name"]
+        else:
+            intf_name = intf_info["alias"]
+        cmd = "ip addr add %s/%s brd %s dev %s;" \
+              % (intf_info["ip"], intf_info["netmask"], str(network.broadcast),intf_name)
+        if "gw" in intf_info:
+            cmd += "route del default;"
+            cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name)
+        LOG.info("setup_ips: cmd=%s" % cmd)
+        os.system(cmd)
+
+def main(config):
+    uplink_map = {}
+    setup_bondings(config["bond_mappings"])
+    for provider_net in config["provider_net_mappings"]:
+        uplink_map[provider_net['name']] = provider_net['interface']
+
+    setup_intfs(config["sys_intf_mappings"], uplink_map)
+    setup_ips(config["ip_settings"], config["sys_intf_mappings"])
+
+if __name__ == "__main__":
+    os.system("service openvswitch-switch status|| service openvswitch-switch start")
+    config = yaml.load(open(config_path))
+    main(config)
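setup_networks.py above reads /opt/setup_networks/network.cfg as YAML with the four top-level keys rendered by the network.cfg template added later in this changeset (bond_mappings, ip_settings, sys_intf_mappings, provider_net_mappings). A hypothetical rendered file, with made-up interface names, VLAN tags and addresses, could look like this:

    bond_mappings: []
    provider_net_mappings:
      - name: br-prv
        type: ovs
        interface: eth1
    sys_intf_mappings:
      mgmt:
        type: vlan
        interface: eth0
        vlan_tag: 101
      external:
        type: ovs
        interface: br-prv
    ip_settings:
      mgmt:
        name: mgmt
        alias: mgmt
        ip: 10.1.0.50
        netmask: 24
        cidr: 10.1.0.0/24
      external:
        name: external
        alias: external
        ip: 192.168.10.50
        netmask: 24
        cidr: 192.168.10.0/24
        gw: 192.168.10.1
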
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
index bda2ba30..6b619057 100755
--- a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/tasks/main.yml
@@ -40,13 +40,74 @@
 - name: Stop the Open vSwitch service and clear existing OVSDB
   shell: >
-    ovs-vsctl del-br br-int ;
-    ovs-vsctl del-br br-tun ;
-    ovs-vsctl del-manager ;
-    ip link delete onos_port1 type veth peer name onos_port2;
+    service openvswitch-switch stop ;
+    rm -rf /var/log/openvswitch/* ;
+    rm -rf /etc/openvswitch/conf.db ;
+    service openvswitch-switch start ;
   when: groups['onos']|length !=0
   ignore_errors: True
 
+##################################################################
+########### Recover External network #################
+##################################################################
+
+- name: add ovs bridge
+  openvswitch_bridge: bridge={{ item["name"] }} state=present
+  with_items: "{{ network_cfg['provider_net_mappings'] }}"
+  when: item["type"] == "ovs" and groups['onos']|length !=0
+
+- name: add ovs uplink
+  openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
+  with_items: "{{ network_cfg['provider_net_mappings'] }}"
+  when: item["type"] == "ovs" and groups['onos']|length !=0
+
+- name: add ovs uplink
+  shell: ip link set {{ item["interface"] }} up
+  with_items: "{{ network_cfg['provider_net_mappings'] }}"
+  when: item["type"] == "ovs" and groups['onos']|length !=0
+
+- name: ensure script dir exist
+  shell: mkdir -p /opt/setup_networks
+  when: groups['onos']|length !=0
+
+- name: copy scripts
+  copy: src={{ item }} dest=/opt/setup_networks
+  with_items:
+    - setup_networks/log.py
+    - setup_networks/setup_networks.py
+  when: groups['onos']|length !=0
+
+- name: copy boot scripts
+  copy: src={{ item }} dest=/etc/init.d/ mode=0755
+  with_items:
+    - setup_networks/net_init
+  when: groups['onos']|length !=0
+
+- name: copy config files
+  template: src=network.cfg dest=/opt/setup_networks
+  when: groups['onos']|length !=0
+
+- name: make sure python lib exist
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items:
+    - python-yaml
+    - python-netaddr
+  when: groups['onos']|length !=0
+
+- name: run scripts
+  shell: python /opt/setup_networks/setup_networks.py
+  when: groups['onos']|length !=0
+
+- name: add to boot scripts
+  service: name=net_init enabled=yes
+  when: groups['onos']|length !=0
+##################################################################
+
+- name: restart keepalived to recover external IP
+  shell: service keepalived restart
+  when: inventory_hostname in groups['onos']
+  ignore_errors: True
+
 - name: Install ONOS Cluster on Controller
   include: onos_controller.yml
   when: inventory_hostname in groups['onos'] and onos_sfc == "Disable"
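Every task in the recovery section above carries the same groups['onos']|length !=0 guard. Purely as a sketch (not part of this changeset, and assuming Ansible >= 2.0), a block could apply that condition once for a group of tasks:

    - block:
        - name: ensure script dir exist
          shell: mkdir -p /opt/setup_networks
        - name: copy scripts
          copy: src={{ item }} dest=/opt/setup_networks
          with_items:
            - setup_networks/log.py
            - setup_networks/setup_networks.py
      when: groups['onos']|length != 0
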
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf
new file mode 100644
index 00000000..4ccf1c43
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/keepalived.conf
@@ -0,0 +1,47 @@
+global_defs {
+    router_id {{ inventory_hostname }}
+}
+
+vrrp_sync_group VG1 {
+    group {
+        internal_vip
+        public_vip
+    }
+}
+
+vrrp_instance internal_vip {
+    interface {{ internal_vip.interface }}
+    virtual_router_id {{ vrouter_id_internal }}
+    state BACKUP
+    nopreempt
+    advert_int 1
+    priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+    authentication {
+        auth_type PASS
+        auth_pass 1234
+    }
+
+    virtual_ipaddress {
+        {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }}
+    }
+}
+
+vrrp_instance public_vip {
+    interface br-ex
+    virtual_router_id {{ vrouter_id_public }}
+    state BACKUP
+    nopreempt
+    advert_int 1
+    priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+    authentication {
+        auth_type PASS
+        auth_pass 4321
+    }
+
+    virtual_ipaddress {
+        {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev br-ex
+    }
+
+}
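The priority expression in both VRRP instances spreads priorities by host index so each node gets a distinct value; for example, with hypothetical hosts at host_index 0, 1 and 2 it evaluates to:

    # host_index -> priority, from {{ 50 + (host_index[inventory_hostname] * 50) }}
    host1: 50
    host2: 100
    host3: 150
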
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian
new file mode 100644
index 00000000..5ab1519b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/my_configs.debian
@@ -0,0 +1,14 @@
+{%- for alias, intf in host_ip_settings.items() %}
+
+auto {{ alias }}
+iface {{ alias }} inet static
+    address {{ intf["ip"] }}
+    netmask {{ intf["netmask"] }}
+{% if "gw" in intf %}
+    gateway {{ intf["gw"] }}
+{% endif %}
+{% if intf["name"] == alias %}
+    pre-up ip link set {{ sys_intf_mappings[alias]["interface"] }} up
+    pre-up ip link add link {{ sys_intf_mappings[alias]["interface"] }} name {{ alias }} type vlan id {{ sys_intf_mappings[alias]["vlan_tag"] }}
+{% endif %}
+{% endfor %}
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg
new file mode 100644
index 00000000..75ba90cb
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/onos_cluster/templates/network.cfg
@@ -0,0 +1,5 @@
+bond_mappings: {{ network_cfg["bond_mappings"] }}
+ip_settings: {{ ip_settings[inventory_hostname] }}
+sys_intf_mappings: {{ sys_intf_mappings }}
+provider_net_mappings: {{ network_cfg["provider_net_mappings"] }}
+
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/neutron.j2
new file mode 100644
index 00000000..aac6c8a2
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/neutron.j2
@@ -0,0 +1,4 @@
+[securitygroup]
+firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+enable_security_group = True
+
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/nova.j2
new file mode 100644
index 00000000..7dbc216a
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka/roles/secgroup/templates/nova.j2
@@ -0,0 +1,3 @@
+[DEFAULT]
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/admin-openrc-v3.sh b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/admin-openrc-v3.sh
deleted file mode 100644
index c3d863e8..00000000
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/keystone/templates/admin-openrc-v3.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# Verify the Identity Service installation
-export OS_PASSWORD={{ ADMIN_PASS }}
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_USERNAME=admin
-export OS_VOLUME_API_VERSION=2
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_DOMAIN_NAME=Default
-
-
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml
index 9de5f478..4cf7948a 100755
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/odl_controller.yml
@@ -89,11 +89,21 @@
 - name: create karaf config
   template:
-    src: org.apache.karaf.features.cfg
+    src: org.apache.karaf.features.cfg.Debian
     dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
     owner: odl
     group: odl
     mode: 0775
+  when: ansible_os_family == "Debian"
+
+- name: create karaf config
+  template:
+    src: org.apache.karaf.features.cfg.Redhat
+    dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
+    owner: odl
+    group: odl
+    mode: 0775
+  when: ansible_os_family == "RedHat"
 
 - name: create tomcat config
   template:
@@ -269,7 +279,9 @@
   shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
   when: odl_l3_agent == "Enable"
-
+- name: configure metadata for l3 configuration
+  shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
+  when: odl_l3_agent == "Enable"
 
 - name: drop and recreate neutron database
   shell: mysql -e "drop database if exists neutron;";
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
index 021143fc..b8cb6c91 100755
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/openvswitch.yml
@@ -151,3 +151,8 @@
 
 - name: execute ml2 configuration script
   command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
+
+#- name: change odl password for moon
+#  shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl password {{ ADMIN_PASS }}
+#  when: moon == "Enable"
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/neutron.j2
new file mode 100644
index 00000000..aac6c8a2
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/neutron.j2
@@ -0,0 +1,4 @@
+[securitygroup]
+firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+enable_security_group = True
+
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/nova.j2 b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/nova.j2
new file mode 100644
index 00000000..7dbc216a
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/secgroup/templates/nova.j2
@@ -0,0 +1,3 @@
+[DEFAULT]
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
index b18ae8f7..780fc322 100644
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
@@ -41,6 +41,24 @@
   script: remove_user.sh
   when: ansible_os_family == "RedHat"
 
+- name: restart mysql for centos noha
+  service:
+    name: mysql
+    state: restarted
+  when: >
+    inventory_hostname == haproxy_hosts.keys()[0]
+    and haproxy_hosts|length == 1
+    and ansible_os_family == "RedHat"
+
+- name: restart mysql second time for centos noha
+  service:
+    name: mysql
+    state: restarted
+  when: >
+    inventory_hostname == haproxy_hosts.keys()[0]
+    and haproxy_hosts|length == 1
+    and ansible_os_family == "RedHat"
+
 - name: restart first nodes
   service:
     name: mysql
diff --git a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
index 1fbada8c..a6e76c74 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
@@ -17,6 +17,18 @@
   lineinfile: dest=/opt/service create=yes line='{{ item }}'
   with_items: services | union(services_noarch)
 
+# '
+
+- name: create heat user domain
+  shell: >
+    . /opt/admin-openrc-v3.sh;
+    openstack domain create --description "Stack projects and users" heat;
+    openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin;
+    openstack role add --domain heat --user-domain heat --user heat_domain_admin admin;
+    openstack role create heat_stack_owner;
+    openstack role add --project demo --user demo heat_stack_owner;
+  when: inventory_hostname == groups['controller'][0] and ansible_os_family == "Debian"
+
 - name: update heat conf
   template: src=heat.j2
             dest=/etc/heat/heat.conf
@@ -24,4 +36,13 @@
   notify:
     - restart heat service
     - remove heat-sqlite-db
+  when: ansible_os_family == "RedHat"
 
+- name: update heat conf
+  template: src=heat_debian.j2
+            dest=/etc/heat/heat.conf
+            backup=yes
+  notify:
+    - restart heat service
+    - remove heat-sqlite-db
+  when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/roles/heat/templates/heat_debian.j2 b/deploy/adapters/ansible/roles/heat/templates/heat_debian.j2
new file mode 100644
index 00000000..62df9fd9
--- /dev/null
+++ b/deploy/adapters/ansible/roles/heat/templates/heat_debian.j2
@@ -0,0 +1,28 @@
+[DEFAULT]
+heat_metadata_server_url = http://{{ internal_vip.ip }}:8000
+heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+log_dir = /var/log/heat
+stack_domain_admin = heat_domain_admin
+stack_domain_admin_password = {{ HEAT_PASS }}
+stack_user_domain_name = heat
+
+[database]
+connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
+idle_timeout = 30
+use_db_reconnect = True
+pool_timeout = 10
+
+[ec2authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+identity_uri = http://{{ internal_vip.ip }}:35357
+admin_tenant_name = service
+admin_user = heat
+admin_password = {{ HEAT_PASS }}
+
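The "create heat user domain" task above shells out to the openstack CLI. For illustration only (a hypothetical follow-up task, not part of this changeset), the result could be spot-checked with the corresponding show commands:

    - name: verify heat domain setup
      shell: >
        . /opt/admin-openrc-v3.sh;
        openstack domain show heat;
        openstack user show --domain heat heat_domain_admin;
        openstack role show heat_stack_owner;
      when: inventory_hostname == groups['controller'][0] and ansible_os_family == "Debian"
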
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
index ffae8ff0..ea6926f4 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
@@ -83,5 +83,6 @@
   with_items:
     - admin-openrc.sh
     - demo-openrc.sh
+    - admin-openrc-v3.sh
 
 - meta: flush_handlers
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/templates/admin-openrc-v3.sh b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v3.sh
index c3d863e8..c3d863e8 100644
--- a/deploy/adapters/ansible/openstack_mitaka/roles/keystone/templates/admin-openrc-v3.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc-v3.sh
diff --git a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
index 67547277..b8d8e7c2 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/Debian.yml
@@ -12,6 +12,7 @@
 cron_path: "/var/spool/cron/crontabs"
 
 packages:
   - keystone
+  - python-openstackclient
 
 services:
   - apache2
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
index 15513b30..f0ca1054 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
@@ -89,11 +89,21 @@
 - name: create karaf config
   template:
-    src: org.apache.karaf.features.cfg
+    src: org.apache.karaf.features.cfg.Debian
     dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
     owner: odl
     group: odl
     mode: 0775
+  when: ansible_os_family == "Debian"
+
+- name: create karaf config
+  template:
+    src: org.apache.karaf.features.cfg.Redhat
+    dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
+    owner: odl
+    group: odl
+    mode: 0775
+  when: ansible_os_family == "RedHat"
 
 - name: create tomcat config
   template:
@@ -256,7 +266,9 @@
   shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
   when: odl_l3_agent == "Enable"
-
+- name: configure metadata for l3 configuration
+  shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True;
+  when: odl_l3_agent == "Enable"
 
 - name: drop and recreate neutron database
   shell: mysql -e "drop database if exists neutron;";
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian
index 4bbd33c3..4bbd33c3 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Debian
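The paired "create karaf config" tasks above (here and in the openstack_mitaka_xenial variant) differ only in the template name and OS family. A possible consolidation, sketched only and not part of this changeset, keys the template suffix on ansible_os_family; note the RedHat template added below is named ".Redhat" while the fact value is "RedHat", so the file name or the expression would have to be aligned first:

    - name: create karaf config
      template:
        src: "org.apache.karaf.features.cfg.{{ ansible_os_family }}"
        dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
        owner: odl
        group: odl
        mode: 0775
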
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Redhat b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Redhat
new file mode 100755
index 00000000..0691a984
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg.Redhat
@@ -0,0 +1,59 @@
+################################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+
+#
+# Defines if the startlvl should be respected during feature startup. The default value is true. The default
+# behavior for 2.x is false (!) for this property
+#
+# Be aware that this property is deprecated and will be removed in Karaf 4.0. So, if you need to
+# set this to false, please use this only as a temporary solution!
+#
+#respectStartLvlDuringFeatureStartup=true
+
+
+#
+# Defines if the startlvl should be respected during feature uninstall. The default value is true.
+# If true, means stop bundles respecting the descend order of start level in a certain feature.
+#
+#respectStartLvlDuringFeatureUninstall=true
+
+#
+# Comma separated list of features repositories to register by default
+#
+featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.2-Beryllium-SR2/xml/features
+#featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.0-Beryllium/xml/features
+#featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.4.0-Beryllium-RC1/xml/features
+#mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.3.3-Lithium-SR3/xml/features
+#mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.3.2-Lithium-SR2/xml/features
+
+#
+# Comma separated list of features to install at startup
+#
+#featuresBoot=config,standard,region,package,kar,ssh,management,odl-restconf-all,odl-aaa-authn,odl-dlux-all,odl-ovsdb-openstack
+featuresBoot=config,standard,region,package,kar,ssh,management,odl-ovsdb-openstack
+
+#,odl-restconf-all,odl-aaa-authn,odl-dlux-all
+
+# odl-base-all,odl-restconf,odl-ovsdb-openstack,odl-dlux-all,odl-mdsal-apidocs
+#,odl-mdsal-clustering,odl-openflowplugin-flow-services
+
+#
+# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
+#
+featuresBootAsynchronous=false