author     chenshuai@huawei.com <chenshuai@huawei.com>  2016-02-24 17:52:48 +0800
committer  chenshuai@huawei.com <chenshuai@huawei.com>  2016-02-24 17:58:17 +0800
commit     bb0ee921be7b53609e3b942ad7aeee8b06f458d5 (patch)
tree       f5edc239fe11f404ea3d7784fb48265205a64935 /deploy
parent     e38c1a6d08b10ea81d208bd40b9df1a13d28db8c (diff)
support odl_l3_agent enable flag param
JIRA: COMPASS-318
Change-Id: I2c9d3d9143c1f3cc9c8a36c0eb55930ce7e3e2e3
Signed-off-by: chenshuai@huawei.com <chenshuai@huawei.com>
Diffstat (limited to 'deploy')
7 files changed, 388 insertions, 46 deletions
diff --git a/deploy/adapters/ansible/roles/odl_cluster/files/recover_network_odl_l3.py b/deploy/adapters/ansible/roles/odl_cluster/files/recover_network_odl_l3.py
new file mode 100755
index 00000000..7bef1052
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/files/recover_network_odl_l3.py
@@ -0,0 +1,30 @@
+import yaml
+import netaddr
+import os
+import log as logging
+
+LOG = logging.getLogger("net-recover-odl-l3")
+config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
+
+def setup_bondings(bond_mappings):
+    print bond_mappings
+
+def setup_ips_new(config):
+    LOG.info("setup_ips_new enter")
+    network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"])
+    intf_name = config["provider_net_mappings"][0]["interface"]
+    cmd = "ip link set br-ex up;"
+    cmd += "ip addr add %s/%s brd %s dev %s;" \
+        % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), 'br-ex')
+    cmd += "route del default;"
+    cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], 'br-ex')
+    LOG.info("setup_ips_new: cmd=%s" % cmd)
+    os.system(cmd)
+
+def main(config):
+    setup_ips_new(config)
+
+if __name__ == "__main__":
+    os.system("service openvswitch-switch status|| service openvswitch-switch start")
+    config = yaml.load(open(config_path))
+    main(config)
diff --git a/deploy/adapters/ansible/roles/odl_cluster/files/setup_networks_odl_l3.py b/deploy/adapters/ansible/roles/odl_cluster/files/setup_networks_odl_l3.py
new file mode 100644
index 00000000..22797413
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/files/setup_networks_odl_l3.py
@@ -0,0 +1,91 @@
+import yaml
+import netaddr
+import os
+import log as logging
+
+LOG = logging.getLogger("net-init-l3")
+config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
+
+def setup_bondings(bond_mappings):
+    print bond_mappings
+
+def add_vlan_link(interface, ifname, vlan_id):
+    LOG.info("add_vlan_link enter")
+    cmd = "ip link add link %s name %s type vlan id %s; " % (ifname, interface, vlan_id)
+    cmd += "ip link set %s up; ip link set %s up" % (interface, ifname)
+    LOG.info("add_vlan_link: cmd=%s" % cmd)
+    os.system(cmd)
+
+#def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
+#    LOG.info("add_ovs_port enter")
+#    cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
+#    if vlan_id:
+#        cmd += " tag=%s" % vlan_id
+#    cmd += " -- set Interface %s type=internal;" % ifname
+#    cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" \
+#        % (ifname, uplink)
+#    cmd += "ip link set %s up;" % ifname
+#    LOG.info("add_ovs_port: cmd=%s" % cmd)
+#    os.system(cmd)
+
+def setup_intfs(sys_intf_mappings, uplink_map):
+    LOG.info("setup_intfs enter")
+    for intf_name, intf_info in sys_intf_mappings.items():
+        if intf_info["type"] == "vlan":
+            add_vlan_link(intf_name, intf_info["interface"], intf_info["vlan_tag"])
+#        elif intf_info["type"] == "ovs":
+#            add_ovs_port(
+#                intf_info["interface"],
+#                intf_name,
+#                uplink_map[intf_info["interface"]],
+#                vlan_id=intf_info.get("vlan_tag"))
+        else:
+            pass
+
+def setup_ips(ip_settings, sys_intf_mappings):
+    LOG.info("setup_ips enter")
+    for intf_info in ip_settings.values():
+        network = netaddr.IPNetwork(intf_info["cidr"])
+        if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
+            intf_name = intf_info["name"]
+        else:
+            intf_name = intf_info["alias"]
+        if "gw" in intf_info:
+            continue
+        cmd = "ip addr add %s/%s brd %s dev %s;" \
+            % (intf_info["ip"], intf_info["netmask"],
+               str(network.broadcast), intf_name)
+#        if "gw" in intf_info:
+#            cmd += "route del default;"
+#            cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name)
+        LOG.info("setup_ips: cmd=%s" % cmd)
+        os.system(cmd)
+
+def setup_ips_new(config):
+    LOG.info("setup_ips_new enter")
+    network = netaddr.IPNetwork(config["ip_settings"]["br-prv"]["cidr"])
+    intf_name = config["provider_net_mappings"][0]["interface"]
+#    cmd = "ip addr add %s/%s brd %s dev %s;" \
+#        % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), intf_name)
+    cmd = "ip link set br-ex up;"
+    cmd += "ip addr add %s/%s brd %s dev %s;" \
+        % (config["ip_settings"]["br-prv"]["ip"], config["ip_settings"]["br-prv"]["netmask"], str(network.broadcast), 'br-ex')
+    cmd += "route del default;"
+#    cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], intf_name)
+    cmd += "ip route add default via %s dev %s" % (config["ip_settings"]["br-prv"]["gw"], 'br-ex')
+    LOG.info("setup_ips_new: cmd=%s" % cmd)
+    os.system(cmd)
+
+def main(config):
+    uplink_map = {}
+    setup_bondings(config["bond_mappings"])
+    for provider_net in config["provider_net_mappings"]:
+        uplink_map[provider_net['name']] = provider_net['interface']
+
+    setup_intfs(config["sys_intf_mappings"], uplink_map)
+    setup_ips(config["ip_settings"], config["sys_intf_mappings"])
+    setup_ips_new(config)
+
+if __name__ == "__main__":
+    os.system("service openvswitch-switch status|| service openvswitch-switch start")
+    config = yaml.load(open(config_path))
+    main(config)
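Both new scripts assemble one shell command string and hand it to os.system(); netaddr is used only to derive the broadcast address from the configured CIDR. For illustration, this is the command setup_ips_new() would emit for a made-up br-prv block (the addresses below are invented sample data, not values from this change):

    # Illustration only: the br-prv values are invented sample data.
    import netaddr

    br_prv = {
        "cidr": "192.168.10.0/24",   # assumed sample value
        "ip": "192.168.10.11",       # assumed sample value
        "netmask": "24",             # assumed sample value
        "gw": "192.168.10.1",        # assumed sample value
    }

    network = netaddr.IPNetwork(br_prv["cidr"])
    cmd = "ip link set br-ex up;"
    cmd += "ip addr add %s/%s brd %s dev %s;" % (
        br_prv["ip"], br_prv["netmask"], str(network.broadcast), "br-ex")
    cmd += "route del default;"
    cmd += "ip route add default via %s dev %s" % (br_prv["gw"], "br-ex")
    print(cmd)
    # ip link set br-ex up;ip addr add 192.168.10.11/24 brd 192.168.10.255
    # dev br-ex;route del default;ip route add default via 192.168.10.1 dev br-ex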
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
index 91e75dfe..846a71c7 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
@@ -19,6 +19,8 @@
 - name: download oracle-jdk8 package file
   get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }}
 
+#"
+
 - name: upload install_jdk8 scripts
   unarchive: src=install_jdk8.tar dest=/opt/
@@ -57,6 +59,8 @@
 - name: download odl package
   get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }}
 
+# "
+
 #- name: download odl package
 #  get_url: url={{ odl_pkg_url }} dest=/opt/{{ odl_pkg_name }}
@@ -74,6 +78,15 @@
     dest: "{{ service_file.dst }}"
     mode: 0644
 
+- name: set l3 fwd enable in custom.properties
+  template:
+    src: custom.properties
+    dest: "{{ odl_home }}/etc/custom.properties"
+    owner: odl
+    group: odl
+    mode: 0775
+  when: odl_l3_agent == "Enable"
+
 - name: create karaf config
   template:
     src: org.apache.karaf.features.cfg
@@ -81,15 +94,11 @@
     owner: odl
     group: odl
     mode: 0775
-#  notify:
-#    - restart odl service
 
 - name: create tomcat config
   template:
     src: tomcat-server.xml
     dest: "{{ odl_home }}/configuration/tomcat-server.xml"
-#  notify:
-#    - restart odl service
 
 - name: install odl pip packages
   pip: name={{ item }} state=present
@@ -177,12 +186,6 @@
 - name: turn off keepalived on control node
   service: name=keepalived state=stopped
 
-#- name: Install Crudini
-#  apt: name={{ item }} state=present
-#  with_items:
-#    - crudini
-
-
 - name: chown opendaylight directory and files
   shell: >
     chown -R odl:odl "{{ odl_home }}";
@@ -197,16 +200,6 @@
 - name: run openvswitch script
   include: openvswitch.yml
 
-#- name: add patch-port and normal flow table
-#  shell: ovs-vsctl add-port br-int patch-br-prv;
-#         ovs-vsctl set Interface patch-br-prv type=patch;
-#         ovs-vsctl set Interface patch-br-prv options:peer=patch-int;
-#         ovs-vsctl add-port br-prv patch-int;
-#         ovs-vsctl set Interface patch-int type=patch;
-#         ovs-vsctl set Interface patch-int options:peer=patch-br-prv;
-#         ovs-ofctl -O OpenFlow13 add-flow br-int "table=0, priority=0 actions=NORMAL";
-#  ignore_errors: True
-
 #- name: Configure Neutron1
 #  shell: >
 #    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
@@ -221,8 +214,20 @@
 #- name: Execute ML2 Configuration File
 #  command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
 
-- name: configure l3 configuration
+
+- name: configure l2 configuration
   shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-prv;
+  when: odl_l3_agent == "Disable"
+
+- name: configure l3 configuration
+  shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-ex;
+  when: odl_l3_agent == "Enable"
+
+- name: configure odl l3 driver
+  shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin;
+  when: odl_l3_agent == "Enable"
+
+
 - name: drop and recreate neutron database
   shell: mysql -e "drop database if exists neutron;";
@@ -235,8 +240,8 @@
   service: name=neutron-server state=started
 
 - name: add service daemon
-  shell: >
-    echo opendaylight >> /opt/service ;
+  shell: >
+    echo keepalived >> /opt/service ;
     echo neutron-server >> /opt/service ;
 
 - name: restart neutron-l3-agent server
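The three crudini tasks above carry the new flag's main effect: with odl_l3_agent set to "Enable", Neutron's external bridge moves from br-prv to br-ex and OpenDaylight's L3 router plugin is registered as the service plugin. A rough Python equivalent of those ini edits, sketched with the stock ConfigParser rather than the crudini CLI the tasks actually call (crudini preserves file formatting better than this sketch does):

    try:
        from configparser import ConfigParser               # Python 3
    except ImportError:
        from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

    def set_option(path, section, option, value):
        # Approximation of: crudini --set <path> <section> <option> <value>
        cfg = ConfigParser()
        cfg.read(path)
        if section != "DEFAULT" and not cfg.has_section(section):
            cfg.add_section(section)
        cfg.set(section, option, value)
        with open(path, "w") as f:
            cfg.write(f)

    def configure_neutron(odl_l3_agent):
        if odl_l3_agent == "Enable":
            set_option("/etc/neutron/l3_agent.ini", "DEFAULT",
                       "external_network_bridge", "br-ex")
            set_option("/etc/neutron/neutron.conf", "DEFAULT", "service_plugins",
                       "networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin")
        else:
            set_option("/etc/neutron/l3_agent.ini", "DEFAULT",
                       "external_network_bridge", "br-prv")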
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
index 0431d82a..72182462 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
@@ -23,25 +23,12 @@
 - name: shut down and disable Neutron's openvswitch agent services
   service: name=neutron-plugin-openvswitch-agent state=stopped
 
-#- name: Stop the Open vSwitch service and clear existing OVSDB
-#  shell: >
-#    ovs-ofctl del-flows br-int ;
-#    ovs-vsctl del-br br-tun ;
-#    ovs-vsctl del-port br-int patch-tun;
-#    ovs-vsctl del-manager ;
-
-#- name: Restart OpenVSwitch
-#  shell: service openvswitch-switch restart;
-
 - name: remove Neutron's openvswitch agent services
   shell: >
     update-rc.d -f neutron-plugin-openvswitch-agent remove;
     mv /etc/init.d/neutron-plugin-openvswitch-agent /home/neutron-plugin-openvswitch-agent;
     mv /etc/init/neutron-plugin-openvswitch-agent.conf /home/neutron-plugin-openvswitch-agent.conf;
 
-- name: Check External network
-  shell: ovs-vsctl list-br | grep br-prv
-  register: extbr
 
 - name: Stop the Open vSwitch service and clear existing OVSDB
   shell: >
@@ -50,9 +37,6 @@
     rm -rf /etc/openvswitch/conf.db ;
     service openvswitch-switch start ;
 
-#- name: Set OpenDaylight as the manager
-#  command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
-
 - name: set opendaylight as the manager
   command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
 
@@ -65,34 +49,80 @@
 #'
 
 ##################################################################
-################ Recover External network #######################
+########### Recover External network for odl l3 #################
+##################################################################
+
+- name: check br-ex
+  shell: ovs-vsctl list-br | grep br-ex; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-ex; done
+  when: odl_l3_agent == "Enable"
+
+- name: add ovs uplink
+  openvswitch_port: bridge=br-ex port={{ item["interface"] }} state=present
+  with_items: "{{ network_cfg['provider_net_mappings'] }}"
+  when: item["type"] == "ovs" and odl_l3_agent == "Enable"
+
+- name: wait 10 seconds
+  shell: sleep 10
+  when: odl_l3_agent == "Enable"
+
+- name: set external nic in openvswitch
+  shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config:provider_mappings=br-ex:{{ item["interface"] }}
+  with_items: "{{ network_cfg['provider_net_mappings'] }}"
+  when: item["type"] == "ovs" and odl_l3_agent == "Enable"
+
+- name: copy recovery script
+  copy: src={{ item }} dest=/opt/setup_networks
+  with_items:
+    - recover_network_odl_l3.py
+    - setup_networks_odl_l3.py
+  when: odl_l3_agent == "Enable"
+
+- name: recover external script
+  shell: python /opt/setup_networks/recover_network_odl_l3.py
+  when: odl_l3_agent == "Enable"
+
+- name: update keepalived info
+  template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
+  when: inventory_hostname in groups['odl'] and odl_l3_agent == "Enable"
+
+- name: modify net-init
+  shell: sed -i 's/setup_networks.py/setup_networks_odl_l3.py/g' /etc/init.d/net_init
+  when: odl_l3_agent == "Enable"
+
+##################################################################
+########### Recover External network for odl l2 #################
 ##################################################################
 
 - name: add ovs bridge
   openvswitch_bridge: bridge={{ item["name"] }} state=present
   with_items: "{{ network_cfg['provider_net_mappings'] }}"
-  when: item["type"] == "ovs" and extbr.rc == 0
+  when: item["type"] == "ovs" and odl_l3_agent == "Disable"
 
 - name: add ovs uplink
   openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
   with_items: "{{ network_cfg['provider_net_mappings'] }}"
-  when: item["type"] == "ovs" and extbr.rc == 0
+  when: item["type"] == "ovs" and odl_l3_agent == "Disable"
 
 - name: copy recovery script
   copy: src={{ item }} dest=/opt/setup_networks
   with_items:
     - recover_network.py
-  when: extbr.rc == 0
+  when: odl_l3_agent == "Disable"
 
 - name: recover external script
   shell: python /opt/setup_networks/recover_network.py
-  when: extbr.rc == 0
+  when: odl_l3_agent == "Disable"
+
+##################################################################
 
-- name: restart keepalived
+
+- name: restart keepalived to recover external IP
   shell: service keepalived restart
-  when: inventory_hostname in groups['odl'] and extbr.rc == 0
+  when: inventory_hostname in groups['odl']
   ignore_errors: True
+
+
 ##################################################################
 ##################################################################
 ##################################################################
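The "check br-ex" task polls every 10 seconds, with no upper bound, until the newly set OVS manager (OpenDaylight) has created the external bridge. The same wait-until-ready pattern in Python, with an explicit timeout added (the 600-second bound is an addition for the sketch, not part of the task):

    import subprocess
    import time

    def wait_for_bridge(name, interval=10, timeout=600):
        # Poll `ovs-vsctl list-br` until the bridge appears or we give up.
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.check_output(["ovs-vsctl", "list-br"])
            if name in out.decode().split():
                return True
            time.sleep(interval)
        return False

    if not wait_for_bridge("br-ex"):
        raise RuntimeError("br-ex was not created by the OVS manager")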
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/custom.properties b/deploy/adapters/ansible/roles/odl_cluster/templates/custom.properties
new file mode 100644
index 00000000..4eb86184
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/custom.properties
@@ -0,0 +1,135 @@
+# Extra packages to import from the boot class loader
+org.osgi.framework.system.packages.extra=org.apache.karaf.branding,sun.reflect,sun.reflect.misc,sun.misc,sun.nio.ch
+
+# https://bugs.eclipse.org/bugs/show_bug.cgi?id=325578
+# Extend the framework to avoid the resources to be presented with
+# a URL of type bundleresource: but to be presented as file:
+osgi.hook.configurators.include=org.eclipse.virgo.kernel.equinox.extensions.hooks.ExtensionsHookConfigurator
+
+# Embedded Tomcat configuration File
+org.eclipse.gemini.web.tomcat.config.path=configuration/tomcat-server.xml
+org.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true
+
+# Use Equinox as default OSGi Framework Implementation
+karaf.framework=equinox
+
+# Netconf startup configuration
+netconf.tcp.address=127.0.0.1
+netconf.tcp.port=8383
+
+netconf.tcp.client.address=127.0.0.1
+netconf.tcp.client.port=8383
+
+netconf.ssh.address=0.0.0.0
+netconf.ssh.port=1830
+# Use Linux style path
+netconf.ssh.pk.path = ./configuration/RSA.pk
+# Set security provider to BouncyCastle
+org.apache.karaf.security.providers = org.bouncycastle.jce.provider.BouncyCastleProvider
+
+
+netconf.config.persister.active=1
+
+netconf.config.persister.1.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter
+netconf.config.persister.1.properties.fileStorage=etc/opendaylight/current/controller.currentconfig.xml
+netconf.config.persister.1.properties.numberOfBackups=1
+
+# logback configuration
+logback.configurationFile=configuration/logback.xml
+
+# Container configuration
+container.profile = Container
+
+# Connection manager configuration
+connection.scheme = ANY_CONTROLLER_ONE_MASTER
+
+# Open Flow related system parameters
+# TCP port on which the controller is listening (default 6633)
+# of.listenPort=6633
+# IP address of the controller (default: wild card)
+# of.address = 127.0.0.1
+# The time (in milliseconds) the controller will wait for a response after sending a Barrier Request or a Statistic Request message (default 2000 msec)
+# of.messageResponseTimer=2000
+# The switch liveness timeout value (default 60500 msec)
+# of.switchLivenessTimeout=60500
+# The size of the queue holding pending statistics requests (default 64). For large networks of n switches, it is recommended to set the queue size to n
+# of.statsQueueSize = 64
+# The flow statistics polling interval in second (default 10 sec)
+# of.flowStatsPollInterval=10
+# The port statistics polling interval in second (default 5 sec)
+# of.portStatsPollInterval=5
+# The description statistics polling interval in second (default 60 sec)
+# of.descStatsPollInterval=60
+# The table statistics polling interval in second (default 10 sec)
+# of.tableStatsPollInterval=10
+# The maximum number of asynchronous messages can be sent before sending a Barrier Request (default 100)
+# of.barrierMessagePriorCount=100
+# The interval which determines how often the discovery packets should be sent (default 300 sec)
+# of.discoveryInterval=300
+# The timeout multiple of discovery interval
+# of.discoveryTimeoutMultiple=2
+# For newly added ports, allow one more retry if the elapsed time exceeds this threshold (default 30 sec)
+# of.discoveryThreshold=30
+# The maximum number of ports handled in one discovery batch (default 512)
+# of.discoveryBatchMaxPorts=512
+
+# OVSDB configuration
+# ovsdb plugin supports both active and passive connections. It listens on port 6640 by default for Active connections.
+ovsdb.listenPort=6640
+
+# ovsdb creates Openflow nodes/bridges. This configuration configures the bridge's Openflow version.
+# default Openflow version = 1.0, we also support 1.3.
+# ovsdb.of.version=1.3
+
+# ovsdb can be configured with ml2 to perform l3 forwarding. The config below enables that functionality, which is
+# disabled by default.
+ovsdb.l3.fwd.enabled=yes
+
+# ovsdb can be configured with ml2 to perform arp responder, enabled by default.
+ovsdb.l3.arp.responder.disabled=no
+
+# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
+# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
+# specific arp/neighDiscovery lookup.
+# ovsdb.l3gateway.mac=00:00:5E:00:02:01
+
+# TLS configuration
+# To enable TLS, set secureChannelEnabled=true and specify the location of controller Java KeyStore and TrustStore files.
+# The Java KeyStore contains controller's private key and certificate. The Java TrustStore contains the trusted certificate
+# entries, including switches' Certification Authority (CA) certificates. For example,
+# secureChannelEnabled=true
+# controllerKeyStore=./configuration/ctlKeyStore
+# controllerKeyStorePassword=xxxxxxxx (this password should match the password used for KeyStore generation and at least 6 characters)
+# controllerTrustStore=./configuration/ctlTrustStore
+# controllerTrustStorePassword=xxxxxxxx (this password should match the password used for TrustStore generation and at least 6 characters)
+
+secureChannelEnabled=false
+controllerKeyStore=
+controllerKeyStorePassword=
+controllerTrustStore=
+controllerTrustStorePassword=
+
+# User Manager configurations
+enableStrongPasswordCheck = false
+
+#Jolokia configurations
+#org.jolokia.listenForHttpService=false
+
+# Logging configuration for Tomcat-JUL logging
+java.util.logging.config.file=configuration/tomcat-logging.properties
+
+#Hosttracker hostsdb key scheme setting
+hosttracker.keyscheme=IP
+
+# LISP Flow Mapping configuration
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings (default: true)
+lisp.mappingOverwrite = true
+# Enable the Solicit-Map-Request (SMR) mechanism (default: true)
+lisp.smr = true
+# Choose policy for Explicit Locator Path (ELP) handling
+# There are three options:
+# default: don't add or remove locator records, return mapping as-is
+# both: keep the ELP, but add the next hop as a standalone non-LCAF locator with a lower priority
+# replace: remove the ELP, add the next hop as a standalone non-LCAF locator
+lisp.elpPolicy = default
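The operative lines in this template are ovsdb.l3.fwd.enabled=yes and ovsdb.l3.arp.responder.disabled=no, which switch on the OVSDB plugin's L3 forwarding and keep its ARP responder active. A quick sanity check for the rendered file (a sketch: this file is plain key=value, so no full Java properties parser is needed, and the path is assumed to mirror the {{ odl_home }}/etc/custom.properties destination in odl_controller.yml):

    def read_props(path):
        # Minimal key=value reader; skips blanks and comment lines.
        props = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                key, _, value = line.partition("=")
                props[key.strip()] = value.strip()
        return props

    props = read_props("/opt/odl/etc/custom.properties")  # path assumed
    assert props.get("ovsdb.l3.fwd.enabled") == "yes"
    assert props.get("ovsdb.l3.arp.responder.disabled") == "no"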
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/keepalived.conf b/deploy/adapters/ansible/roles/odl_cluster/templates/keepalived.conf
new file mode 100644
index 00000000..4ccf1c43
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/keepalived.conf
@@ -0,0 +1,47 @@
+global_defs {
+    router_id {{ inventory_hostname }}
+}
+
+vrrp_sync_group VG1 {
+    group {
+        internal_vip
+        public_vip
+    }
+}
+
+vrrp_instance internal_vip {
+    interface {{ internal_vip.interface }}
+    virtual_router_id {{ vrouter_id_internal }}
+    state BACKUP
+    nopreempt
+    advert_int 1
+    priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+    authentication {
+        auth_type PASS
+        auth_pass 1234
+    }
+
+    virtual_ipaddress {
+        {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ internal_vip.interface }}
+    }
+}
+
+vrrp_instance public_vip {
+    interface br-ex
+    virtual_router_id {{ vrouter_id_public }}
+    state BACKUP
+    nopreempt
+    advert_int 1
+    priority {{ 50 + (host_index[inventory_hostname] * 50) }}
+
+    authentication {
+        auth_type PASS
+        auth_pass 4321
+    }
+
+    virtual_ipaddress {
+        {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev br-ex
+    }
+
+}
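Both vrrp_instance blocks compute priority as 50 + host_index * 50, so each ODL node gets a distinct VRRP priority and mastership is deterministic. For a hypothetical three-node inventory (the host names and indices below are invented; the real mapping comes from the deployment inventory):

    host_index = {"host1": 0, "host2": 1, "host3": 2}  # assumed inventory

    for host in sorted(host_index):
        print(host, 50 + host_index[host] * 50)
    # host1 50
    # host2 100
    # host3 150  <- highest priority, so elected VRRP master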
diff --git a/deploy/adapters/ansible/roles/odl_cluster_post/tasks/main.yml b/deploy/adapters/ansible/roles/odl_cluster_post/tasks/main.yml
index 83d75c39..8432186c 100755
--- a/deploy/adapters/ansible/roles/odl_cluster_post/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster_post/tasks/main.yml
@@ -2,3 +2,7 @@
 - name: restart opendaylight
   shell: sleep 60; service opendaylight restart; sleep 300;
   when: inventory_hostname in groups['odl']
+
+- name: add service daemon
+  shell: echo opendaylight >> /opt/service ;
+  when: inventory_hostname in groups['odl']
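Here, as in odl_controller.yml, service names are appended to /opt/service, which appears to act as a simple registry of daemons, one name per line, for later supervision. A hypothetical consumer, purely illustrative since the component that reads /opt/service is not part of this change:

    # Hypothetical illustration only; not part of Compass.
    import os

    def read_service_registry(path="/opt/service"):
        with open(path) as f:
            return [line.strip() for line in f if line.strip()]

    for name in read_service_registry():
        # Restart any registered daemon that is not currently running.
        os.system("service %s status || service %s restart" % (name, name))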