Diffstat (limited to 'deploy/adapters')
19 files changed, 602 insertions, 0 deletions
diff --git a/deploy/adapters/ansible/openstack_juno/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_juno/HA-ansible-multinodes.yml
index a5547d07..c330172d 100644
--- a/deploy/adapters/ansible/openstack_juno/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack_juno/HA-ansible-multinodes.yml
@@ -34,6 +34,12 @@
     - neutron-compute
     - cinder-volume
 
+- hosts: odl
+  remote_user: root
+  sudo: True
+  roles:
+    - odl_cluster
+
 - hosts: all
   remote_user: root
   sudo: True
diff --git a/deploy/adapters/ansible/openstack_juno/single-controller.yml b/deploy/adapters/ansible/openstack_juno/single-controller.yml
index 4539e5fb..96ec0a6a 100644
--- a/deploy/adapters/ansible/openstack_juno/single-controller.yml
+++ b/deploy/adapters/ansible/openstack_juno/single-controller.yml
@@ -30,3 +30,9 @@
     - common
     - nova-compute
     - neutron-compute
+
+- hosts: odl
+  remote_user: root
+  sudo: True
+  roles:
+    - odl
diff --git a/deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml b/deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml
new file mode 100644
index 00000000..017700b7
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+- name: restart odl service
+  #service: name=opendaylight state=restarted
+  command: su -s /bin/sh -c "{{ odl_home }}/bin/stop;{{ odl_home }}/bin/start;"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
new file mode 100644
index 00000000..1b27ed14
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: Install ODL Cluster on Controller
+  include: main_controller.yml
+  when: inventory_hostname in groups['controller']
+
+- name: Install ODL Cluster on Compute
+  include: main_compute.yml
+  when: inventory_hostname in groups['compute']
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/main_compute.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/main_compute.yml
new file mode 100644
index 00000000..39947661
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/main_compute.yml
@@ -0,0 +1,16 @@
+---
+- name: shut down and disable Neutron's agent services
+  service: name=neutron-plugin-openvswitch-agent state=stopped
+
+- name: Stop the Open vSwitch service and clear existing OVSDB
+  shell: >
+    service openvswitch-switch stop ;
+    rm -rf /var/log/openvswitch/* ;
+    rm -rf /etc/openvswitch/conf.db ;
+    service openvswitch-switch start ;
+
+- name: Set OpenDaylight as the manager
+  command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{odl_controller}}:6640;"
+
+- name: start and disable Neutron's agent services
+  service: name=neutron-plugin-openvswitch-agent state=started
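The compute-side tasks above point Open vSwitch at OpenDaylight with a raw ovs-vsctl call but never confirm that the manager setting took effect. For reference only (not part of this change), a follow-up check in the same role style could look like the sketch below; it assumes the same odl_controller variable the tasks already use.

    - name: read back the configured OVSDB manager (illustrative check)
      shell: ovs-vsctl get-manager
      register: ovs_manager_out
      changed_when: false

    - name: fail when the manager does not point at the ODL controller
      fail: msg="ovs-vsctl manager is not set to {{ odl_controller }}"
      when: odl_controller not in ovs_manager_out.stdout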
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/main_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/main_controller.yml
new file mode 100644
index 00000000..751af715
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/main_controller.yml
@@ -0,0 +1,149 @@
+---
+- name: install jdk packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: packages | union(packages_noarch)
+
+#- name: install opendaylight packages
+#  apt: name={{ item }} state=present
+#  with_items:
+#    - openjdk-8-jdk
+
+#- name: create odl directories
+#  file:
+#    path: /opt/opendaylight-0.2.2
+#    state: "directory"
+#    group: root
+#    owner: root
+#    mode: 0755
+
+- name: create odl group
+  group: name=odl system=yes state=present
+
+- name: create odl user
+  user:
+    name: odl
+    group: odl
+    home: "{{ odl_home }}"
+    createhome: "yes"
+    system: "yes"
+    shell: "/bin/false"
+
+- name: download odl package
+  get_url: url={{ odl_pkg_url }} dest=/opt/{{ odl_pkg_name }}
+
+# TODO: unarchive doesn't support strip-component at the moment
+# TODO: switch to use untar after support is added.
+- name: extract odl package
+#  unarchive: src=/opt/{{ odl_pkg_name }} dest={{ odl_home }} group=odl owner=odl mode=0775 copy=no
+  command: su -s /bin/sh -c "tar xzf /opt/{{ odl_pkg_name }} -C {{ odl_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" odl
+  notify:
+    - restart odl service
+
+#- name: opendaylight systemd file
+#  template: src=opendaylight.service dest=/usr/lib/systemd/system/opendaylight.service mode=0644
+
+- name: create karaf config
+  template:
+    src: org.apache.karaf.features.cfg
+    dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
+    owner: odl
+    group: odl
+    mode: 0775
+  notify:
+    - restart odl service
+
+- name: create tomcat config
+  template:
+    src: tomcat-server.xml
+    dest: "{{ odl_home }}/configuration/tomcat-server.xml"
+  notify:
+    - restart odl service
+
+#- name: restart odl service
+#  service: name=opendaylight state=started pattern="opendaylight"
+
+##########################################################################################################
+#################################   OpenDayLight Cluster Configuration  #################################
+##########################################################################################################
+- name: create initial directory
+  shell: >
+    mkdir -p {{ odl_home }}/configuration/initial;
+
+- name: create akka config
+  template:
+    src: akka.conf
+    dest: "{{ odl_home }}/configuration/initial/akka.conf"
+  notify:
+    - restart odl service
+
+
+- name: create module-shards config
+  template:
+    src: module-shards.conf
+    dest: "{{ odl_home }}/configuration/initial/module-shards.conf"
+  notify:
+    - restart odl service
+
+- name: copy Jolokia-OSGi config
+  shell: >
+    cp -r jolokia {{ odl_home }}/system/org/;
+
+- name: remove KARAF Data Directory
+  shell: >
+    rm -rf {{ odl_home }}/data/*;
+
+
+##########################################################################################################
+#################################  OpenDayLight connect with OpenStack  #################################
+##########################################################################################################
+- name: Turn off neutron-server on control node
+  service: name=neutron-server state=stopped
+
+- name: shut down and disable Neutron's agent services
+  service: name=neutron-plugin-openvswitch-agent state=stopped
+
+- name: Stop the Open vSwitch service and clear existing OVSDB
+  shell: >
+    service openvswitch-switch stop ;
+    rm -rf /var/log/openvswitch/* ;
+    rm -rf /etc/openvswitch/conf.db ;
+    service openvswitch-switch start ;
+
+- name: Set OpenDaylight as the manager
+  command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{odl_controller}}:6640;"
+
+- name: start and disable Neutron's agent services
+  service: name=neutron-plugin-openvswitch-agent state=started
+
+- name: Install Crudini
+  apt: name={{ item }} state=present
+  with_items:
+    - crudini
+
+- name: Configure Neutron1
+  shell: >
+    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
+    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
+
+- name: Create ML2 Configuration File
+  template:
+    src: ml2_conf.sh
+    dest: "/opt/ml2_conf.sh"
+    mode: 0777
+
+- name: Configure Neutron2
+  command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
+
+- name: Configure Neutron3
+  shell: >
+    mysql -e "drop database if exists neutron_ml2;";
+    mysql -e "create database neutron_ml2 character set utf8;";
+    mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
+    su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
+
+- name: Restart neutron-server
+  service: name=neutron-server state=started
+
+
+
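The controller tasks above install crudini only to set two ml2_conf.ini options from a shell task. As a reference alternative (not part of this change), Ansible's built-in ini_file module can write the same options idempotently and without the extra package:

    - name: set the OpenDaylight mechanism driver (ini_file alternative)
      ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=mechanism_drivers value=opendaylight

    - name: set the tenant network type to vxlan (ini_file alternative)
      ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=tenant_network_types value=vxlan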
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/akka.conf b/deploy/adapters/ansible/roles/odl_cluster/templates/akka.conf
new file mode 100644
index 00000000..4818e340
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/akka.conf
@@ -0,0 +1,105 @@
+
+odl-cluster-data {
+  bounded-mailbox {
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+    mailbox-capacity = 1000
+    mailbox-push-timeout-time = 100ms
+  }
+
+  metric-capture-enabled = true
+
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+
+    actor {
+
+      provider = "akka.cluster.ClusterActorRefProvider"
+      serializers {
+        java = "akka.serialization.JavaSerializer"
+        proto = "akka.remote.serialization.ProtobufSerializer"
+      }
+
+      serialization-bindings {
+        "com.google.protobuf.Message" = proto
+
+      }
+    }
+    remote {
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "{{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}"
+        port = 2550
+        maximum-frame-size = 419430400
+        send-buffer-size = 52428800
+        receive-buffer-size = 52428800
+      }
+    }
+
+    cluster {
+      seed-nodes = [
+{% for host in groups['odl_controller'] %}
+  {% if loop.last %}
+        "akka.tcp://opendaylight-cluster-data@{{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:2550"
+  {% else %}
+        "akka.tcp://opendaylight-cluster-data@{{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:2550",
+  {% endif %}
+{% endfor %}
+      ]
+
+      auto-down-unreachable-after = 10s
+
+      roles = [
+{% set key = 0 %}
+{% for host in groups['odl_controller'] %}
+  {% set key = key + 1 %}
+  {% if hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address == hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address %}
+        "member-{{ key }}"
+  {% endif %}
+{% endfor %}
+      ]
+
+    }
+  }
+}
+
+odl-cluster-rpc {
+  bounded-mailbox {
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+    mailbox-capacity = 1000
+    mailbox-push-timeout-time = 100ms
+  }
+
+  metric-capture-enabled = true
+
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+
+    }
+    remote {
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "{{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}"
+        port = 2551
+      }
+    }
+
+    cluster {
+      seed-nodes = [
+{% for host in groups['odl_controller'] %}
+  {% if loop.last %}
+        "akka.tcp://odl-cluster-rpc@{{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:2551"
+  {% else %}
+        "akka.tcp://odl-cluster-rpc@{{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:2551",
+  {% endif %}
+{% endfor %}
+      ]
+
+      auto-down-unreachable-after = 10s
+    }
+  }
+}
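akka.conf binds each controller's clustered data store to port 2550 (and clustered RPC to 2551) on the internal interface, and seeds the cluster from every host in the odl_controller group. If a readiness check were wanted after the "restart odl service" handler runs, a task along these lines would fit the role's style; it is illustrative only and reuses the INTERNAL_INTERFACE variable the template already assumes.

    - name: wait for the akka clustering port on every seed node (illustrative)
      wait_for: host={{ hostvars[item]['ansible_' + INTERNAL_INTERFACE].ipv4.address }} port=2550 timeout=120
      with_items: groups['odl_controller']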
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5-features.xml b/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5-features.xml
new file mode 100644
index 00000000..6242d788
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5-features.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="framework-3.0.1">
+  <repository>mvn:org.jolokia/jolokia-osgi/1.1.5/xml/features</repository>
+  <feature name="jolokia-osgi" version="1.1.5" description="Jolokia Agent">
+    <bundle start-level="5" start="true">mvn:org.jolokia/jolokia-osgi/1.1.5</bundle>
+  </feature>
+</features>
+
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar b/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar
new file mode 100644
index 00000000..18e8710b
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar
Binary files differ
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar.sha1 b/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar.sha1
new file mode 100644
index 00000000..790e6ef2
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar.sha1
@@ -0,0 +1 @@
+ba513164178626519a6fb12ced4a3d6e1e07dd54
\ No newline at end of file
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
new file mode 100644
index 00000000..1ce1e41e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/ml2_conf.sh
@@ -0,0 +1,6 @@
+cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini
+[ml2_odl]
+password = admin
+username = admin
+url = http://{{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8080/controller/nb/v2/neutron
+EOT
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/module-shards.conf b/deploy/adapters/ansible/roles/odl_cluster/templates/module-shards.conf
new file mode 100644
index 00000000..4477b392
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/module-shards.conf
@@ -0,0 +1,101 @@
+# This file describes which shards live on which members
+# The format for a module-shards is as follows,
+# {
+#    name = "<friendly_name_of_the_module>"
+#    shards = [
+#        {
+#            name="<any_name_that_is_unique_for_the_module>"
+#            replicas = [
+#                "<name_of_member_on_which_to_run>"
+#            ]
+#        ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than 1
+# The replicas section is a collection of member names. This information
+# will be used to decide on which members replicas of a particular shard will be
+# located. Once replication is integrated with the distributed data store then
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+    {
+        name = "default"
+        shards = [
+            {
+                name="default"
+                replicas = [
+{% set key = 0 %}
+{% for host in groups['controller'] %}
+  {% set key = key + 1 %}
+  {% if loop.last %}
+                    "member-{{ key }}"
+  {% else %}
+                    "member-{{ key }}",
+  {% endif %}
+{% endfor %}
+                ]
+            }
+        ]
+    },
+    {
+        name = "topology"
+        shards = [
+            {
+                name="topology"
+                replicas = [
+{% set key = 0 %}
+{% for host in groups['controller'] %}
+  {% set key = key + 1 %}
+  {% if loop.last %}
+                    "member-{{ key }}"
+  {% else %}
+                    "member-{{ key }}",
+  {% endif %}
+{% endfor %}
+                ]
+            }
+        ]
+    },
+    {
+        name = "inventory"
+        shards = [
+            {
+                name="inventory"
+                replicas = [
+{% set key = 0 %}
+{% for host in groups['controller'] %}
+  {% set key = key + 1 %}
+  {% if loop.last %}
+                    "member-{{ key }}"
+  {% else %}
+                    "member-{{ key }}",
+  {% endif %}
+{% endfor %}
+                ]
+            }
+        ]
+    },
+    {
+        name = "toaster"
+        shards = [
+            {
+                name="toaster"
+                replicas = [
+{% set key = 0 %}
+{% for host in groups['controller'] %}
+  {% set key = key + 1 %}
+  {% if loop.last %}
+                    "member-{{ key }}"
+  {% else %}
+                    "member-{{ key }}",
+  {% endif %}
+{% endfor %}
+                ]
+            }
+        ]
+    }
+]
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.conf b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.conf
new file mode 100644
index 00000000..c9a36f2e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.conf
@@ -0,0 +1,40 @@
+
+# vim:set ft=upstart ts=2 et:
+description "OpenDaylight controller"
+author "mskalski@miranits.com"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+setgid odl
+setuid odl
+
+env KARAF_HOME="/opt/opendaylight-0.3.0"
+env JAVA_HOME="/usr/lib/jvm/java-7-openjdk-amd64"
+env JAVA_OPTS="-server -Xms128M -Xmx4096M -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -XX:MaxPermSize=512M -Dcom.sun.management.jmxremote"
+env OPTS="-Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true"
+env MAIN="org.apache.karaf.main.Main"
+
+
+chdir /opt/opendaylight-0.3.0
+
+script
+  export KARAF_BASE="$KARAF_HOME"
+  export KARAF_DATA="$KARAF_BASE/data"
+  export KARAF_ETC="$KARAF_BASE/etc"
+  export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:$KARAF_BASE/lib"
+  export JAVA_ENDORSED_DIRS="${JAVA_HOME}/jre/lib/endorsed:${JAVA_HOME}/lib/endorsed:${KARAF_HOME}/lib/endorsed"
+  export JAVA_EXT_DIRS="${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext:${KARAF_HOME}/lib/ext"
+
+  for file in "$KARAF_HOME"/lib/karaf*.jar
+  do
+    if [ -z "$CLASSPATH" ]; then
+      CLASSPATH="$file"
+    else
+      CLASSPATH="$CLASSPATH:$file"
+    fi
+  done
+
+  exec /usr/bin/java $JAVA_OPTS -Djava.endorsed.dirs="${JAVA_ENDORSED_DIRS}" -Djava.ext.dirs="${JAVA_EXT_DIRS}" -Dkaraf.instances="${KARAF_HOME}/instances" -Dkaraf.home="$KARAF_HOME" -Dkaraf.base="$KARAF_BASE" -Dkaraf.data="$KARAF_DATA" -Dkaraf.etc="$KARAF_ETC" -Djava.io.tmpdir="$KARAF_DATA/tmp" -Djava.util.logging.config.file="$KARAF_BASE/etc/java.util.logging.properties" $KARAF_OPTS $OPTS -classpath "$CLASSPATH" $MAIN
+
+end script
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.service b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.service
new file mode 100644
index 00000000..61a1dbdc
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=OpenDaylight SDN Controller
+Documentation=https://wiki.opendaylight.org/view/Main_Page http://www.opendaylight.org/
+After=network.service
+
+[Service]
+Type=forking
+ExecStart=/opt/opendaylight-0.2.2/bin/start
+User=odl
+Group=odl
+
+[Install]
+WantedBy=multi-user.target
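The role ships both an upstart job (opendaylight.conf) and a systemd unit (opendaylight.service), but neither is installed by the tasks above: the handler restarts Karaf directly through {{ odl_home }}/bin/stop and bin/start, and the unit's ExecStart still points at /opt/opendaylight-0.2.2 while the role deploys 0.3.0. If the systemd unit were wired in, the hookup would look roughly like this sketch (illustrative only, not part of this change):

    - name: install the OpenDaylight systemd unit (hypothetical)
      template: src=opendaylight.service dest=/lib/systemd/system/opendaylight.service mode=0644

    - name: enable and start OpenDaylight through systemd (hypothetical)
      service: name=opendaylight state=started enabled=yes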
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg
new file mode 100644
index 00000000..e53a07b2
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg
@@ -0,0 +1,54 @@
+################################################################################
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+################################################################################
+
+#
+# Defines if the startlvl should be respected during feature startup. The default value is true. The default
+# behavior for 2.x is false (!) for this property
+#
+# Be aware that this property is deprecated and will be removed in Karaf 4.0. So, if you need to
+# set this to false, please use this only as a temporary solution!
+#
+#respectStartLvlDuringFeatureStartup=true
+
+
+#
+# Defines if the startlvl should be respected during feature uninstall. The default value is true.
+# If true, means stop bundles respecting the descend order of start level in a certain feature.
+#
+#respectStartLvlDuringFeatureUninstall=true
+
+#
+# Comma separated list of features repositories to register by default
+#
+#featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.1/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.1/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.0/xml/features,mvn:org.apache.karaf.features/spring/3.0.1/xml/features,mvn:org.opendaylight.integration/features-integration/0.2.2-Helium-SR2/xml/features,mvn:org.jolokia/jolokia-osgi/1.1.4/xml/features
+
+featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.1/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.1/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.0/xml/features,mvn:org.apache.karaf.features/spring/3.0.1/xml/features,mvn:org.opendaylight.integration/features-integration/0.2.2-Helium-SR2/xml/features,mvn:org.jolokia/jolokia-osgi/1.1.4/xml/features
+
+#
+# Comma separated list of features to install at startup
+# Default features:
+#   config,standard,region,package,kar,ssh,management
+#
+featuresBoot= {{ odl_features | join(",") }}
+
+#
+# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
+#
+featuresBootAsynchronous=false
+
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/tomcat-server.xml b/deploy/adapters/ansible/roles/odl_cluster/templates/tomcat-server.xml
new file mode 100644
index 00000000..bc7ab13d
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/tomcat-server.xml
@@ -0,0 +1,61 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<Server>
+  <!--APR library loader. Documentation at /docs/apr.html -->
+  <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
+  <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
+  <Listener className="org.apache.catalina.core.JasperListener" />
+  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
+  <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
+  <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
+  <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
+
+  <Service name="Catalina">
+    <Connector port="{{ odl_api_port }}" protocol="HTTP/1.1"
+               connectionTimeout="20000"
+               redirectPort="8443" />
+
+<!--
+    Please remove the comments around the following Connector tag to enable HTTPS Authentication support.
+    Remember to add a valid keystore in the configuration folder.
+    More info : http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+-->
+
+    <!--
+    <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
+               maxThreads="150" scheme="https" secure="true"
+               clientAuth="false" sslProtocol="TLS"
+               keystoreFile="configuration/keystore"
+               keystorePass="changeit"/>
+     -->
+
+    <Engine name="Catalina" defaultHost="localhost">
+      <Host name="localhost" appBase=""
+            unpackWARs="false" autoDeploy="false"
+            deployOnStartup="false" createDirs="false">
+        <Realm className="org.opendaylight.controller.karafsecurity.ControllerCustomRealm" />
+        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
+        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
+               prefix="web_access_log_" suffix=".txt" resolveHosts="false"
+               rotatable="true" fileDateFormat="yyyy-MM"
+               pattern="%{yyyy-MM-dd HH:mm:ss.SSS z}t - [%a] - %r"/>
+      </Host>
+    </Engine>
+  </Service>
+</Server>
+
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
new file mode 100644
index 00000000..c0e49019
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/Debian.yml
@@ -0,0 +1,4 @@
+---
+
+packages:
+  - openjdk-7-jdk
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
new file mode 100644
index 00000000..9c441d62
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/RedHat.yml
@@ -0,0 +1,4 @@
+---
+
+packages:
+  - java-1.7.0-openjdk
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
new file mode 100644
index 00000000..b98810a3
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
@@ -0,0 +1,14 @@
+---
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
+
+odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz
+odl_pkg_name: karaf.tar.gz
+odl_home: "/opt/opendaylight-0.3.0/"
+odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core']
+odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi']
+odl_features: "{{ odl_base_features + odl_extra_features }}"
+odl_api_port: 8080
+
+packages_noarch: []
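vars/main.yml pins the Lithium 0.3.0 distribution and assembles odl_features from odl_base_features plus odl_extra_features (the clustering, RESTCONF and Jolokia features this role relies on); note that odl_api_port is defined twice, so the second assignment is redundant. A deployment that wants additional Karaf boot features can override the extra list outside the role, for example with a hypothetical group_vars file like the sketch below (the default extras are repeated; append any further feature names required):

    # group_vars/odl.yml (hypothetical override, not part of this change)
    odl_extra_features: ['odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'http', 'jolokia-osgi']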