Diffstat (limited to 'deploy/adapters')
16 files changed, 83 insertions, 54 deletions
diff --git a/deploy/adapters/ansible/openstack/templates/neutron.conf b/deploy/adapters/ansible/openstack/templates/neutron.conf
index 02a2cfa2..ebc46f99 100644
--- a/deploy/adapters/ansible/openstack/templates/neutron.conf
+++ b/deploy/adapters/ansible/openstack/templates/neutron.conf
@@ -428,7 +428,8 @@ min_pool_size = 1
 max_pool_size = 100
 
 # Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
+idle_timeout = 30
+use_db_reconnect = True
 
 # If set, use this value for max_overflow with sqlalchemy
 max_overflow = 100
diff --git a/deploy/adapters/ansible/openstack/templates/nova.conf b/deploy/adapters/ansible/openstack/templates/nova.conf
index b15032f5..2364132e 100644
--- a/deploy/adapters/ansible/openstack/templates/nova.conf
+++ b/deploy/adapters/ansible/openstack/templates/nova.conf
@@ -54,6 +54,9 @@ memcached_servers = {{ internal_vip.ip }}:11211
 [database]
 # The SQLAlchemy connection string used to connect to the database
 connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+idle_timeout = 30
+use_db_reconnect = True
+pool_timeout = 10
 
 [keystone_authtoken]
 auth_uri = http://{{ internal_vip.ip }}:5000/2.0
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
index 7c9545ef..49a34dab 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
@@ -5,7 +5,7 @@
     - create_ceph_cluster
 
 - name: default config for ceph
-  shell: cd {{ ceph_cluster_dir[0] }} && echo "osd_journal_size = 1024" >> ceph.conf && echo "osd_pool_default_size = 2" >> ceph.conf
+  shell: cd {{ ceph_cluster_dir[0] }} && echo "osd_journal_size = 1024" >> ceph.conf && echo "osd_pool_default_size = 2" >> ceph.conf && echo "public_network = {{ public_cidr }}" >> ceph.conf && echo "cluster_network = {{ storage_cidr }} " >> ceph.conf
 
 - name: install ceph for every nodes includes jumpserver
   shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy install {{ ceph_cluster_hosts.stdout_lines[0]}}
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml
index d907170f..920fbee4 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_RedHat.yml
@@ -5,7 +5,7 @@
    - create_ceph_cluster
 
 - name: default config for ceph
-  shell: cd {{ ceph_cluster_dir[0] }} && echo "osd_journal_size = 1024" >> ceph.conf && echo "osd_pool_default_size = 2" >> ceph.conf
+  shell: cd {{ ceph_cluster_dir[0] }} && echo "osd_journal_size = 1024" >> ceph.conf && echo "osd_pool_default_size = 2" >> ceph.conf && echo "public_network = {{ public_cidr }}" >> ceph.conf && echo "cluster_network = {{ storage_cidr }} " >> ceph.conf
 
 - name: install ceph for every nodes includes jumpserver
   shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy install --no-adjust-repos --repo-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa --gpg-url http://10.1.0.12/cblr/repo_mirror/centos7-juno-ppa/ceph_key_release.asc {{ ceph_cluster_hosts.stdout_lines[0]}}
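The neutron.conf and nova.conf hunks above drop the SQLAlchemy idle timeout from 3600s to 30s and enable oslo.db reconnect support, so connections silently dropped by the MySQL VIP behind HAProxy are reopened instead of surfacing as query errors; the ceph hunks additionally pin Ceph's public and cluster traffic to the deployment's public and storage CIDRs. Applied by hand on a running controller, the database change would look roughly like this (a sketch using crudini, which this repo already uses elsewhere; the /etc paths are the stock package locations, not taken from this commit):

    # Shorten idle timeout and enable automatic DB reconnects for neutron and nova.
    crudini --set /etc/neutron/neutron.conf database idle_timeout 30
    crudini --set /etc/neutron/neutron.conf database use_db_reconnect True
    crudini --set /etc/nova/nova.conf database idle_timeout 30
    crudini --set /etc/nova/nova.conf database use_db_reconnect True
    crudini --set /etc/nova/nova.conf database pool_timeout 10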
diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
index cf41817b..b61e6562 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
@@ -61,3 +61,4 @@ admin_password = {{ CINDER_PASS }}
 
 [database]
 connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
diff --git a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
index 0b855da5..fdcac69d 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
@@ -60,3 +60,4 @@ admin_password = {{ CINDER_PASS }}
 
 [database]
 connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
+idle_timeout = 30
diff --git a/deploy/adapters/ansible/roles/glance/templates/glance-api.conf b/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
index 737b9a3a..ab63e054 100644
--- a/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
+++ b/deploy/adapters/ansible/roles/glance/templates/glance-api.conf
@@ -586,7 +586,7 @@ connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
 # Deprecated group/name - [DATABASE]/sql_idle_timeout
 # Deprecated group/name - [sql]/idle_timeout
-#idle_timeout = 3600
+idle_timeout = 30
 
 # Minimum number of SQL connections to keep open in a pool
 # (integer value)
diff --git a/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf b/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
index 1fedb0b5..06c403fd 100644
--- a/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
+++ b/deploy/adapters/ansible/roles/glance/templates/glance-registry.conf
@@ -106,7 +106,7 @@ connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
 # Deprecated group/name - [DATABASE]/sql_idle_timeout
 # Deprecated group/name - [sql]/idle_timeout
-#idle_timeout = 3600
+idle_timeout = 30
 
 # Minimum number of SQL connections to keep open in a pool
 # (integer value)
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index 7b401279..3d76a5bc 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -20,10 +20,10 @@ defaults
     timeout http-request 10s
     timeout queue 1m
     timeout connect 10s
-    timeout client 6m
-    timeout server 6m
+    timeout client 50s
+    timeout server 50s
     timeout check 10s
-    retries 5
+    retries 3
 
 listen proxy-mysql
     bind {{ internal_vip.ip }}:3306
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
index fc8bf1f1..b022a084 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
+++ b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
@@ -614,7 +614,7 @@ connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
 # Deprecated group/name - [DATABASE]/sql_idle_timeout
 # Deprecated group/name - [sql]/idle_timeout
-#idle_timeout=3600
+idle_timeout=30
 
 # Minimum number of SQL connections to keep open in a pool
 # (integer value)
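With every service's DB idle_timeout now at 30s, the HAProxy client/server timeouts drop from 6m to 50s and retries from 5 to 3: services recycle their SQL connections well before the proxy would cut them, and the proxy itself gives up on dead backends faster. After templating such a change onto the HA nodes, a config check and graceful reload (standard haproxy/service commands, not part of this commit) would be:

    haproxy -c -f /etc/haproxy/haproxy.cfg   # validate syntax before touching the running proxy
    service haproxy reload                   # pick up the new timeouts without dropping the VIP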
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
index 37dc1bd1..f06ce193 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
@@ -8,3 +8,11 @@
 - name: Install ODL Cluster on Compute
   include: openvswitch.yml
   when: groups['odl']|length !=0 and inventory_hostname not in groups['odl']
+
+- name: check out new flow table if enable
+  shell: ovs-ofctl --protocol=OpenFlow13 dump-flows br-prv | grep CONTROLLER; while [ $? -ne 0 ]; do sleep 10; ovs-ofctl --protocol=OpenFlow13 dump-flows br-prv | grep CONTROLLER; done
+  when: groups['odl']|length !=0
+
+- name: remove controller from br-prv
+  shell: ovs-vsctl del-controller br-prv;
+  when: groups['odl']|length !=0
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
index e2af1459..0c13ff21 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
@@ -144,10 +144,10 @@
   shell: >
     rm -rf {{ odl_home }}/data/*;
 
-- name: chown OpenDaylight Directory and Files
-  shell: >
-    chown -R odl:odl "{{ odl_home }}";
-    chown odl:odl "{{ service_file.dst }}";
+#- name: chown OpenDaylight Directory and Files
+#  shell: >
+#    chown -R odl:odl "{{ odl_home }}";
+#    chown odl:odl "{{ service_file.dst }}";
 
 ##########################################################################################################
 
@@ -164,6 +164,12 @@
 
 - name: Run OpenVSwitch Script
   include: openvswitch.yml
+
+- name: chown OpenDaylight Directory and Files
+  shell: >
+    chown -R odl:odl "{{ odl_home }}";
+    chown odl:odl "{{ service_file.dst }}";
+
 #- name: Configure Neutron1
 #  shell: >
 #    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
index e52b18ff..3bef2af3 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
@@ -19,7 +19,7 @@
 
 - name: Stop the Open vSwitch service and clear existing OVSDB
   shell: >
-    ovs-vsctl del-br br-int ;
+    ovs-ofctl del-flows br-int ;
     ovs-vsctl del-br br-tun ;
     ovs-vsctl del-manager ;
 
@@ -43,7 +43,7 @@
 - name: Configure Neutron1
   shell: >
     crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
-    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vlan;
+    crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
 
 #- name: Adjust Service Daemon
 #  shell: >
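The new "check out new flow table" task polls br-prv until ODL has pushed a CONTROLLER flow, then del-controller detaches OVS so the installed flows keep working standalone. As written, the while loop never gives up, so an ODL that never programs the bridge hangs the play forever; a bounded variant of the same check (a sketch, not part of this commit) would be:

    # Give ODL up to 30 x 10s to install a CONTROLLER flow on br-prv, then fail.
    for i in $(seq 1 30); do
        ovs-ofctl --protocol=OpenFlow13 dump-flows br-prv | grep -q CONTROLLER && break
        sleep 10
    done
    ovs-ofctl --protocol=OpenFlow13 dump-flows br-prv | grep -q CONTROLLER || exit 1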
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.conf b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.conf
index d04cac22..105bb265 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.conf
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/opendaylight.conf
@@ -13,7 +13,7 @@ env KARAF_HOME="/opt/opendaylight-0.3.0"
 #env JAVA_HOME="/usr/lib/jvm/java-7-openjdk-amd64"
 env JAVA_HOME="/usr/lib/jvm/java-8-oracle"
 env JAVA_OPTS="-server -Xms128M -Xmx4096M -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -XX:MaxPermSize=512M -Dcom.sun.management.jmxremote"
-env OPTS="-Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true"
+env OPTS="-Dkaraf.startLocalConsole=true -Dkaraf.startRemoteShell=true"
 
 env MAIN="org.apache.karaf.main.Main"
 
@@ -26,6 +26,7 @@ script
   export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:$KARAF_BASE/lib"
   export JAVA_ENDORSED_DIRS="${JAVA_HOME}/jre/lib/endorsed:${JAVA_HOME}/lib/endorsed:${KARAF_HOME}/lib/endorsed"
   export JAVA_EXT_DIRS="${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext:${KARAF_HOME}/lib/ext"
+  export JAVA_SECURITY_PRO="${KARAF_HOME}/etc/odl.java.security"
 
   for file in "$KARAF_HOME"/lib/karaf*.jar
   do
@@ -36,6 +37,6 @@ script
     fi
   done
 
-  exec $JAVA_HOME/bin/java $JAVA_OPTS -Djava.endorsed.dirs="${JAVA_ENDORSED_DIRS}" -Djava.ext.dirs="${JAVA_EXT_DIRS}" -Dkaraf.instances="${KARAF_HOME}/instances" -Dkaraf.home="$KARAF_HOME" -Dkaraf.base="$KARAF_BASE" -Dkaraf.data="$KARAF_DATA" -Dkaraf.etc="$KARAF_ETC" -Djava.io.tmpdir="$KARAF_DATA/tmp" -Djava.util.logging.config.file="$KARAF_BASE/etc/java.util.logging.properties" $KARAF_OPTS $OPTS -classpath "$CLASSPATH" $MAIN
+  exec $JAVA_HOME/bin/java -Djava.security.properties="${JAVA_SECURITY_PRO}" $JAVA_OPTS -Djava.endorsed.dirs="${JAVA_ENDORSED_DIRS}" -Djava.ext.dirs="${JAVA_EXT_DIRS}" -Dkaraf.instances="${KARAF_HOME}/instances" -Dkaraf.home="$KARAF_HOME" -Dkaraf.base="$KARAF_BASE" -Dkaraf.data="$KARAF_DATA" -Dkaraf.etc="$KARAF_ETC" -Djava.io.tmpdir="$KARAF_DATA/tmp" -Djava.util.logging.config.file="$KARAF_BASE/etc/java.util.logging.properties" $KARAF_OPTS $OPTS -classpath "$CLASSPATH" $MAIN
 
 end script
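The upstart template now launches Karaf with -Djava.security.properties pointing at etc/odl.java.security, which overlays the JRE's default security properties (commonly used to adjust keys such as jdk.tls.disabledAlgorithms) without editing the system java.security file. A quick verification on a controller (illustrative commands only; the override file's contents are not shown in this commit) might be:

    ls -l /opt/opendaylight-0.3.0/etc/odl.java.security
    # -XshowSettings:properties prints the JVM's effective system properties,
    # so the -D override should appear among them:
    java -Djava.security.properties=/opt/opendaylight-0.3.0/etc/odl.java.security \
         -XshowSettings:properties -version 2>&1 | grep java.security.properties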
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
index 94b41557..2a0e2709 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
@@ -7,9 +7,9 @@
   when: groups['opencontrail_control']|length !=0
 # Compass install OpenStack with not only OpenContrail but also ODL or ONOS, and sometimes user just installs OpenStack, so item 'opencontrail_control' is kind of a mark that whether Compass install OpenContrail or not.
 
-- name: Install kernal on all hosts for Open Contrail
-  include: install/install-kernal.yml
-  when: groups['opencontrail_control']|length !=0
+#- name: Install kernal on all hosts for Open Contrail
+#  include: install/install-kernel.yml
+#  when: groups['opencontrail_control']|length !=0
 
 - name: Install database for Open Contrail
   include: install/install-database.yml
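This hunk disables the kernel-install include for Open Contrail, so deployments now rely on the hosts already running a kernel the vrouter module supports. A one-off check across the inventory (an illustrative ad-hoc command, not part of this commit) could confirm that before a run:

    # List the running kernel on every host so mismatches stand out.
    ansible all -i inventory -m shell -a 'uname -r'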
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
index 8245c046..f4ad05cb 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
@@ -1,39 +1,47 @@
 ---
-- hosts: config
-  sudo: yes
-  tasks:
-    - name: "provision config node"
-      shell: "python /opt/contrail/utils/provision_config_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
+#- hosts: config
+#  sudo: yes
+#  tasks:
+- name: "provision config node"
+  shell: "python /opt/contrail/utils/provision_config_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
+  when: inventory_hostname in groups['opencontrail_config']
 
-- hosts: database
-  sudo: yes
-  tasks:
-    - name: "provision database node"
-      shell: "python /opt/contrail/utils/provision_database_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
+#- hosts: database
+#  sudo: yes
+#  tasks:
+- name: "provision database node"
+  shell: "python /opt/contrail/utils/provision_database_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
+  when: inventory_hostname in groups['opencontrail_database']
+
+
+#- hosts: collector
+#  sudo: yes
+#  tasks:
+- name: "provision collector node"
+  shell: "python /opt/contrail/utils/provision_analytics_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
+  when: inventory_hostname in groups['opencontrail_collector']
 
-- hosts: collector
-  sudo: yes
-  tasks:
-    - name: "provision collector node"
-      shell: "python /opt/contrail/utils/provision_analytics_node.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }}"
+#- hosts: control
+#  sudo: yes
+#  tasks:
+- name: "provision control node"
+  shell: "python /opt/contrail/utils/provision_control.py --api_server_ip {{ contrail_haproxy_address }} --api_server_port 8082 --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }} --router_asn {{ contrail_router_asn }}"
+  when: inventory_hostname in groups['opencontrail_control']
+
+#- hosts: config
+#  sudo: yes
+#  tasks:
+- name: "provision metadata services"
+  shell: "python /opt/contrail/utils/provision_linklocal.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --ipfabric_service_ip 10.84.50.1 --ipfabric_service_port 8775 --linklocal_service_name metadata --linklocal_service_ip 169.254.169.254 --linklocal_service_port 80"
+  run_once: yes
+  when: inventory_hostname in groups['opencontrail_config']
 
-- hosts: control
-  sudo: yes
-  tasks:
-    - name: "provision control node"
-      shell: "python /opt/contrail/utils/provision_control.py --api_server_ip {{ contrail_haproxy_address }} --api_server_port 8082 --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --host_name {{ ansible_hostname }} --host_ip {{ contrail_address }} --router_asn {{ contrail_router_asn }}"
-- hosts: config
-  sudo: yes
-  tasks:
-    - name: "provision metadata services"
-      shell: "python /opt/contrail/utils/provision_linklocal.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --oper add --ipfabric_service_ip 10.84.50.1 --ipfabric_service_port 8775 --linklocal_service_name metadata --linklocal_service_ip 169.254.169.254 --linklocal_service_port 80"
-      run_once: yes
-
-
-- hosts: config
-  sudo: yes
-  tasks:
-    - name: "provision encap"
-      shell: "python /opt/contrail/utils/provision_encap.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --oper add --encap_priority MPLSoUDP,MPLSoGRE,VXLAN"
-      run_once: yes
+#- hosts: config
+#  sudo: yes
+#  tasks:
+- name: "provision encap"
+  shell: "python /opt/contrail/utils/provision_encap.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --oper add --encap_priority MPLSoUDP,MPLSoGRE,VXLAN"
+  run_once: yes
+  when: inventory_hostname in groups['opencontrail_config']
+
\ No newline at end of file
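The provisioning steps above were rewritten from standalone plays (hosts:/sudo:/tasks:) into flat tasks guarded by when: inventory_hostname in groups[...], so the file can now be included from the open-contrail role inside a single play. The underlying helper scripts are unchanged; run by hand for debugging, one of them would look like this (the IP, credentials, and hostnames are placeholders, not values from this commit):

    # Illustrative manual invocation of one Contrail provisioning helper.
    python /opt/contrail/utils/provision_config_node.py \
        --api_server_ip 10.1.0.50 --admin_user admin --admin_password secret \
        --admin_tenant_name admin --oper add \
        --host_name node1 --host_ip 10.1.0.51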