Diffstat (limited to 'deploy/adapters')
122 files changed, 9408 insertions, 12 deletions
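The recurring change in the modified mitaka_xenial roles below is to stop bundling large tarballs (swift-lib.tar.gz, odl-aaa-moon.tar.gz) inside the roles and instead download them at deploy time from the Compass HTTP server recorded in /etc/compass.conf. A minimal sketch of that pattern, assembled from the tasks in the hunks that follow (pkg_tarball is a hypothetical variable standing in for swift_lib or odl_aaa_moon), looks like:

    - name: get image http server
      shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
      register: http_server

    - name: download package from the compass server
      # pkg_tarball is a placeholder for the real variables (swift_lib, odl_aaa_moon)
      get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ pkg_tarball }}" dest=/tmp/

    - name: unarchive package
      command: su -s /bin/sh -c "tar xvf /tmp/{{ pkg_tarball }} -C /tmp/"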
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/moon-odl.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/moon-odl.yml
index 25306059..b89b2823 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/moon-odl.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/tasks/moon-odl.yml
@@ -36,8 +36,14 @@
 - name: copy settings.xml
   template: src=settings.xml dest=/root/.m2/settings.xml

-- name: upload swift lib
-  unarchive: src=odl-aaa-moon.tar.gz dest=/home/
+#- name: upload swift lib
+#  unarchive: src=odl-aaa-moon.tar.gz dest=/home/
+
+- name: download odl-aaa-moon package
+  get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ odl_aaa_moon }}" dest=/home/
+
+- name: unarchive odl-aaa-moon package
+  command: su -s /bin/sh -c "tar xvf /home/{{ odl_aaa_moon }} -C /home/"

 - name: install aaa
   shell: >
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml
index da0c9efd..6ccb3dd8 100755
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/vars/main.yml
@@ -19,6 +19,8 @@ odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'ma
 odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi']
 odl_features: "{{ odl_base_features + odl_extra_features }}"

+odl_aaa_moon: odl-aaa-moon.tar.gz
+
 jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz

 controller_packages_noarch: []
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml
index 4e2651a7..10a513f0 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/tasks/swift.yml
@@ -9,6 +9,10 @@
 ---
 - include_vars: "{{ ansible_os_family }}.yml"

+- name: get image http server
+  shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+  register: http_server
+
 - include: swift-controller1.yml
   when: inventory_hostname in groups['controller']

@@ -42,8 +46,14 @@
     - rsync
   when: inventory_hostname in groups['compute']

-- name: upload swift lib
-  unarchive: src=swift-lib.tar.gz dest=/tmp/
+#- name: upload swift lib
+#  unarchive: src=swift-lib.tar.gz dest=/tmp/
+
+- name: download swift lib package
+  get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/{{ swift_lib }}" dest=/tmp/
+
+- name: unarchive swift lib
+  command: su -s /bin/sh -c "tar xvf /tmp/{{ swift_lib }} -C /tmp/"

 - name: copy swift lib
   command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/"
diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml
index 540068da..dc009551 100644
--- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml
+++ b/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/vars/main.yml
@@ -7,6 +7,9 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 ---
+
+swift_lib: swift-lib.tar.gz
+
 packages_noarch: []

 services_noarch: []
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/.gitkeep
b/deploy/adapters/ansible/openstack_newton_xenial/.gitkeep new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/.gitkeep diff --git a/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml new file mode 100644 index 00000000..3d5b0a1c --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/HA-ansible-multinodes.yml @@ -0,0 +1,258 @@ +--- +- hosts: all + remote_user: root + pre_tasks: + - name: make sure ssh dir exist + file: + path: '{{ item.path }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + state: directory + mode: 0755 + with_items: + - path: /root/.ssh + owner: root + group: root + + - name: write ssh config + copy: + content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no" + dest: '{{ item.dest }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + mode: 0600 + with_items: + - dest: /root/.ssh/config + owner: root + group: root + + - name: generate ssh keys + shell: if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""; else echo "already gen ssh key!"; fi; + + - name: fetch ssh keys + fetch: src=/root/.ssh/id_rsa.pub dest=/tmp/ssh-keys-{{ ansible_hostname }} flat=yes + + - authorized_key: + user: root + key: "{{ lookup('file', 'item') }}" + with_fileglob: + - /tmp/ssh-keys-* + max_fail_percentage: 0 + roles: + - common + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - setup-network + +- hosts: ha + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ha + +- hosts: controller + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - memcached + - database + - mq + - keystone + - apache + - nova-controller + - neutron-controller + - cinder-controller + - glance + - neutron-common + - neutron-network + - ceilometer_controller +# - ext-network + - dashboard + - heat +# - aodh + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - storage + +- hosts: compute + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - nova-compute + - neutron-compute + - cinder-volume + - ceilometer_compute + +#- hosts: all +# remote_user: root +# accelerate: true +# max_fail_percentage: 0 +# roles: +# - moon + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - secgroup + +- hosts: ceph_adm + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: [] + # - ceph-deploy + +- hosts: ceph + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-purge + - ceph-config + +- hosts: ceph_mon + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-mon + +- hosts: ceph_osd + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-osd + +- hosts: ceph + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ceph-openstack + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - monitor + + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + tasks: + - name: set bash to nova + user: + name: nova + shell: /bin/bash + + - name: make sure ssh dir exist + file: + path: '{{ item.path }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + state: directory + mode: 0755 + with_items: + - path: /var/lib/nova/.ssh + owner: nova + group: nova + + - name: 
copy ssh keys for nova + shell: cp -rf /root/.ssh/id_rsa /var/lib/nova/.ssh; + + - name: write ssh config + copy: + content: "UserKnownHostsFile /dev/null\nStrictHostKeyChecking no" + dest: '{{ item.dest }}' + owner: '{{ item.owner }}' + group: '{{ item.group }}' + mode: 0600 + with_items: + - dest: /var/lib/nova/.ssh/config + owner: nova + group: nova + + - authorized_key: + user: nova + key: "{{ lookup('file', 'item') }}" + with_fileglob: + - /tmp/ssh-keys-* + + - name: chown ssh file + shell: chown -R nova:nova /var/lib/nova/.ssh; + + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - odl_cluster + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - onos_cluster + +- hosts: all + remote_user: root + sudo: True + max_fail_percentage: 0 + roles: + - open-contrail + +- hosts: all + remote_user: root + accelerate: true + serial: 1 + max_fail_percentage: 0 + roles: + - odl_cluster_neutron + +- hosts: all + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - odl_cluster_post + +- hosts: controller + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - ext-network + +- hosts: controller + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - controller-recovery + +- hosts: compute + remote_user: root + accelerate: true + max_fail_percentage: 0 + roles: + - compute-recovery + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/handlers/main.yml new file mode 100644 index 00000000..b3399e0c --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/handlers/main.yml @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart aodh services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_config.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_config.yml new file mode 100644 index 00000000..e60d5338 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_config.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: aodh db sync + shell: su -s /bin/sh -c "aodh-dbsync" aodh + notify: + - restart aodh services + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_install.yml new file mode 100644 index 00000000..eb51fbea --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/aodh_install.yml @@ -0,0 +1,31 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install aodh packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: update aodh conf + template: src={{ item }} dest=/etc/aodh/aodh.conf + backup=yes + with_items: + - aodh.conf.j2 +# - api_paste.ini.j2 +# - policy.json.j2 + notify: + - restart aodh services + +- name: write services to monitor list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: remove default sqlite db + shell: rm /var/lib/aodh/aodh.sqlite || touch aodh.sqllite.db.removed diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/main.yml new file mode 100644 index 00000000..9b61915f --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/tasks/main.yml @@ -0,0 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include: aodh_install.yml + tags: + - install + - aodh_install + - aodh + +- include: aodh_config.yml + when: inventory_hostname == groups['controller'][0] + tags: + - config + - aodh_config + - aodh + +- meta: flush_handlers diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2 new file mode 100644 index 00000000..d4d232be --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/aodh.conf.j2 @@ -0,0 +1,46 @@ +{% set memcached_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} + +[DEFAULT] +bind_host = {{ internal_ip }} +bind_port = 8042 +rpc_backend = rabbit +auth_strategy = keystone +debug = True + +[oslo_messaging_rabbit] +rabbit_hosts = {{ internal_vip.ip }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +#rabbit_use_ssl = false + +[database] +connection = mysql://aodh:{{ AODH_DBPASS }}@{{ db_host }}/aodh + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000 +auth_url = http://{{ internal_vip.ip }}:35357 +identity_uri = http://{{ internal_vip.ip }}:35357 +auth_plugin = password +project_domain_id = default +user_domain_id = default +project_name = service +username = aodh +password = {{ AODH_PASS }} +memcached_servers = {{ memcached_servers }} +token_cache_time = 300 +revocation_cache_time = 60 + +[service_credentials] +os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0 +os_username = aodh +os_tenant_name = service +os_password = {{ AODH_PASS }} +os_endpoint_type = internalURL +os_region_name = RegionOne + +[api] +host = {{ internal_ip }} diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/api_paste.ini.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/api_paste.ini.j2 new file mode 100644 index 00000000..151789c4 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/api_paste.ini.j2 @@ -0,0 +1,22 @@ +# aodh API WSGI Pipeline +# Define the filters that make up the pipeline for processing WSGI requests +# Note: This pipeline is PasteDeploy's term rather than aodh's pipeline +# used for processing samples + +# Remove authtoken from the pipeline if you don't want to use keystone authentication +[pipeline:main] +pipeline = cors request_id authtoken api-server + +[app:api-server] +paste.app_factory = aodh.api.app:app_factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +oslo_config_project = aodh + +[filter:request_id] +paste.filter_factory = oslo_middleware:RequestId.factory + +[filter:cors] +paste.filter_factory = oslo_middleware.cors:filter_factory +oslo_config_project = aodh diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/policy.json.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/policy.json.j2 new file mode 100644 index 00000000..4fd873e9 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/templates/policy.json.j2 @@ -0,0 +1,20 @@ 
+{ + "context_is_admin": "role:admin", + "segregation": "rule:context_is_admin", + "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "telemetry:get_alarm": "rule:admin_or_owner", + "telemetry:get_alarms": "rule:admin_or_owner", + "telemetry:query_alarm": "rule:admin_or_owner", + + "telemetry:create_alarm": "", + "telemetry:change_alarm": "rule:admin_or_owner", + "telemetry:delete_alarm": "rule:admin_or_owner", + + "telemetry:get_alarm_state": "rule:admin_or_owner", + "telemetry:change_alarm_state": "rule:admin_or_owner", + + "telemetry:alarm_history": "rule:admin_or_owner", + "telemetry:query_alarm_history": "rule:admin_or_owner" +} diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/Debian.yml new file mode 100644 index 00000000..bdf4655e --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/Debian.yml @@ -0,0 +1,22 @@ +############################################################################# +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################# +--- +packages: + - aodh-api + - aodh-evaluator + - aodh-notifier + - aodh-listener + - aodh-expirer + - python-ceilometerclient + +services: + - aodh-api + - aodh-notifier + - aodh-evaluator + - aodh-listener diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/RedHat.yml new file mode 100644 index 00000000..a0381c6b --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/RedHat.yml @@ -0,0 +1,22 @@ +############################################################################# +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################# +--- +packages: + - openstack-aodh-api + - openstack-aodh-evaluator + - openstack-aodh-notifier + - openstack-aodh-listener + - openstack-aodh-expirer + - python-ceilometerclient + +services: + - openstack-aodh-api + - openstack-aodh-notifier + - openstack-aodh-evaluator + - openstack-aodh-listener diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/main.yml new file mode 100644 index 00000000..b17f6ed0 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/aodh/vars/main.yml @@ -0,0 +1,12 @@ +############################################################################## +## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +## +## All rights reserved. 
This program and the accompanying materials +## are made available under the terms of the Apache License, Version 2.0 +## which accompanies this distribution, and is available at +## http://www.apache.org/licenses/LICENSE-2.0 +############################################################################### +--- +packages_noarch: [] + +services_noarch: [] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/apache/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/apache/tasks/main.yml new file mode 100755 index 00000000..ad1d544f --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/apache/tasks/main.yml @@ -0,0 +1,31 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes" + with_items: packages | union(packages_noarch) + when ansible_os_family != 'Debian' + +- name: assure listen port exist + template: + dest: '{{ apache_config_dir }}/ports.conf' + src: ports.conf.j2 + notify: + - restart apache related services + +- name: remove default listen port on centos + lineinfile: + dest: /etc/httpd/conf/httpd.conf + state: absent + regexp: 'Listen 80' + when: ansible_os_family == 'RedHat' + +- meta: flush_handlers diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/apache/templates/ports.conf.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/apache/templates/ports.conf.j2 new file mode 100644 index 00000000..f6c9c8b1 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/apache/templates/ports.conf.j2 @@ -0,0 +1,3 @@ +Listen {{ internal_ip }}:80 +Listen {{ internal_ip }}:5000 +Listen {{ internal_ip }}:35357 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/Debian.yml new file mode 100644 index 00000000..2a3c3249 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/Debian.yml @@ -0,0 +1,34 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - ceilometer-api + - ceilometer-collector + - ceilometer-agent-central + - ceilometer-agent-notification +# - ceilometer-alarm-evaluator +# - ceilometer-alarm-notifier + - python-ceilometerclient + +ceilometer_services: + - ceilometer-agent-central + - ceilometer-agent-notification + - ceilometer-api + - ceilometer-collector +# - ceilometer-alarm-evaluator +# - ceilometer-alarm-notifier + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: glance.j2 + dest: + - /etc/glance/glance-api.conf + - /etc/glance/glance-registry.conf diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/RedHat.yml new file mode 100644 index 00000000..6c5f53ec --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceilometer_controller/vars/RedHat.yml @@ -0,0 +1,36 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +ceilometer_packages: + - openstack-ceilometer-api + - openstack-ceilometer-collector + - openstack-ceilometer-central + - openstack-ceilometer-notification +# - openstack-ceilometer-alarm + - python-ceilometerclient + +ceilometer_services: + - openstack-ceilometer-central + - openstack-ceilometer-notification + - openstack-ceilometer-api + - openstack-ceilometer-collector +# - openstack-ceilometer-alarm-evaluator +# - openstack-ceilometer-alarm-notifier + +ceilometer_configs_templates: + - src: ceilometer.j2 + dest: + - /etc/ceilometer/ceilometer.conf + - src: cinder.j2 + dest: + - /etc/cinder/cinder.conf + - src: glance.j2 + dest: + - /etc/glance/glance-api.conf + - /etc/glance/glance-registry.conf diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/tasks/install_mon.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/tasks/install_mon.yml new file mode 100644 index 00000000..1d14c2d2 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/tasks/install_mon.yml @@ -0,0 +1,43 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +- include_vars: "{{ ansible_os_family }}.yml" + +- name: Create a default data directory + file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}" state="directory" + +- name: Populate the monitor daemon + shell: "ceph-mon --mkfs -i {{ inventory_hostname }} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring" + +- name: Change ceph/mon dir owner to ceph + shell: "chown -R ceph:ceph /var/lib/ceph/mon" + when: ansible_os_family == "Debian" + +- name: copy templates + template: + src: ceph-mon.service + dest: /lib/systemd/system/ceph-mon.service + mode: 0755 + when: ansible_os_family == "Debian" + +- name: Touch the done and auto start file + file: path="/var/lib/ceph/mon/ceph-{{ inventory_hostname }}/{{ item }}" state="touch" + with_items: + - "done" + - "{{ ceph_start_type }}" + +- name: start mon daemon + shell: "{{ ceph_start_script }}" + +- name: wait for creating osd keyring + wait_for: path=/var/lib/ceph/bootstrap-osd/ceph.keyring + +- name: fetch osd keyring + fetch: src="/var/lib/ceph/bootstrap-osd/ceph.keyring" dest="/tmp/ceph.osd.keyring" flat=yes + run_once: True diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/templates/ceph-mon.service b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/templates/ceph-mon.service new file mode 100644 index 00000000..5a3cf753 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/templates/ceph-mon.service @@ -0,0 +1,22 @@ +[Unit] +Description=Ceph cluster monitor daemon +Documentation=man:ceph-mon + +After=network-online.target local-fs.target ceph-create-keys.service +Wants=network-online.target local-fs.target ceph-create-keys.service + +PartOf=ceph.target + +[Service] +LimitNOFILE=1048576 +LimitNPROC=1048576 +EnvironmentFile=-/etc/default/ceph +Environment=CLUSTER=ceph +ExecStart=/usr/bin/ceph-mon -f --cluster ${CLUSTER} --id {{ inventory_hostname }} --setuser ceph --setgroup ceph +ExecReload=/bin/kill -HUP $MAINPID +Restart=on-failure +RestartSec=30 +TasksMax=infinity + +[Install] +WantedBy=multi-user.target diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/vars/Debian.yml new file mode 100644 index 00000000..a792acad --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-mon/vars/Debian.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +ceph_start_script: "service ceph-mon start" +ceph_start_type: "systemd" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml new file mode 100644 index 00000000..2097ca57 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/ceph_openstack_post.yml @@ -0,0 +1,19 @@ +############################################################################## +## Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +## +## All rights reserved. This program and the accompanying materials +## are made available under the terms of the Apache License, Version 2.0 +## which accompanies this distribution, and is available at +## http://www.apache.org/licenses/LICENSE-2.0 +############################################################################### +--- +- name: get mount info + command: mount + register: mount_info + +- name: try unmount image nfs directory + shell: | + umount /var/lib/glance/images + sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab + when: mount_info.stdout.find('images') != -1 + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/main.yml new file mode 100644 index 00000000..06c3acb6 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/tasks/main.yml @@ -0,0 +1,33 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- include_vars: "{{ ansible_os_family }}.yml" + tags: + - ceph_deploy + - ceph_openstack_pre + - ceph_openstack_conf + - ceph_openstack_post + - ceph_openstack + +- include: ceph_openstack_pre.yml + tags: + - ceph_deploy + - ceph_openstack_pre + - ceph_openstack + +- include: ceph_openstack_conf.yml + tags: + - ceph_deploy + - ceph_openstack_conf + - ceph_openstack + +- include: ceph_openstack_post.yml + tags: + - ceph_deploy + - ceph_openstack_post + - ceph_openstack diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/vars/Debian.yml new file mode 100755 index 00000000..db10bd14 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-openstack/vars/Debian.yml @@ -0,0 +1,30 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - ceph-deploy + - python-flask + - libgoogle-perftools4 + - libleveldb1v5 + - liblttng-ust0 + - libsnappy1v5 + - librbd1 + - librados2 + - python-ceph + - ceph + - ceph-mds + - ceph-common + - ceph-fs-common + - gdisk + +services: [] + +cinder_service: cinder-volume +nova_service: nova-compute +glance_service: glance-api diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-osd/tasks/install_osd.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-osd/tasks/install_osd.yml new file mode 100644 index 00000000..16f261ef --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ceph-osd/tasks/install_osd.yml @@ -0,0 +1,37 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: create osd lv and mount it on /var/local/osd + script: create_osd.sh + +- name: copy osd keyring + copy: src="/tmp/ceph.osd.keyring" dest="/var/lib/ceph/bootstrap-osd/ceph.keyring" + +- name: prepare osd disk + shell: ceph-disk prepare --fs-type xfs /var/local/osd + +- name: change local/osd dir owner to ceph + shell: chown -R ceph:ceph /var/local/osd + when: ansible_os_family == "Debian" + +- name: activate osd node + shell: ceph-disk activate /var/local/osd + +- name: enable ceph service + service: name=ceph enabled=yes + +- name: rebuild osd after reboot + lineinfile: dest=/etc/init/ceph-osd-all-starter.conf insertafter="^task" line="pre-start script\n set -e\n /opt/setup_storage/losetup.sh\n sleep 3\n mount /dev/storage-volumes/ceph0 /var/local/osd\nend script" + when: ansible_os_family == "Debian" + +- name: rebuild osd after reboot for centos + lineinfile: dest=/etc/init.d/ceph insertafter="^### END INIT INFO" line="\nsleep 1\nmount /dev/storage-volumes/ceph0 /var/local/osd" + when: ansible_os_family == "RedHat" + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/common/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/common/vars/Debian.yml new file mode 100644 index 00000000..46e0374f --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/common/vars/Debian.yml @@ -0,0 +1,31 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - ubuntu-cloud-keyring + - python-dev + - openvswitch-switch + - openvswitch-switch-dpdk + - python-memcache + - python-iniparse + - python-lxml + - python-crypto + #- python-d* #TODO, need remove + +pip_packages: + - crudini + - python-keyczar + - yang2tosca + +pip_conf: pip.conf + +services: + - ntp + + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/vars/Debian.yml new file mode 100644 index 00000000..aaeb8cdb --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/dashboard/vars/Debian.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: [] + +services: + - memcached + - apache2 + +apache_config_dir: /etc/apache2 +horizon_dir: /usr/share/openstack-dashboard/openstack_dashboard diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_cluster_debian.yml new file mode 100644 index 00000000..442cd18b --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_cluster_debian.yml @@ -0,0 +1,69 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: get cluster status + shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}' + register: cluster_status + when: + - inventory_hostname == haproxy_hosts.keys()[0] + +- name: start first node to create new cluster + shell: > + service mysql bootstrap; + service mysql start; + when: | + inventory_hostname == haproxy_hosts.keys()[0] + and not cluster_status.stdout | search("OPERATIONAL") + +- name: wait for cluster ready + shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'|awk '{print $2}' + register: cluster_status + until: cluster_status|success + failed_when: not cluster_status.stdout | search("OPERATIONAL") + retries: 10 + delay: 3 + when: | + inventory_hostname == haproxy_hosts.keys()[0] + and not cluster_status.stdout | search("OPERATIONAL") + +- name: if I in the cluster nodes + shell: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_incoming_addresses"'|awk '{print $2}' + register: cluster_nodes + changed_when: false + +- name: restart other nodes and join cluster1 + shell: service mysql restart; + when: | + inventory_hostname != haproxy_hosts.keys()[0] + and not cluster_nodes.stdout | search( "{{ internal_ip }}" ) + ignore_errors: True + +- name: delay 60 seconds + shell: sleep 60 + +- name: restart other nodes and join cluster2 + shell: service mysql restart; + when: | + inventory_hostname != haproxy_hosts.keys()[0] + and not cluster_nodes.stdout | search( "{{ internal_ip }}" ) + +- name: chmod directory + shell: > + chmod 755 -R /var/lib/mysql/ ; + chmod 755 -R /var/log/mysql/ ; + chmod 755 -R /etc/mysql/conf.d/; + +- name: restart first nodes + shell: service mysql restart + when: | + (inventory_hostname == haproxy_hosts.keys()[0] + and haproxy_hosts|length > 1 + and not cluster_nodes.stdout | search( '{{ internal_ip }}' )) + + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_install.yml new file mode 100644 index 00000000..1b08172d --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/tasks/mariadb_install.yml @@ -0,0 +1,70 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: change open file limit + copy: + content: "* - nofile 65536 }}" + dest: "/etc/security/limits.conf" + mode: 0755 + +- name: install python-mysqldb + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: maridb_packages | union(packages_noarch) + +- name: create conf dir for wsrep + file: path=/etc/my.cnf.d state=directory mode=0755 + when: ansible_os_family == "RedHat" + +- name: update mariadb config file + template: + src: '{{ item.src }}' + dest: '{{ item.dest }}' + backup: yes + mode: 0644 + with_items: mysql_config + +- name: bugfix for rsync version 3.1 + lineinfile: + dest: /usr/bin/wsrep_sst_rsync + state: absent + regexp: '{{ item }}' + with_items: + - "\\s*uid = \\$MYUID$" + - "\\s*gid = \\$MYGID$" + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: set owner + file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory mode=0755 + +- name: get logfile stat + stat: path='{{ mysql_data_dir }}/ib_logfile0' + register: logfile_stat + +- debug: msg='{{ logfile_stat.stat.exists}}' +- debug: msg='{{ logfile_stat.stat.size }}' + when: logfile_stat.stat.exists + +- name: rm logfile if exist and size mismatch + shell: 'rm -rf {{ mysql_data_dir }}/ib_logfile*' + when: | + logfile_stat.stat.exists + and logfile_stat.stat.size != 1073741824 + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/templates/data.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/templates/data.j2 new file mode 100644 index 00000000..66c2fead --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/templates/data.j2 @@ -0,0 +1,51 @@ +#!/bin/sh +mysql -uroot -Dmysql <<EOF +drop database if exists keystone; +drop database if exists glance; +drop database if exists neutron; +drop database if exists nova; +drop database if exists cinder; +drop database if exists heat; +drop database if exists aodh; + +CREATE DATABASE keystone; +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}'; +{% endfor %} + +CREATE DATABASE glance; +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}'; +{% endfor %} + +CREATE DATABASE neutron; +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}'; +{% endfor %} + +CREATE DATABASE nova; +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}'; +{% endfor %} + +CREATE DATABASE cinder; +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}'; +{% endfor %} + +CREATE DATABASE heat; +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON heat.* TO 'heat'@'{{ host }}' IDENTIFIED BY '{{ HEAT_DBPASS }}'; +{% endfor 
%} + +CREATE DATABASE aodh; +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON aodh.* TO 'aodh'@'{{ host }}' IDENTIFIED BY '{{ AODH_DBPASS }}'; +{% endfor %} + +{% if WSREP_SST_USER is defined %} +{% for host in ['%', 'localhost', inventory_hostname] %} +GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}'; +{% endfor %} +{% endif %} +EOF diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/Debian.yml new file mode 100644 index 00000000..1021524d --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/Debian.yml @@ -0,0 +1,55 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +mongodb_packages: + - mongodb-server + - mongodb-clients + - python-pymongo + +mysql_packages: + - python-mysqldb + - mysql-server + +maridb_packages: + - apt-transport-https + - debconf-utils + - libaio1 + - libc6 + - libdbd-mysql-perl + - libgcc1 + - libgcrypt20 + - libstdc++6 + - python-software-properties + - mariadb-client + - galera-3 + - rsync + - socat + - mariadb-galera-server-10.0 + - python-mysqldb + +pip_packages: [] + +services: [] + +mongodb_service: mongodb +mysql_config: + - dest: /etc/mysql/my.cnf + src: my.cnf + - dest: /etc/mysql/conf.d/wsrep.cnf + src: wsrep.cnf + +mysql_config_dir: /etc/mysql/conf.d +mysql_data_dir: /var/lib/mysql + +mongodb_config: + dest: /etc/mongodb.conf + src: mongodb.conf + journal: /var/lib/mongodb/journal/* + +wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/main.yml new file mode 100644 index 00000000..a32897f0 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/database/vars/main.yml @@ -0,0 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: + - mysql + +credentials: + - user: keystone + db: keystone + password: "{{ KEYSTONE_DBPASS }}" + - user: neutron + db: neutron + password: "{{ NEUTRON_DBPASS }}" + - user: glance + db: glance + password: "{{ GLANCE_DBPASS }}" + - user: nova + db: nova_api + password: "{{ NOVA_DBPASS }}" + - user: nova + db: nova + password: "{{ NOVA_DBPASS }}" + - user: cinder + db: cinder + password: "{{ CINDER_DBPASS }}" + - user: heat + db: heat + password: "{{ HEAT_DBPASS }}" + - user: aodh + db: aodh + password: "{{ AODH_DBPASS }}" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/handlers/main.yml new file mode 100644 index 00000000..36e39072 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/handlers/main.yml @@ -0,0 +1,29 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart neutron-plugin-openvswitch-agent + service: name=neutron-openvswitch-agent state=restarted enabled=yes + when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: restart neutron-l3-agent + service: name=neutron-l3-agent state=restarted enabled=yes + +- name: kill dnsmasq + command: killall dnsmasq + ignore_errors: True + +- name: restart neutron-dhcp-agent + service: name=neutron-dhcp-agent state=restarted enabled=yes + +- name: restart neutron-metadata-agent + service: name=neutron-metadata-agent state=restarted enabled=yes + +- name: restart xorp + service: name=xorp state=restarted enabled=yes sleep=10 + ignore_errors: True diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml new file mode 100644 index 00000000..a8bce16e --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/tasks/main.yml @@ -0,0 +1,54 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +# FIXME: temporary workaround for openstack api access random failure +- name: restart api server + service: name={{ item }} state=restarted enabled=yes + with_items: api_services | union(api_services_noarch) + ignore_errors: True + +- name: restart neutron server + service: name=neutron-server state=restarted enabled=yes + +- name: create external net + neutron_network: + login_username: ADMIN + login_password: "{{ ADMIN_PASS }}" + login_tenant_name: admin + auth_url: "http://{{ internal_vip.ip }}:35357/v2.0" + name: "{{ public_net_info.network }}" + provider_network_type: "{{ public_net_info.type }}" + provider_physical_network: "{{ public_net_info.provider_network }}" + provider_segmentation_id: "{{ public_net_info.segment_id}}" + shared: false + router_external: yes + state: present + run_once: true + when: 'public_net_info.enable == True' + +- name: create external subnet + neutron_subnet: + login_username: ADMIN + login_password: "{{ ADMIN_PASS }}" + login_tenant_name: admin + auth_url: "http://{{ internal_vip.ip }}:35357/v2.0" + name: "{{ public_net_info.subnet }}" + network_name: "{{ public_net_info.network }}" + cidr: "{{ public_net_info.floating_ip_cidr }}" + enable_dhcp: "{{ public_net_info.enable_dhcp }}" + no_gateway: "{{ public_net_info.no_gateway }}" + gateway_ip: "{{ public_net_info.external_gw }}" + allocation_pool_start: "{{ public_net_info.floating_ip_start }}" + allocation_pool_end: "{{ public_net_info.floating_ip_end }}" + state: present + run_once: true + when: 'public_net_info.enable == True' + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/Debian.yml new file mode 100644 index 00000000..0b5c78b6 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/Debian.yml @@ -0,0 +1,18 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +api_services: + - nova-api + - glance-api + - ceilometer-api + - heat-api + - heat-api-cfn + - aodh-api + - cinder-api + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/RedHat.yml new file mode 100644 index 00000000..886401fd --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/RedHat.yml @@ -0,0 +1,17 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +api_services: + - openstack-nova-api + - openstack-glance-api + - openstack-ceilometer-api + - openstack-heat-api + - openstack-heat-api-cfn + - openstack-cinder-api + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/main.yml new file mode 100644 index 00000000..b19b6ebf --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ext-network/vars/main.yml @@ -0,0 +1,10 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +api_services_noarch: [] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/tasks/nfs.yml new file mode 100644 index 00000000..07dfacdd --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/tasks/nfs.yml @@ -0,0 +1,61 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: install nfs packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: nfs_packages + +- name: install nfs + local_action: yum name={{ item }} state=present + with_items: + - rpcbind + - nfs-utils + run_once: True + +- name: create image directory + local_action: file path=/opt/images state=directory mode=0777 + run_once: True + +- name: remove nfs config item if exist + local_action: lineinfile dest=/etc/exports state=absent + regexp="^/opt/images" + run_once: True + +- name: update nfs config + local_action: lineinfile dest=/etc/exports state=present + line="/opt/images *(rw,insecure,sync,all_squash)" + run_once: True + +- name: restart compass nfs service + local_action: service name={{ item }} state=restarted enabled=yes + with_items: + - rpcbind + - nfs-server + run_once: True + +- name: get mount info + command: mount + register: mount_info + +- name: get nfs server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: ip_info + +- name: restart host nfs service + service: name={{ item }} state=restarted enabled=yes + with_items: '{{ nfs_services }}' + +- name: mount image directory + shell: | + mount -t nfs -onfsvers=3 {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images + sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab + echo {{ ip_info.stdout_lines[0] }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab + when: mount_info.stdout.find('images') == -1 + retries: 5 + delay: 3 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/Debian.yml new file mode 100644 index 00000000..d1825012 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/Debian.yml @@ -0,0 +1,21 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - glance + - nfs-common + +nfs_packages: + - nfs-common + +nfs_services: [] + +services: + - glance-registry + - glance-api diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/RedHat.yml new file mode 100644 index 00000000..2987d0c4 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/glance/vars/RedHat.yml @@ -0,0 +1,23 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - openstack-glance + - rpcbind + +nfs_packages: + - nfs-utils + - rpcbind + +nfs_services: + - rpcbind + +services: + - openstack-glance-api + - openstack-glance-registry diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/openstack_newton_xenial/roles/ha/templates/haproxy.cfg new file mode 100644 index 00000000..c0a0747d --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/ha/templates/haproxy.cfg @@ -0,0 +1,216 @@ + +global + #chroot /var/run/haproxy + daemon + user haproxy + group haproxy + maxconn 4000 + pidfile /var/run/haproxy/haproxy.pid + #log 127.0.0.1 local0 + tune.bufsize 1000000 + stats socket /var/run/haproxy.sock + stats timeout 2m + +defaults + log global + maxconn 8000 + option redispatch + option dontlognull + option splice-auto + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 50s + timeout server 50s + timeout check 10s + retries 3 + +listen proxy-mysql + bind {{ internal_vip.ip }}:3306 + option tcpka + option tcplog + balance source +{% for host, ip in haproxy_hosts.items() %} +{% if loop.index == 1 %} + server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 +{% else %} + server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5 backup +{% endif %} +{% endfor %} + +listen proxy-rabbit + bind {{ internal_vip.ip }}:5672 + bind {{ public_vip.ip }}:5672 + + option tcpka + option tcplog + timeout client 3h + timeout server 3h + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-glance_registry_cluster + bind {{ internal_vip.ip }}:9191 + bind {{ public_vip.ip }}:9191 + option tcpka + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:9191 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-glance_api_cluster + bind {{ internal_vip.ip }}:9292 + bind {{ public_vip.ip }}:9292 + option tcpka + option tcplog + option httpchk + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:9292 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-nova-novncproxy + bind {{ internal_vip.ip }}:6080 + bind {{ public_vip.ip }}:6080 + option tcpka + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:6080 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-network + bind {{ internal_vip.ip }}:9696 + bind {{ public_vip.ip }}:9696 + option tcpka + option tcplog + balance source + option httpchk +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:9696 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-volume + bind {{ internal_vip.ip }}:8776 + bind {{ public_vip.ip }}:8776 + option tcpka + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-keystone_admin_cluster + bind {{ internal_vip.ip }}:35357 + bind {{ public_vip.ip }}:35357 + option tcpka + option 
httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:35357 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-keystone_public_internal_cluster + bind {{ internal_vip.ip }}:5000 + bind {{ public_vip.ip }}:5000 + option tcpka + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:5000 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-nova_compute_api_cluster + bind {{ internal_vip.ip }}:8774 + bind {{ public_vip.ip }}:8774 + mode tcp + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8774 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-nova_metadata_api_cluster + bind {{ internal_vip.ip }}:8775 + bind {{ public_vip.ip }}:8775 + option tcpka + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8775 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-cinder_api_cluster + bind {{ internal_vip.ip }}:8776 + bind {{ public_vip.ip }}:8776 + mode tcp + option httpchk + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8776 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +#listen proxy-swift-proxy +# bind {{ internal_vip.ip }}:8080 +# bind {{ public_vip.ip }}:8080 +# balance source +# option tcpka +# option tcplog +#{% for host,ip in haproxy_hosts.items() %} +# server {{ host }} {{ ip }}:8080 weight 1 check inter 2000 rise 2 fall 5 +#{% endfor %} + +listen proxy-ceilometer_api_cluster + bind {{ internal_vip.ip }}:8777 + bind {{ public_vip.ip }}:8777 + mode tcp + option tcp-check + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8777 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-aodh_api_cluster + bind {{ internal_vip.ip }}:8042 + bind {{ public_vip.ip }}:8042 + mode tcp + option tcp-check + option tcplog + balance source +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:8042 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen proxy-dashboarad + bind {{ public_vip.ip }}:80 + mode http + balance source + capture cookie vgnvisitor= len 32 + cookie SERVERID insert indirect nocache + option forwardfor + option httpchk + option httpclose + rspidel ^Set-cookie:\ IP= +{% for host,ip in haproxy_hosts.items() %} + server {{ host }} {{ ip }}:80 cookie {{ host }} weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen stats + mode http + bind 0.0.0.0:9999 + stats enable + stats refresh 30s + stats uri / + stats realm Global\ statistics + stats auth admin:admin + + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml new file mode 100644 index 00000000..b90e6402 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/tasks/heat_install.yml @@ -0,0 +1,39 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install heat related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: generate heat service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +# ' + +- name: create heat user domain + shell: > + . /opt/admin-openrc-v3.sh; + openstack domain create --description "Stack projects and users" heat; + openstack user create --domain heat --password {{ HEAT_PASS }} heat_domain_admin; + openstack role add --domain heat --user-domain heat --user heat_domain_admin admin; + openstack role create heat_stack_owner; + openstack role add --project demo --user demo heat_stack_owner; + when: inventory_hostname == groups['controller'][0] + +- name: update heat conf + template: src=heat.j2 + dest=/etc/heat/heat.conf + backup=yes + notify: + - restart heat service + - remove heat-sqlite-db + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2 new file mode 100644 index 00000000..62df9fd9 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/heat/templates/heat.j2 @@ -0,0 +1,28 @@ +[DEFAULT] +heat_metadata_server_url = http://{{ internal_vip.ip }}:8000 +heat_waitcondition_server_url = http://{{ internal_vip.ip }}:8000/v1/waitcondition +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +log_dir = /var/log/heat +stack_domain_admin = heat_domain_admin +stack_domain_admin_password = {{ HEAT_PASS }} +stack_user_domain_name = heat + +[database] +connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat +idle_timeout = 30 +use_db_reconnect = True +pool_timeout = 10 + +[ec2authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 +admin_tenant_name = service +admin_user = heat +admin_password = {{ HEAT_PASS }} + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml new file mode 100644 index 00000000..79d02729 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/tasks/keystone_install.yml @@ -0,0 +1,98 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install keystone packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: disable boot auto start + file: + path={{ item }} + state=absent + with_items: + - /etc/init.d/keystone + - /etc/init/keystone.conf + - /lib/systemd/system/keystone.service + when: ansible_os_family == "Debian" + +- name: generate keystone service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: delete sqlite database + file: + path: /var/lib/keystone/keystone.db + state: absent + +- name: update keystone conf + template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes + notify: + - restart keystone services + +- name: assure listen port exist + lineinfile: + dest: '{{ apache_config_dir }}/ports.conf' + regexp: '{{ item.regexp }}' + line: '{{ item.line}}' + with_items: + - regexp: "^Listen {{ internal_ip }}:5000" + line: "Listen {{ internal_ip }}:5000" + - regexp: "^Listen {{ internal_ip }}:35357" + line: "Listen {{ internal_ip }}:35357" + notify: + - restart keystone services + +- name: update apache2 configs + template: + src: wsgi-keystone.conf.j2 + dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf' + when: ansible_os_family == 'Debian' + notify: + - restart keystone services + +- name: update apache2 configs + template: + src: wsgi-keystone.conf.j2 + dest: '{{ apache_config_dir }}/wsgi-keystone.conf' + when: ansible_os_family == 'RedHat' + notify: + - restart keystone services + +- name: enable keystone server + file: + src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf" + dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf" + state: "link" + when: ansible_os_family == 'Debian' + notify: + - restart keystone services + +- name: keystone source files + template: src={{ item }} dest=/opt/{{ item }} + with_items: + - admin-openrc.sh + - demo-openrc.sh + - admin-openrc-v3.sh + +- meta: flush_handlers diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/Debian.yml new file mode 100644 index 00000000..6000c6fd --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/Debian.yml @@ -0,0 +1,24 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +cron_path: "/var/spool/cron/crontabs" + +packages: + - keystone + - apache2 + - libapache2-mod-wsgi + - python-keystone + - python-openstackclient + +services: + - apache2 + +apache_config_dir: /etc/apache2 +http_service_name: apache2 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml new file mode 100644 index 00000000..ac548a09 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/keystone/vars/main.yml @@ -0,0 +1,179 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - python-keystoneclient + +services_noarch: [] +os_services: + - name: keystone + type: identity + region: RegionOne + description: "OpenStack Identity" + publicurl: "http://{{ public_vip.ip }}:5000/v2.0" + internalurl: "http://{{ internal_vip.ip }}:5000/v2.0" + adminurl: "http://{{ internal_vip.ip }}:35357/v2.0" + + - name: glance + type: image + region: RegionOne + description: "OpenStack Image Service" + publicurl: "http://{{ public_vip.ip }}:9292" + internalurl: "http://{{ internal_vip.ip }}:9292" + adminurl: "http://{{ internal_vip.ip }}:9292" + + - name: nova + type: compute + region: RegionOne + description: "OpenStack Compute" + publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s" + + - name: neutron + type: network + region: RegionOne + description: "OpenStack Networking" + publicurl: "http://{{ public_vip.ip }}:9696" + internalurl: "http://{{ internal_vip.ip }}:9696" + adminurl: "http://{{ internal_vip.ip }}:9696" + + - name: ceilometer + type: metering + region: RegionOne + description: "OpenStack Telemetry" + publicurl: "http://{{ public_vip.ip }}:8777" + internalurl: "http://{{ internal_vip.ip }}:8777" + adminurl: "http://{{ internal_vip.ip }}:8777" + + - name: aodh + type: alarming + region: RegionOne + description: "OpenStack Telemetry" + publicurl: "http://{{ public_vip.ip }}:8042" + internalurl: "http://{{ internal_vip.ip }}:8042" + adminurl: "http://{{ internal_vip.ip }}:8042" + + - name: cinder + type: volume + region: RegionOne + description: "OpenStack Block Storage" + publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s" + + - name: cinderv2 + type: volumev2 + region: RegionOne + description: "OpenStack Block Storage v2" + publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s" + + - name: heat + type: 
orchestration + region: RegionOne + description: "OpenStack Orchestration" + publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s" + + - name: heat-cfn + type: cloudformation + region: RegionOne + description: "OpenStack CloudFormation Orchestration" + publicurl: "http://{{ public_vip.ip }}:8000/v1" + internalurl: "http://{{ internal_vip.ip }}:8000/v1" + adminurl: "http://{{ internal_vip.ip }}:8000/v1" + +# - name: swift +# type: object-store +# region: RegionOne +# description: "OpenStack Object Storage" +# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s" +# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s" +# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s" + +os_users: + - user: admin + password: "{{ ADMIN_PASS }}" + email: admin@admin.com + role: admin + tenant: admin + tenant_description: "Admin Tenant" + + - user: glance + password: "{{ GLANCE_PASS }}" + email: glance@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: nova + password: "{{ NOVA_PASS }}" + email: nova@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: keystone + password: "{{ KEYSTONE_PASS }}" + email: keystone@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: neutron + password: "{{ NEUTRON_PASS }}" + email: neutron@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: ceilometer + password: "{{ CEILOMETER_PASS }}" + email: ceilometer@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: cinder + password: "{{ CINDER_PASS }}" + email: cinder@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: aodh + password: "{{ AODH_PASS }}" + email: aodh@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: heat + password: "{{ HEAT_PASS }}" + email: heat@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: demo + password: "" + email: heat@demo.com + role: heat_stack_user + tenant: demo + tenant_description: "Demo Tenant" + +# - user: swift +# password: "{{ CINDER_PASS }}" +# email: swift@admin.com +# role: admin +# tenant: service +# tenant_description: "Service Tenant" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/controllers.py b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/controllers.py new file mode 100644 index 00000000..6da5b423 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/controllers.py @@ -0,0 +1,920 @@ +# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors +# This software is distributed under the terms and conditions of the 'Apache-2.0' +# license which can be found in the file 'LICENSE' in this package distribution +# or at 'http://www.apache.org/licenses/LICENSE-2.0'. 
+# needed for the @dependency.requires decorators used throughout this module
+from keystone.common import dependency
+ +from keystone.common import controller +from keystone import config +from keystone import exception +from keystone.models import token_model +from keystone.contrib.moon.exception import * +from oslo_log import log +from uuid import uuid4 +import requests + + +CONF = config.CONF +LOG = log.getLogger(__name__) + + +@dependency.requires('configuration_api') +class Configuration(controller.V3Controller): + collection_name = 'configurations' + member_name = 'configuration' + + def __init__(self): + super(Configuration, self).__init__() + + def _get_user_id_from_token(self, token_id): + response = self.token_provider_api.validate_token(token_id) + token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response) + return token_ref.get('user') + + @controller.protected() + def get_policy_templates(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + return self.configuration_api.get_policy_templates_dict(user_id) + + @controller.protected() + def get_aggregation_algorithms(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + return self.configuration_api.get_aggregation_algorithms_dict(user_id) + + @controller.protected() + def get_sub_meta_rule_algorithms(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + return self.configuration_api.get_sub_meta_rule_algorithms_dict(user_id) + + +@dependency.requires('tenant_api', 'resource_api') +class Tenants(controller.V3Controller): + + def __init__(self): + super(Tenants, self).__init__() + + def _get_user_id_from_token(self, token_id): + response = self.token_provider_api.validate_token(token_id) + token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response) + return token_ref.get('user') + + @controller.protected() + def get_tenants(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + return self.tenant_api.get_tenants_dict(user_id) + + def __get_keystone_tenant_dict(self, tenant_id="", tenant_name="", tenant_description="", domain="default"): + tenants = self.resource_api.list_projects() + for tenant in tenants: + if tenant_id and tenant_id == tenant['id']: + return tenant + if tenant_name and tenant_name == tenant['name']: + return tenant + if not tenant_id: + tenant_id = uuid4().hex + if not tenant_name: + tenant_name = tenant_id + tenant = { + "id": tenant_id, + "name": tenant_name, + "description": tenant_description, + "enabled": True, + "domain_id": domain + } + keystone_tenant = self.resource_api.create_project(tenant["id"], tenant) + return keystone_tenant + + @controller.protected() + def add_tenant(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + k_tenant_dict = self.__get_keystone_tenant_dict( + tenant_name=kw.get('tenant_name'), + tenant_description=kw.get('tenant_description', kw.get('tenant_name')), + domain=kw.get('tenant_domain', "default"), + + ) + tenant_dict = dict() + tenant_dict['id'] = k_tenant_dict['id'] + tenant_dict['name'] = kw.get('tenant_name', None) + tenant_dict['description'] = kw.get('tenant_description', None) + tenant_dict['intra_authz_extension_id'] = kw.get('tenant_intra_authz_extension_id', None) + tenant_dict['intra_admin_extension_id'] = kw.get('tenant_intra_admin_extension_id', None) + return self.tenant_api.add_tenant_dict(user_id, tenant_dict['id'], tenant_dict) + + @controller.protected() + def get_tenant(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) 
+ tenant_id = kw.get('tenant_id', None) + return self.tenant_api.get_tenant_dict(user_id, tenant_id) + + @controller.protected() + def del_tenant(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + tenant_id = kw.get('tenant_id', None) + return self.tenant_api.del_tenant(user_id, tenant_id) + + @controller.protected() + def set_tenant(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + # Next line will raise an error if tenant doesn't exist + k_tenant_dict = self.resource_api.get_project(kw.get('tenant_id', None)) + tenant_id = kw.get('tenant_id', None) + tenant_dict = dict() + tenant_dict['name'] = k_tenant_dict.get('name', None) + if 'tenant_description' in kw: + tenant_dict['description'] = kw.get('tenant_description', None) + if 'tenant_intra_authz_extension_id' in kw: + tenant_dict['intra_authz_extension_id'] = kw.get('tenant_intra_authz_extension_id', None) + if 'tenant_intra_admin_extension_id' in kw: + tenant_dict['intra_admin_extension_id'] = kw.get('tenant_intra_admin_extension_id', None) + self.tenant_api.set_tenant_dict(user_id, tenant_id, tenant_dict) + + +def callback(self, context, prep_info, *args, **kwargs): + token_ref = "" + if context.get('token_id') is not None: + token_ref = token_model.KeystoneToken( + token_id=context['token_id'], + token_data=self.token_provider_api.validate_token( + context['token_id'])) + if not token_ref: + raise exception.Unauthorized + + +@dependency.requires('authz_api') +class Authz_v3(controller.V3Controller): + + def __init__(self): + super(Authz_v3, self).__init__() + + @controller.protected(callback) + def get_authz(self, context, tenant_id, subject_k_id, object_name, action_name): + try: + return self.authz_api.authz(tenant_id, subject_k_id, object_name, action_name) + except Exception as e: + return {'authz': False, 'comment': unicode(e)} + + +@dependency.requires('admin_api', 'root_api') +class IntraExtensions(controller.V3Controller): + collection_name = 'intra_extensions' + member_name = 'intra_extension' + + def __init__(self): + super(IntraExtensions, self).__init__() + + def _get_user_id_from_token(self, token_id): + response = self.token_provider_api.validate_token(token_id) + token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response) + return token_ref.get('user')['id'] + + # IntraExtension functions + @controller.protected() + def get_intra_extensions(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + return self.admin_api.get_intra_extensions_dict(user_id) + + @controller.protected() + def add_intra_extension(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_dict = dict() + intra_extension_dict['name'] = kw.get('intra_extension_name', None) + intra_extension_dict['model'] = kw.get('intra_extension_model', None) + intra_extension_dict['genre'] = kw.get('intra_extension_genre', None) + intra_extension_dict['description'] = kw.get('intra_extension_description', None) + intra_extension_dict['subject_categories'] = kw.get('intra_extension_subject_categories', dict()) + intra_extension_dict['object_categories'] = kw.get('intra_extension_object_categories', dict()) + intra_extension_dict['action_categories'] = kw.get('intra_extension_action_categories', dict()) + intra_extension_dict['subjects'] = kw.get('intra_extension_subjects', dict()) + intra_extension_dict['objects'] = kw.get('intra_extension_objects', dict()) + 
intra_extension_dict['actions'] = kw.get('intra_extension_actions', dict()) + intra_extension_dict['subject_scopes'] = kw.get('intra_extension_subject_scopes', dict()) + intra_extension_dict['object_scopes'] = kw.get('intra_extension_object_scopes', dict()) + intra_extension_dict['action_scopes'] = kw.get('intra_extension_action_scopes', dict()) + intra_extension_dict['subject_assignments'] = kw.get('intra_extension_subject_assignments', dict()) + intra_extension_dict['object_assignments'] = kw.get('intra_extension_object_assignments', dict()) + intra_extension_dict['action_assignments'] = kw.get('intra_extension_action_assignments', dict()) + intra_extension_dict['aggregation_algorithm'] = kw.get('intra_extension_aggregation_algorithm', dict()) + intra_extension_dict['sub_meta_rules'] = kw.get('intra_extension_sub_meta_rules', dict()) + intra_extension_dict['rules'] = kw.get('intra_extension_rules', dict()) + ref = self.admin_api.load_intra_extension_dict(user_id, intra_extension_dict=intra_extension_dict) + return self.admin_api.populate_default_data(ref) + + @controller.protected() + def get_intra_extension(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_intra_extension_dict(user_id, intra_extension_id) + + @controller.protected() + def del_intra_extension(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + self.admin_api.del_intra_extension(user_id, intra_extension_id) + + @controller.protected() + def set_intra_extension(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + intra_extension_dict = dict() + intra_extension_dict['name'] = kw.get('intra_extension_name', None) + intra_extension_dict['model'] = kw.get('intra_extension_model', None) + intra_extension_dict['genre'] = kw.get('intra_extension_genre', None) + intra_extension_dict['description'] = kw.get('intra_extension_description', None) + return self.admin_api.set_intra_extension_dict(user_id, intra_extension_id, intra_extension_dict) + + @controller.protected() + def load_root_intra_extension(self, context, **kw): + self.root_api.load_root_intra_extension_dict() + + # Metadata functions + @controller.protected() + def get_subject_categories(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_subject_categories_dict(user_id, intra_extension_id) + + @controller.protected() + def add_subject_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_dict = dict() + subject_category_dict['name'] = kw.get('subject_category_name', None) + subject_category_dict['description'] = kw.get('subject_category_description', None) + return self.admin_api.add_subject_category_dict(user_id, intra_extension_id, subject_category_dict) + + @controller.protected() + def get_subject_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + return self.admin_api.get_subject_category_dict(user_id, intra_extension_id, subject_category_id) + + 
@controller.protected() + def del_subject_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + self.admin_api.del_subject_category(user_id, intra_extension_id, subject_category_id) + + @controller.protected() + def set_subject_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + subject_category_dict = dict() + subject_category_dict['name'] = kw.get('subject_category_name', None) + subject_category_dict['description'] = kw.get('subject_category_description', None) + return self.admin_api.set_subject_category_dict(user_id, intra_extension_id, subject_category_id, subject_category_dict) + + @controller.protected() + def get_object_categories(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_object_categories_dict(user_id, intra_extension_id) + + @controller.protected() + def add_object_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_dict = dict() + object_category_dict['name'] = kw.get('object_category_name', None) + object_category_dict['description'] = kw.get('object_category_description', None) + return self.admin_api.add_object_category_dict(user_id, intra_extension_id, object_category_dict) + + @controller.protected() + def get_object_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + return self.admin_api.get_object_categories_dict(user_id, intra_extension_id, object_category_id) + + @controller.protected() + def del_object_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + self.admin_api.del_object_category(user_id, intra_extension_id, object_category_id) + + @controller.protected() + def set_object_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + object_category_dict = dict() + object_category_dict['name'] = kw.get('object_category_name', None) + object_category_dict['description'] = kw.get('object_category_description', None) + return self.admin_api.set_object_category_dict(user_id, intra_extension_id, object_category_id, object_category_dict) + + @controller.protected() + def get_action_categories(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_action_categories_dict(user_id, intra_extension_id) + + @controller.protected() + def add_action_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_dict = dict() + action_category_dict['name'] = kw.get('action_category_name', None) + 
action_category_dict['description'] = kw.get('action_category_description', None) + return self.admin_api.add_action_category_dict(user_id, intra_extension_id, action_category_dict) + + @controller.protected() + def get_action_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + return self.admin_api.get_action_categories_dict(user_id, intra_extension_id, action_category_id) + + @controller.protected() + def del_action_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + self.admin_api.del_action_category(user_id, intra_extension_id, action_category_id) + + @controller.protected() + def set_action_category(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + action_category_dict = dict() + action_category_dict['name'] = kw.get('action_category_name', None) + action_category_dict['description'] = kw.get('action_category_description', None) + return self.admin_api.set_action_category_dict(user_id, intra_extension_id, action_category_id, action_category_dict) + + # Perimeter functions + @controller.protected() + def get_subjects(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_subjects_dict(user_id, intra_extension_id) + + @controller.protected() + def add_subject(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_dict = dict() + subject_dict['name'] = kw.get('subject_name', None) + subject_dict['description'] = kw.get('subject_description', None) + subject_dict['password'] = kw.get('subject_password', None) + subject_dict['email'] = kw.get('subject_email', None) + return self.admin_api.add_subject_dict(user_id, intra_extension_id, subject_dict) + + @controller.protected() + def get_subject(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_id = kw.get('subject_id', None) + return self.admin_api.get_subject_dict(user_id, intra_extension_id, subject_id) + + @controller.protected() + def del_subject(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_id = kw.get('subject_id', None) + self.admin_api.del_subject(user_id, intra_extension_id, subject_id) + + @controller.protected() + def set_subject(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_id = kw.get('subject_id', None) + subject_dict = dict() + subject_dict['name'] = kw.get('subject_name', None) + subject_dict['description'] = kw.get('subject_description', None) + return self.admin_api.set_subject_dict(user_id, intra_extension_id, subject_id, subject_dict) + + @controller.protected() + def get_objects(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = 
kw.get('intra_extension_id', None) + return self.admin_api.get_objects_dict(user_id, intra_extension_id) + + @controller.protected() + def add_object(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_dict = dict() + object_dict['name'] = kw.get('object_name', None) + object_dict['description'] = kw.get('object_description', None) + return self.admin_api.add_object_dict(user_id, intra_extension_id, object_dict) + + @controller.protected() + def get_object(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_id = kw.get('object_id', None) + return self.admin_api.get_object_dict(user_id, intra_extension_id, object_id) + + @controller.protected() + def del_object(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_id = kw.get('object_id', None) + self.admin_api.del_object(user_id, intra_extension_id, object_id) + + @controller.protected() + def set_object(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_id = kw.get('object_id', None) + object_dict = dict() + object_dict['name'] = kw.get('object_name', None) + object_dict['description'] = kw.get('object_description', None) + return self.admin_api.set_object_dict(user_id, intra_extension_id, object_id, object_dict) + + @controller.protected() + def get_actions(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_actions_dict(user_id, intra_extension_id) + + @controller.protected() + def add_action(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_dict = dict() + action_dict['name'] = kw.get('action_name', None) + action_dict['description'] = kw.get('action_description', None) + return self.admin_api.add_action_dict(user_id, intra_extension_id, action_dict) + + @controller.protected() + def get_action(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_id = kw.get('action_id', None) + return self.admin_api.get_action_dict(user_id, intra_extension_id, action_id) + + @controller.protected() + def del_action(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_id = kw.get('action_id', None) + self.admin_api.del_action(user_id, intra_extension_id, action_id) + + @controller.protected() + def set_action(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_id = kw.get('action_id', None) + action_dict = dict() + action_dict['name'] = kw.get('action_name', None) + action_dict['description'] = kw.get('action_description', None) + return self.admin_api.set_action_dict(user_id, intra_extension_id, action_id, action_dict) + + # Scope functions + @controller.protected() + def get_subject_scopes(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = 
kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + return self.admin_api.get_subject_scopes_dict(user_id, intra_extension_id, subject_category_id) + + @controller.protected() + def add_subject_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + subject_scope_dict = dict() + subject_scope_dict['name'] = kw.get('subject_scope_name', None) + subject_scope_dict['description'] = kw.get('subject_scope_description', None) + return self.admin_api.add_subject_scope_dict(user_id, intra_extension_id, subject_category_id, subject_scope_dict) + + @controller.protected() + def get_subject_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + subject_scope_id = kw.get('subject_scope_id', None) + return self.admin_api.get_subject_scope_dict(user_id, intra_extension_id, subject_category_id, subject_scope_id) + + @controller.protected() + def del_subject_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + subject_scope_id = kw.get('subject_scope_id', None) + self.admin_api.del_subject_scope(user_id, intra_extension_id, subject_category_id, subject_scope_id) + + @controller.protected() + def set_subject_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_category_id = kw.get('subject_category_id', None) + subject_scope_id = kw.get('subject_scope_id', None) + subject_scope_dict = dict() + subject_scope_dict['name'] = kw.get('subject_scope_name', None) + subject_scope_dict['description'] = kw.get('subject_scope_description', None) + return self.admin_api.set_subject_scope_dict(user_id, intra_extension_id, subject_category_id, subject_scope_id, subject_scope_dict) + + @controller.protected() + def get_object_scopes(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + return self.admin_api.get_object_scopes_dict(user_id, intra_extension_id, object_category_id) + + @controller.protected() + def add_object_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + object_scope_dict = dict() + object_scope_dict['name'] = kw.get('object_scope_name', None) + object_scope_dict['description'] = kw.get('object_scope_description', None) + return self.admin_api.add_object_scope_dict(user_id, intra_extension_id, object_category_id, object_scope_dict) + + @controller.protected() + def get_object_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + object_scope_id = kw.get('object_scope_id', None) + return self.admin_api.get_object_scope_dict(user_id, intra_extension_id, object_category_id, object_scope_id) + + @controller.protected() + def 
del_object_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + object_scope_id = kw.get('object_scope_id', None) + self.admin_api.del_object_scope(user_id, intra_extension_id, object_category_id, object_scope_id) + + @controller.protected() + def set_object_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_category_id = kw.get('object_category_id', None) + object_scope_id = kw.get('object_scope_id', None) + object_scope_dict = dict() + object_scope_dict['name'] = kw.get('object_scope_name', None) + object_scope_dict['description'] = kw.get('object_scope_description', None) + return self.admin_api.set_object_scope_dict(user_id, intra_extension_id, object_category_id, object_scope_id, object_scope_dict) + + @controller.protected() + def get_action_scopes(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + return self.admin_api.get_action_scopes_dict(user_id, intra_extension_id, action_category_id) + + @controller.protected() + def add_action_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + action_scope_dict = dict() + action_scope_dict['name'] = kw.get('action_scope_name', None) + action_scope_dict['description'] = kw.get('action_scope_description', None) + return self.admin_api.add_action_scope_dict(user_id, intra_extension_id, action_category_id, action_scope_dict) + + @controller.protected() + def get_action_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + action_scope_id = kw.get('action_scope_id', None) + return self.admin_api.get_action_scope_dict(user_id, intra_extension_id, action_category_id, action_scope_id) + + @controller.protected() + def del_action_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + action_scope_id = kw.get('action_scope_id', None) + self.admin_api.del_action_scope(user_id, intra_extension_id, action_category_id, action_scope_id) + + @controller.protected() + def set_action_scope(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_category_id = kw.get('action_category_id', None) + action_scope_id = kw.get('action_scope_id', None) + action_scope_dict = dict() + action_scope_dict['name'] = kw.get('action_scope_name', None) + action_scope_dict['description'] = kw.get('action_scope_description', None) + return self.admin_api.set_action_scope_dict(user_id, intra_extension_id, action_category_id, action_scope_id, action_scope_dict) + + # Assignment functions + + @controller.protected() + def add_subject_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_id = 
kw.get('subject_id', None) + subject_category_id = kw.get('subject_category_id', None) + subject_scope_id = kw.get('subject_scope_id', None) + return self.admin_api.add_subject_assignment_list(user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id) + + @controller.protected() + def get_subject_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_id = kw.get('subject_id', None) + subject_category_id = kw.get('subject_category_id', None) + return self.admin_api.get_subject_assignment_list(user_id, intra_extension_id, subject_id, subject_category_id) + + @controller.protected() + def del_subject_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + subject_id = kw.get('subject_id', None) + subject_category_id = kw.get('subject_category_id', None) + subject_scope_id = kw.get('subject_scope_id', None) + self.admin_api.del_subject_assignment(user_id, intra_extension_id, subject_id, subject_category_id, subject_scope_id) + + @controller.protected() + def add_object_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_id = kw.get('object_id', None) + object_category_id = kw.get('object_category_id', None) + object_scope_id = kw.get('object_scope_id', None) + return self.admin_api.add_object_assignment_list(user_id, intra_extension_id, object_id, object_category_id, object_scope_id) + + @controller.protected() + def get_object_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_id = kw.get('object_id', None) + object_category_id = kw.get('object_category_id', None) + return self.admin_api.get_object_assignment_list(user_id, intra_extension_id, object_id, object_category_id) + + @controller.protected() + def del_object_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + object_id = kw.get('object_id', None) + object_category_id = kw.get('object_category_id', None) + object_scope_id = kw.get('object_scope_id', None) + self.admin_api.del_object_assignment(user_id, intra_extension_id, object_id, object_category_id, object_scope_id) + + @controller.protected() + def add_action_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_id = kw.get('action_id', None) + action_category_id = kw.get('action_category_id', None) + action_scope_id = kw.get('action_scope_id', None) + return self.admin_api.add_action_assignment_list(user_id, intra_extension_id, action_id, action_category_id, action_scope_id) + + @controller.protected() + def get_action_assignment(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_id = kw.get('action_id', None) + action_category_id = kw.get('action_category_id', None) + return self.admin_api.get_action_assignment_list(user_id, intra_extension_id, action_id, action_category_id) + + @controller.protected() + def del_action_assignment(self, context, **kw): + user_id = 
self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + action_id = kw.get('action_id', None) + action_category_id = kw.get('action_category_id', None) + action_scope_id = kw.get('action_scope_id', None) + self.admin_api.del_action_assignment(user_id, intra_extension_id, action_id, action_category_id, action_scope_id) + + # Metarule functions + + @controller.protected() + def get_aggregation_algorithm(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_aggregation_algorithm_id(user_id, intra_extension_id) + + @controller.protected() + def set_aggregation_algorithm(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + aggregation_algorithm_id = kw.get('aggregation_algorithm_id', None) + return self.admin_api.set_aggregation_algorithm_id(user_id, intra_extension_id, aggregation_algorithm_id) + + @controller.protected() + def get_sub_meta_rules(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + return self.admin_api.get_sub_meta_rules_dict(user_id, intra_extension_id) + + @controller.protected() + def add_sub_meta_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_dict = dict() + sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None) + sub_meta_rule_dict['algorithm'] = kw.get('sub_meta_rule_algorithm', None) + sub_meta_rule_dict['subject_categories'] = kw.get('sub_meta_rule_subject_categories', None) + sub_meta_rule_dict['object_categories'] = kw.get('sub_meta_rule_object_categories', None) + sub_meta_rule_dict['action_categories'] = kw.get('sub_meta_rule_action_categories', None) + return self.admin_api.add_sub_meta_rule_dict(user_id, intra_extension_id, sub_meta_rule_dict) + + @controller.protected() + def get_sub_meta_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + return self.admin_api.get_sub_meta_rule_dict(user_id, intra_extension_id, sub_meta_rule_id) + + @controller.protected() + def del_sub_meta_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + self.admin_api.del_sub_meta_rule(user_id, intra_extension_id, sub_meta_rule_id) + + @controller.protected() + def set_sub_meta_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + sub_meta_rule_dict = dict() + sub_meta_rule_dict['name'] = kw.get('sub_meta_rule_name', None) + sub_meta_rule_dict['algorithm'] = kw.get('sub_meta_rule_algorithm', None) + sub_meta_rule_dict['subject_categories'] = kw.get('sub_meta_rule_subject_categories', None) + sub_meta_rule_dict['object_categories'] = kw.get('sub_meta_rule_object_categories', None) + sub_meta_rule_dict['action_categories'] = kw.get('sub_meta_rule_action_categories', None) + return self.admin_api.set_sub_meta_rule_dict(user_id, intra_extension_id, 
sub_meta_rule_id, sub_meta_rule_dict) + + # Rules functions + @controller.protected() + def get_rules(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + return self.admin_api.get_rules_dict(user_id, intra_extension_id, sub_meta_rule_id) + + @controller.protected() + def add_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + subject_category_list = kw.get('subject_categories', []) + object_category_list = kw.get('object_categories', []) + action_category_list = kw.get('action_categories', []) + enabled_bool = kw.get('enabled', True) + rule_list = subject_category_list + action_category_list + object_category_list + [enabled_bool, ] + return self.admin_api.add_rule_dict(user_id, intra_extension_id, sub_meta_rule_id, rule_list) + + @controller.protected() + def get_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + rule_id = kw.get('rule_id', None) + return self.admin_api.get_rule_dict(user_id, intra_extension_id, sub_meta_rule_id, rule_id) + + @controller.protected() + def del_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + rule_id = kw.get('rule_id', None) + self.admin_api.del_rule(user_id, intra_extension_id, sub_meta_rule_id, rule_id) + + @controller.protected() + def set_rule(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + intra_extension_id = kw.get('intra_extension_id', None) + sub_meta_rule_id = kw.get('sub_meta_rule_id', None) + rule_id = kw.get('rule_id', None) + rule_list = list() + subject_category_list = kw.get('subject_categories', []) + object_category_list = kw.get('object_categories', []) + action_category_list = kw.get('action_categories', []) + rule_list = subject_category_list + action_category_list + object_category_list + return self.admin_api.set_rule_dict(user_id, intra_extension_id, sub_meta_rule_id, rule_id, rule_list) + + +@dependency.requires('authz_api') +class InterExtensions(controller.V3Controller): + + def __init__(self): + super(InterExtensions, self).__init__() + + def _get_user_from_token(self, token_id): + response = self.token_provider_api.validate_token(token_id) + token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response) + return token_ref['user'] + + # @controller.protected() + # def get_inter_extensions(self, context, **kw): + # user = self._get_user_from_token(context.get('token_id')) + # return { + # 'inter_extensions': + # self.interextension_api.get_inter_extensions() + # } + + # @controller.protected() + # def get_inter_extension(self, context, **kw): + # user = self._get_user_from_token(context.get('token_id')) + # return { + # 'inter_extensions': + # self.interextension_api.get_inter_extension(uuid=kw['inter_extension_id']) + # } + + # @controller.protected() + # def create_inter_extension(self, context, **kw): + # user = self._get_user_from_token(context.get('token_id')) + # return self.interextension_api.create_inter_extension(kw) + + # @controller.protected() + # def 
delete_inter_extension(self, context, **kw): + # user = self._get_user_from_token(context.get('token_id')) + # if 'inter_extension_id' not in kw: + # raise exception.Error + # return self.interextension_api.delete_inter_extension(kw['inter_extension_id']) + + +@dependency.requires('moonlog_api', 'authz_api') +class Logs(controller.V3Controller): + + def __init__(self): + super(Logs, self).__init__() + + def _get_user_id_from_token(self, token_id): + response = self.token_provider_api.validate_token(token_id) + token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response) + return token_ref['user'] + + @controller.protected() + def get_logs(self, context, **kw): + user_id = self._get_user_id_from_token(context.get('token_id')) + options = kw.get('options', '') + return self.moonlog_api.get_logs(user_id, options) + + +@dependency.requires('identity_api', "token_provider_api", "resource_api") +class MoonAuth(controller.V3Controller): + + def __init__(self): + super(MoonAuth, self).__init__() + + def _get_project(self, uuid="", name=""): + projects = self.resource_api.list_projects() + for project in projects: + if uuid and uuid == project['id']: + return project + elif name and name == project['name']: + return project + + def get_token(self, context, **kw): + data_auth = { + "auth": { + "identity": { + "methods": [ + "password" + ], + "password": { + "user": { + "domain": { + "id": "Default" + }, + "name": kw['username'], + "password": kw['password'] + } + } + } + } + } + + message = {} + if "project" in kw: + project = self._get_project(name=kw['project']) + if project: + data_auth["auth"]["scope"] = dict() + data_auth["auth"]["scope"]['project'] = dict() + data_auth["auth"]["scope"]['project']['id'] = project['id'] + else: + message = { + "error": { + "message": "Unable to find project {}".format(kw['project']), + "code": 200, + "title": "UnScopedToken" + }} + +# req = requests.post("http://localhost:5000/v3/auth/tokens", +# json=data_auth, +# headers={"Content-Type": "application/json"} +# ) + req = requests.post("http://172.16.1.222:5000/v3/auth/tokens", + json=data_auth, + headers={"Content-Type": "application/json"} + ) + if req.status_code not in (200, 201): + LOG.error(req.text) + else: + _token = req.headers['X-Subject-Token'] + _data = req.json() + _result = { + "token": _token, + 'message': message + } + try: + _result["roles"] = map(lambda x: x['name'], _data["token"]["roles"]) + except KeyError: + pass + return _result + return {"token": None, 'message': req.json()} + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf new file mode 100644 index 00000000..6e1159a1 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf @@ -0,0 +1,11 @@ +keystone/admin-password: password +keystone/auth-token: password +keystone/admin-password-confirm: password +keystone/admin-email: root@localhost +keystone/admin-role-name: admin +keystone/admin-user: admin +keystone/create-admin-tenant: false +keystone/region-name: Orange +keystone/admin-tenant-name: admin +keystone/register-endpoint: false +keystone/configure_db: false diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf.bak b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf.bak new file mode 100644 index 00000000..6e1159a1 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/deb.conf.bak @@ 
-0,0 +1,11 @@ +keystone/admin-password: password +keystone/auth-token: password +keystone/admin-password-confirm: password +keystone/admin-email: root@localhost +keystone/admin-role-name: admin +keystone/admin-user: admin +keystone/create-admin-tenant: false +keystone/region-name: Orange +keystone/admin-tenant-name: admin +keystone/register-endpoint: false +keystone/configure_db: false diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/get_deb_depends.py b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/get_deb_depends.py new file mode 100644 index 00000000..05fc5d46 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/files/get_deb_depends.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +import sys +import subprocess + +pkts = [] + +for arg in sys.argv[1:]: + proc = subprocess.Popen(["dpkg-deb", "--info", arg], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out = proc.stdout.read() + err = proc.stderr.read() + if err: + print("An error occurred with {} ({})".format(arg, err)) + continue + for line in out.splitlines(): + line = line.decode('utf-8') + if " Depends:" in line: + line = line.replace(" Depends:", "") + for _dep in line.split(','): + pkts.append(_dep.split()[0]) + +print(" ".join(pkts)) diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/handlers/main.yml new file mode 100755 index 00000000..608a8a09 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/handlers/main.yml @@ -0,0 +1,12 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart keystone services + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/main.yml new file mode 100644 index 00000000..a3511de7 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/main.yml @@ -0,0 +1,11 @@ +############################################################################# +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include: moon.yml + when: moon == "Enable" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-compute.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-compute.yml new file mode 100644 index 00000000..e4142b5f --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-compute.yml @@ -0,0 +1,20 @@ +############################################################################# +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: update api-paste.ini + template: src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes + +- name: restart nova task + service: name={{ item }} state=restarted enabled=yes + with_items: + - nova-compute + +#- name: restart swift task +# shell: swift-init all start +# ignore_errors: True diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-controller.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-controller.yml new file mode 100644 index 00000000..7a507c88 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon-controller.yml @@ -0,0 +1,238 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +# install all packages +- name: install keystone packages + shell: apt-get install -y python-pip unzip + +# download master.zip +- name: get image http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: download keystone-moon packages + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/master.zip" dest=/tmp/master.zip mode=0444 + +- name: extract keystone-moon packages + unarchive: src=/tmp/master.zip dest=/tmp copy=no + +# install all dependencies +- name: copy scripts + copy: src=get_deb_depends.py dest=/tmp/get_deb_depends.py + +- name: install keystone-moon dependencies + shell: "apt-get install `python /tmp/get_deb_depends.py /tmp/moon-bin-master/*.deb`" + when: ansible_os_family == "Debian" + +- name: delete configuration file + shell: > + rm -f {{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf; + rm -f {{ apache_config_dir }}/sites-available/wsgi-keystone.conf; + +# install keystone moon +- name: copy scripts + copy: src=deb.conf dest=/tmp/deb.conf + +- name: install keystone moon + shell: > + export DEBIAN_FRONTEND="noninteractive"; + sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb; + +#- name: install keystone moon +# shell: > +# export DEBIAN_FRONTEND="noninteractive"; +# sudo -E debconf-set-selections python-keystone < /tmp/deb.conf; +# sudo -E dpkg -i /tmp/moon-bin-master/*moon*.deb; + +- name: stop keystone task + shell: > + service keystone stop; + mv /etc/init.d/keystone /home/; + mv /etc/init/keystone.conf /home/; + mv /lib/systemd/system/keystone.service /home/; + +# config keystone and apache2 +- name: delete sqlite database + file: + path: /var/lib/keystone/keystone.db + state: absent + +#- name: update keystone conf +# template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes + + +#- name: assure listen port exist +# lineinfile: +# dest: '{{ apache_config_dir }}/ports.conf' +# regexp: '{{ item.regexp }}' +# line: '{{ item.line}}' +# with_items: +# - regexp: "^Listen {{ internal_ip }}:5000" +# line: "Listen {{ internal_ip }}:5000" +# - regexp: "^Listen {{ internal_ip }}:35357" +# line: "Listen {{ internal_ip }}:35357" + +- name: update apache2 configs + template: + src: wsgi-keystone.conf.j2 + dest: '{{ apache_config_dir }}/sites-available/wsgi-keystone.conf' + when: ansible_os_family == 'Debian' + +- name: enable keystone server + file: + src: "{{ apache_config_dir }}/sites-available/wsgi-keystone.conf" + dest: "{{ apache_config_dir }}/sites-enabled/wsgi-keystone.conf" + state: "link" + when: ansible_os_family == 'Debian' + +#- name: keystone source files +# template: src={{ item }} dest=/opt/{{ item }} +# with_items: +# - admin-openrc.sh +# - demo-openrc.sh + +# keystone paste ini +- name: keystone paste ini 1 + shell: sudo cp /etc/keystone/keystone-paste.ini /etc/keystone/keystone-paste.ini.bak; + +- name: keystone paste ini 2 + shell: sudo sed "3i[pipeline:moon_pipeline]\npipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service\n\n[app:moon_service]\nuse = egg:keystone#moon_service\n" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini; + +- name: keystone paste ini 3 + 
shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini; + +- name: keystone paste ini 4 + shell: sudo sed "s/use = egg:Paste#urlmap/use = egg:Paste#urlmap\n\/moon = moon_pipeline/" /etc/keystone/keystone-paste.ini > /tmp/keystone-paste.ini; + +- name: keystone paste ini 5 + shell: sudo cp /tmp/keystone-paste.ini /etc/keystone/keystone-paste.ini; + +# moon log +- name: moon log + shell: > + sudo mkdir /var/log/moon/; + sudo chown keystone /var/log/moon/; + sudo addgroup moonlog; + sudo chgrp moonlog /var/log/moon/; + sudo touch /var/log/moon/keystonemiddleware.log; + sudo touch /var/log/moon/system.log; + sudo chgrp moonlog /var/log/moon/keystonemiddleware.log; + sudo chgrp moonlog /var/log/moon/system.log; + sudo chmod g+rw /var/log/moon; + sudo chmod g+rw /var/log/moon/keystonemiddleware.log; + sudo chmod g+rw /var/log/moon/system.log; + sudo adduser keystone moonlog; + # sudo adduser swift moonlog; + sudo adduser nova moonlog; + + +# keystone db sync +- name: keystone db sync + shell: > + sudo /usr/bin/keystone-manage db_sync; + sudo /usr/bin/keystone-manage db_sync --extension moon; + when: inventory_hostname == haproxy_hosts.keys()[0] + + +############################################# +- name: wait for keystone ready + wait_for: port=35357 delay=3 timeout=10 host={{ internal_vip.ip }} + +#- name: cron job to purge expired tokens hourly +# cron: +# name: 'purge expired tokens' +# special_time: hourly +# job: '/usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' + +############################################# +# moon workaround +- name: copy scripts + copy: src=controllers.py dest=/usr/lib/python2.7/dist-packages/keystone/contrib/moon/controllers.py + +# apache2 restart +- name: restart apache2 + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) + +# install moonclient +- name: install moon client + shell: sudo pip install /tmp/moon-bin-master/python-moonclient-0.1.tar.gz + +################################################### + + +#- name: add tenants +# keystone_user: +# token: "{{ ADMIN_TOKEN }}" +# endpoint: "http://{{ internal_ip }}:35357/v2.0" +# tenant: "{{ item.tenant }}" +# tenant_description: "{{ item.tenant_description }}" +# with_items: "{{ os_users }}" +# when: inventory_hostname == groups['controller'][0] +# +#- name: add users +# keystone_user: +# token: "{{ ADMIN_TOKEN }}" +# endpoint: "http://{{ internal_ip }}:35357/v2.0" +# user: "{{ item.user }}" +# tenant: "{{ item.tenant }}" +# password: "{{ item.password }}" +# email: "{{ item.email }}" +# with_items: "{{ os_users }}" +# when: inventory_hostname == groups['controller'][0] +# +#- name: grant roles +# keystone_user: +# token: "{{ ADMIN_TOKEN }}" +# endpoint: "http://{{ internal_ip }}:35357/v2.0" +# user: "{{ item.user }}" +# role: "{{ item.role }}" +# tenant: "{{ item.tenant }}" +# with_items: "{{ os_users }}" +# when: inventory_hostname == groups['controller'][0] +# +#- name: add endpoints +# keystone_service: +# token: "{{ ADMIN_TOKEN }}" +# endpoint: "http://{{ internal_ip }}:35357/v2.0" +# name: "{{ item.name }}" +# type: "{{ item.type }}" +# region: "{{ item.region}}" +# description: "{{ item.description }}" +# publicurl: "{{ item.publicurl }}" +# internalurl: "{{ item.internalurl }}" +# adminurl: "{{ item.adminurl }}" +# with_items: "{{ os_services }}" +# when: inventory_hostname == groups['controller'][0] + + +################################################### + +- name: update api-paste.ini + template: 
src=api-paste.ini dest=/etc/nova/api-paste.ini backup=yes + +#- name: update proxy-server conf +# template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes + +# restart nova +- name: restart nova + service: name={{ item }} state=restarted enabled=yes + with_items: + - nova-api + - nova-cert + - nova-conductor + - nova-consoleauth + - nova-scheduler + +# restart swift +#- name: restart swift +# service: name={{ item }} state=restarted enabled=yes +# with_items: +# - swift-proxy +# - memcached diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon.yml new file mode 100644 index 00000000..40e1c98c --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/tasks/moon.yml @@ -0,0 +1,16 @@ +############################################################################# +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- include: moon-controller.yml + when: inventory_hostname in groups['controller'] + +- include: moon-compute.yml + when: inventory_hostname in groups['compute'] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/admin-openrc.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/admin-openrc.sh new file mode 100644 index 00000000..6ba620ff --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/admin-openrc.sh @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# Verify the Identity Service installation +export OS_PASSWORD={{ ADMIN_PASS }} +export OS_TENANT_NAME=admin +export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0 +export OS_USERNAME=admin +export OS_VOLUME_API_VERSION=2 + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/api-paste.ini b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/api-paste.ini new file mode 100644 index 00000000..f99689b7 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/api-paste.ini @@ -0,0 +1,106 @@ +############ +# Metadata # +############ +[composite:metadata] +use = egg:Paste#urlmap +/: meta + +[pipeline:meta] +pipeline = cors metaapp + +[app:metaapp] +paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory + +############# +# OpenStack # +############# + +[composite:osapi_compute] +use = call:nova.api.openstack.urlmap:urlmap_factory +/: oscomputeversions +# starting in Liberty the v21 implementation replaces the v2 +# implementation and is suggested that you use it as the default. 
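admin-openrc.sh above only exports the usual OS_* variables, while the MoonAuth.get_token controller added earlier in this patch builds the same v3 password/scope request body by hand against a hard-coded endpoint. As an illustrative sketch only (the helper name is mine, and swapping /v2.0 for /v3 in OS_AUTH_URL is an assumption, since the openrc points at the v2.0 endpoint), the equivalent request driven from that environment would look like:

import os
import requests

def get_scoped_token(project_id=None):
    # Same body shape as MoonAuth.get_token in the controllers.py shipped above;
    # endpoint and credentials come from the admin-openrc.sh environment instead
    # of a hard-coded host (assumption: the v3 API lives next to the v2.0 one).
    body = {"auth": {"identity": {"methods": ["password"],
                                  "password": {"user": {
                                      "domain": {"id": "Default"},
                                      "name": os.environ["OS_USERNAME"],
                                      "password": os.environ["OS_PASSWORD"]}}}}}
    if project_id:
        body["auth"]["scope"] = {"project": {"id": project_id}}
    url = os.environ["OS_AUTH_URL"].replace("/v2.0", "/v3") + "/auth/tokens"
    resp = requests.post(url, json=body,
                         headers={"Content-Type": "application/json"})
    resp.raise_for_status()
    return resp.headers["X-Subject-Token"]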
If +# this causes issues with your clients you can rollback to the +# *frozen* v2 api by commenting out the above stanza and using the +# following instead:: +# /v2: openstack_compute_api_legacy_v2 +# if rolling back to v2 fixes your issue please file a critical bug +# at - https://bugs.launchpad.net/nova/+bugs +# +# v21 is an exactly feature match for v2, except it has more stringent +# input validation on the wsgi surface (prevents fuzzing early on the +# API). It also provides new features via API microversions which are +# opt into for clients. Unaware clients will receive the same frozen +# v2 API feature set, but with some relaxed validation +/v2: openstack_compute_api_v21_legacy_v2_compatible +/v2.1: openstack_compute_api_v21 + +# NOTE: this is deprecated in favor of openstack_compute_api_v21_legacy_v2_compatible +[composite:openstack_compute_api_legacy_v2] +use = call:nova.api.auth:pipeline_factory +noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_ratelimit osapi_compute_app_legacy_v2 +keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext moon legacy_ratelimit osapi_compute_app_legacy_v2 +keystone_nolimit = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_legacy_v2 + +[composite:openstack_compute_api_v21] +use = call:nova.api.auth:pipeline_factory_v21 +noauth2 = cors compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21 +keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21 + +[composite:openstack_compute_api_v21_legacy_v2_compatible] +use = call:nova.api.auth:pipeline_factory_v21 +noauth2 = cors compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21 +keystone = cors compute_req_id faultwrap sizelimit authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21 + +[filter:request_id] +paste.filter_factory = oslo_middleware:RequestId.factory + +[filter:compute_req_id] +paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory + +[filter:noauth2] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory + +[filter:legacy_ratelimit] +paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory + +[filter:legacy_v2_compatible] +paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory + +[app:osapi_compute_app_legacy_v2] +paste.app_factory = nova.api.openstack.compute:APIRouter.factory + +[app:osapi_compute_app_v21] +paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory + +[pipeline:oscomputeversions] +pipeline = faultwrap oscomputeversionapp + +[app:oscomputeversionapp] +paste.app_factory = nova.api.openstack.compute.versions:Versions.factory + +########## +# Shared # +########## + +[filter:cors] +paste.filter_factory = oslo_middleware.cors:filter_factory +oslo_config_project = nova + +[filter:keystonecontext] +paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +[filter:moon] +paste.filter_factory = keystonemiddleware.moon_agent:filter_factory +authz_login=admin +authz_password=password +logfile=/var/log/moon/keystonemiddleware.log diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/demo-openrc.sh 
b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/demo-openrc.sh new file mode 100644 index 00000000..5807e868 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/demo-openrc.sh @@ -0,0 +1,13 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +export OS_USERNAME=demo +export OS_PASSWORD={{ DEMO_PASS }} +export OS_TENANT_NAME=demo +export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0 + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone-paste.ini b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone-paste.ini new file mode 100644 index 00000000..cd9ebede --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone-paste.ini @@ -0,0 +1,96 @@ +# Keystone PasteDeploy configuration file. + +[pipeline:moon_pipeline] +pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension moon_service + +[app:moon_service] +use = egg:keystone#moon_service + +[filter:debug] +use = egg:oslo.middleware#debug + +[filter:request_id] +use = egg:oslo.middleware#request_id + +[filter:build_auth_context] +use = egg:keystone#build_auth_context + +[filter:token_auth] +use = egg:keystone#token_auth + +[filter:admin_token_auth] +# This is deprecated in the M release and will be removed in the O release. +# Use `keystone-manage bootstrap` and remove this from the pipelines below. +use = egg:keystone#admin_token_auth + +[filter:json_body] +use = egg:keystone#json_body + +[filter:cors] +use = egg:oslo.middleware#cors +oslo_config_project = keystone + +[filter:ec2_extension] +use = egg:keystone#ec2_extension + +[filter:ec2_extension_v3] +use = egg:keystone#ec2_extension_v3 + +[filter:s3_extension] +use = egg:keystone#s3_extension + +[filter:url_normalize] +use = egg:keystone#url_normalize + +[filter:sizelimit] +use = egg:oslo.middleware#sizelimit + +[app:public_service] +use = egg:keystone#public_service + +[app:service_v3] +use = egg:keystone#service_v3 + +[app:admin_service] +use = egg:keystone#admin_service + +[pipeline:public_api] +# The last item in this pipeline must be public_service or an equivalent +# application. It cannot be a filter. +pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service + +[pipeline:admin_api] +# The last item in this pipeline must be admin_service or an equivalent +# application. It cannot be a filter. +pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service + +[pipeline:api_v3] +# The last item in this pipeline must be service_v3 or an equivalent +# application. It cannot be a filter. 
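This keystone-paste.ini template is the end state that the shell-based "keystone paste ini 1-5" sed tasks in moon-controller.yml also try to reach. A minimal sketch of checking that state before restarting Apache (the path and helper name are mine, not part of the role):

# Illustrative check only: confirms that the moon pipeline and the /moon route
# exist in keystone-paste.ini, i.e. the state the sed tasks and this template
# both aim for.
try:
    import configparser                  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2.7, as used by keystone here

def moon_wired(path="/etc/keystone/keystone-paste.ini"):
    cfg = configparser.RawConfigParser()
    cfg.read(path)
    return (cfg.has_section("pipeline:moon_pipeline")
            and cfg.has_section("app:moon_service")
            and cfg.has_section("composite:main")
            and cfg.has_option("composite:main", "/moon"))

print(moon_wired())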
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3 + +[app:public_version_service] +use = egg:keystone#public_version_service + +[app:admin_version_service] +use = egg:keystone#admin_version_service + +[pipeline:public_version_api] +pipeline = cors sizelimit url_normalize public_version_service + +[pipeline:admin_version_api] +pipeline = cors sizelimit url_normalize admin_version_service + +[composite:main] +use = egg:Paste#urlmap +/moon = moon_pipeline +/v2.0 = public_api +/v3 = api_v3 +/ = public_version_api + +[composite:admin] +use = egg:Paste#urlmap +/moon = moon_pipeline +/v2.0 = admin_api +/v3 = api_v3 +/ = admin_version_api diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone.conf new file mode 100644 index 00000000..649fc32c --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/keystone.conf @@ -0,0 +1,59 @@ +{% set memcached_servers = [] %} +{% set rabbitmq_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% set _ = rabbitmq_servers.append('%s:5672'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} +{% set rabbitmq_servers = rabbitmq_servers|join(',') %} +[DEFAULT] +admin_token={{ ADMIN_TOKEN }} +debug={{ DEBUG }} +log_dir = /var/log/keystone + +[cache] +backend=keystone.cache.memcache_pool +memcache_servers={{ memcached_servers}} +enabled=true + +[revoke] +driver=sql +expiration_buffer=3600 +caching=true + +[database] +connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone?charset=utf8 +idle_timeout=30 +min_pool_size=5 +max_pool_size=120 +pool_timeout=30 + + +[identity] +default_domain_id=default +driver=sql + +[assignment] +driver=sql + +[resource] +driver=sql +caching=true +cache_time=3600 + +[token] +enforce_token_bind=permissive +expiration=43200 +provider=uuid +driver=sql +caching=true +cache_time=3600 + +[eventlet_server] +public_bind_host= {{ identity_host }} +admin_bind_host= {{ identity_host }} + +[oslo_messaging_rabbit] +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +rabbit_hosts = {{ rabbitmq_servers }} diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/proxy-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/proxy-server.conf new file mode 100644 index 00000000..9bea7a8e --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/proxy-server.conf @@ -0,0 +1,775 @@ +{% set memcached_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} +[DEFAULT] +bind_ip = {{ internal_ip }} +bind_port = 8080 +# bind_timeout = 30 +# backlog = 4096 +swift_dir = /etc/swift +user = swift + +# Enables exposing configuration settings via HTTP GET /info. +# expose_info = true + +# Key to use for admin calls that are HMAC signed. Default is empty, +# which will disable admin calls to /info. +# admin_key = secret_admin_key +# +# Allows the ability to withhold sections from showing up in the public calls +# to /info. You can withhold subsections by separating the dict level with a +# ".". 
The following would cause the sections 'container_quotas' and 'tempurl' +# to not be listed, and the key max_failed_deletes would be removed from +# bulk_delete. Default value is 'swift.valid_api_versions' which allows all +# registered features to be listed via HTTP GET /info except +# swift.valid_api_versions information +# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl + +# Use an integer to override the number of pre-forked processes that will +# accept connections. Should default to the number of effective cpu +# cores in the system. It's worth noting that individual workers will +# use many eventlet co-routines to service multiple concurrent requests. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# Set the following two lines to enable SSL. This is for testing only. +# cert_file = /etc/swift/proxy.crt +# key_file = /etc/swift/proxy.key +# +# expiring_objects_container_divisor = 86400 +# expiring_objects_account_name = expiring_objects +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_headers = false +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# This optional suffix (default is empty) that would be appended to the swift transaction +# id allows one to easily figure out from which cluster that X-Trans-Id belongs to. +# This is very useful when one is managing more than one swift cluster. +# trans_id_suffix = +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar) +# cors_allow_origin = +# strict_cors_mode = True +# +# client_timeout = 60 +# eventlet_debug = false + +[pipeline:main] +# This sample pipeline uses tempauth and is used for SAIO dev work and +# testing. See below for a pipeline using keystone. +#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging moon proxy-server + +# The following pipeline shows keystone integration. Comment out the one +# above and uncomment this one. Additional steps for integrating keystone are +# covered further below in the filter sections for authtoken and keystoneauth. 
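The active pipeline above inserts moon between the second proxy-logging and proxy-server, after keystoneauth. A small sketch (function and path are illustrative, not shipped with the role) that checks the rendered file keeps that ordering:

def check_swift_pipeline(path="/etc/swift/proxy-server.conf"):
    pipeline, in_main = [], False
    with open(path) as conf:
        for line in conf:
            line = line.strip()
            if line.startswith("["):
                in_main = (line == "[pipeline:main]")
            elif in_main and line.startswith("pipeline") and "=" in line:
                pipeline = line.split("=", 1)[1].split()
    required = ("keystoneauth", "moon", "proxy-server")
    if not all(name in pipeline for name in required):
        return False, pipeline
    ordered = (pipeline.index("keystoneauth")
               < pipeline.index("moon")
               < pipeline.index("proxy-server"))
    return ordered, pipeline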
+#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server + +[app:proxy-server] +use = egg:swift#proxy +account_autocreate = True +# You can override the default log routing for this app here: +# set log_name = proxy-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_address = /dev/log +# +# log_handoffs = true +# recheck_account_existence = 60 +# recheck_container_existence = 60 +# object_chunk_size = 65536 +# client_chunk_size = 65536 +# +# How long the proxy server will wait on responses from the a/c/o servers. +# node_timeout = 10 +# +# How long the proxy server will wait for an initial response and to read a +# chunk of data from the object servers while serving GET / HEAD requests. +# Timeouts from these requests can be recovered from so setting this to +# something lower than node_timeout would provide quicker error recovery +# while allowing for a longer timeout for non-recoverable requests (PUTs). +# Defaults to node_timeout, should be overriden if node_timeout is set to a +# high number to prevent client timeouts from firing before the proxy server +# has a chance to retry. +# recoverable_node_timeout = node_timeout +# +# conn_timeout = 0.5 +# +# How long to wait for requests to finish after a quorum has been established. +# post_quorum_timeout = 0.5 +# +# How long without an error before a node's error count is reset. This will +# also be how long before a node is reenabled after suppression is triggered. +# error_suppression_interval = 60 +# +# How many errors can accumulate before a node is temporarily ignored. +# error_suppression_limit = 10 +# +# If set to 'true' any authorized user may create and delete accounts; if +# 'false' no one, even authorized, can. +# allow_account_management = false +# +# Set object_post_as_copy = false to turn on fast posts where only the metadata +# changes are stored anew and the original data file is kept in place. This +# makes for quicker posts. +# object_post_as_copy = true +# +# If set to 'true' authorized accounts that do not yet exist within the Swift +# cluster will be automatically created. +# account_autocreate = false +# +# If set to a positive value, trying to create a container when the account +# already has at least this maximum containers will result in a 403 Forbidden. +# Note: This is a soft limit, meaning a user might exceed the cap for +# recheck_account_existence before the 403s kick in. +# max_containers_per_account = 0 +# +# This is a comma separated list of account hashes that ignore the +# max_containers_per_account cap. +# max_containers_whitelist = +# +# Comma separated list of Host headers to which the proxy will deny requests. +# deny_host_headers = +# +# Prefix used when automatically creating accounts. +# auto_create_account_prefix = . +# +# Depth of the proxy put queue. +# put_queue_depth = 10 +# +# Storage nodes can be chosen at random (shuffle), by using timing +# measurements (timing), or by using an explicit match (affinity). +# Using timing measurements may allow for lower overall latency, while +# using affinity allows for finer control. In both the timing and +# affinity cases, equally-sorting nodes are still randomly chosen to +# spread load. +# The valid values for sorting_method are "affinity", "shuffle", or "timing". 
+# sorting_method = shuffle +# +# If the "timing" sorting_method is used, the timings will only be valid for +# the number of seconds configured by timing_expiry. +# timing_expiry = 300 +# +# By default on a GET/HEAD swift will connect to a storage node one at a time +# in a single thread. There is smarts in the order they are hit however. If you +# turn on concurrent_gets below, then replica count threads will be used. +# With addition of the concurrency_timeout option this will allow swift to send +# out GET/HEAD requests to the storage nodes concurrently and answer with the +# first to respond. With an EC policy the parameter only affects HEAD requests. +# concurrent_gets = off +# +# This parameter controls how long to wait before firing off the next +# concurrent_get thread. A value of 0 would be fully concurrent, any other +# number will stagger the firing of the threads. This number should be +# between 0 and node_timeout. The default is what ever you set for the +# conn_timeout parameter. +# concurrency_timeout = 0.5 +# +# Set to the number of nodes to contact for a normal request. You can use +# '* replicas' at the end to have it use the number given times the number of +# replicas for the ring being used for the request. +# request_node_count = 2 * replicas +# +# Which backend servers to prefer on reads. Format is r<N> for region +# N or r<N>z<M> for region N, zone M. The value after the equals is +# the priority; lower numbers are higher priority. +# +# Example: first read from region 1 zone 1, then region 1 zone 2, then +# anything in region 2, then everything else: +# read_affinity = r1z1=100, r1z2=200, r2=300 +# Default is empty, meaning no preference. +# read_affinity = +# +# Which backend servers to prefer on writes. Format is r<N> for region +# N or r<N>z<M> for region N, zone M. If this is set, then when +# handling an object PUT request, some number (see setting +# write_affinity_node_count) of local backend servers will be tried +# before any nonlocal ones. +# +# Example: try to write to regions 1 and 2 before writing to any other +# nodes: +# write_affinity = r1, r2 +# Default is empty, meaning no preference. +# write_affinity = +# +# The number of local (as governed by the write_affinity setting) +# nodes to attempt to contact first, before any non-local ones. You +# can use '* replicas' at the end to have it use the number given +# times the number of replicas for the ring being used for the +# request. +# write_affinity_node_count = 2 * replicas +# +# These are the headers whose values will only be shown to swift_owners. The +# exact definition of a swift_owner is up to the auth system in use, but +# usually indicates administrative responsibilities. +# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control + +[filter:tempauth] +use = egg:swift#tempauth +# You can override the default log routing for this filter here: +# set log_name = tempauth +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# The reseller prefix will verify a token begins with this prefix before even +# attempting to validate it. Also, with authorization, only Swift storage +# accounts with this prefix will be authorized by this middleware. Useful if +# multiple auth systems are in use for one Swift cluster. 
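The read_affinity example above ("r1z1=100, r1z2=200, r2=300") is a priority map keyed by region/zone. A sketch of how such a value could be parsed into a sort key (lower is preferred, unmatched nodes sort last; the parser is mine, not Swift's):

def parse_read_affinity(value):
    # value looks like "r1z1=100, r1z2=200, r2=300"; lower priority wins.
    rules = []
    for item in value.split(","):
        item = item.strip()
        if item:
            spec, prio = item.split("=")
            rules.append((spec.strip().lower(), int(prio)))

    def priority(node):  # node is a dict such as {"region": 1, "zone": 2}
        best = float("inf")
        for spec, prio in rules:
            if "z" in spec:
                region, zone = spec[1:].split("z")
                match = (node["region"] == int(region) and node["zone"] == int(zone))
            else:
                match = (node["region"] == int(spec[1:]))
            if match:
                best = min(best, prio)
        return best
    return priority

prio = parse_read_affinity("r1z1=100, r1z2=200, r2=300")
print(prio({"region": 1, "zone": 1}), prio({"region": 2, "zone": 7}))  # 100 300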
+# The reseller_prefix may contain a comma separated list of items. The first +# item is used for the token as mentioned above. If second and subsequent +# items exist, the middleware will handle authorization for an account with +# that prefix. For example, for prefixes "AUTH, SERVICE", a path of +# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty +# (blank) reseller prefix is required, it must be first in the list. Two +# single quote characters indicates an empty (blank) reseller prefix. +# reseller_prefix = AUTH + +# +# The require_group parameter names a group that must be presented by +# either X-Auth-Token or X-Service-Token. Usually this parameter is +# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah). +# By default, no group is needed. Do not use .admin. +# require_group = + +# The auth prefix will cause requests beginning with this prefix to be routed +# to the auth subsystem, for granting tokens, etc. +# auth_prefix = /auth/ +# token_life = 86400 +# +# This allows middleware higher in the WSGI pipeline to override auth +# processing, useful for middleware such as tempurl and formpost. If you know +# you're not going to use such middleware and you want a bit of extra security, +# you can set this to false. +# allow_overrides = true +# +# This specifies what scheme to return with storage urls: +# http, https, or default (chooses based on what the server is running as) +# This can be useful with an SSL load balancer in front of a non-SSL server. +# storage_url_scheme = default +# +# Lastly, you need to list all the accounts/users you want here. The format is: +# user_<account>_<user> = <key> [group] [group] [...] [storage_url] +# or if you want underscores in <account> or <user>, you can base64 encode them +# (with no equal signs) and use this format: +# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] +# There are special groups of: +# .reseller_admin = can do anything to any account for this auth +# .admin = can do anything within the account +# If neither of these groups are specified, the user can only access containers +# that have been explicitly allowed for them by a .admin or .reseller_admin. +# The trailing optional storage_url allows you to specify an alternate url to +# hand back to the user upon authentication. If not specified, this defaults to +# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve +# to what the requester would need to use to reach this host. +# Here are example entries, required for running the tests: +user_admin_admin = admin .admin .reseller_admin +user_test_tester = testing .admin +user_test2_tester2 = testing2 .admin +user_test_tester3 = testing3 +user_test5_tester5 = testing5 service + +# To enable Keystone authentication you need to have the auth token +# middleware first to be configured. Here is an example below, please +# refer to the keystone's documentation for details about the +# different settings. +# +# You'll also need to have the keystoneauth middleware enabled and have it in +# your main pipeline, as show in the sample pipeline at the top of this file. +# +# Following parameters are known to work with keystonemiddleware v2.3.0 +# (above v2.0.0), but checking the latest information in the wiki page[1] +# is recommended. +# 1. 
http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration +# +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +auth_uri = http://{{ internal_vip.ip }}:5000 +auth_url = http://{{ internal_vip.ip }}:35357 +identity_uri = http://{{ internal_vip.ip }}:35357 +memcached_servers = {{ memcached_servers }} +#auth_plugin = password +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = swift +password = {{ CINDER_PASS }} +delay_auth_decision = True +admin_user=admin +admin_password={{ ADMIN_PASS }} +admin_token={{ ADMIN_TOKEN }} +# +# delay_auth_decision defaults to False, but leaving it as false will +# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from +# working. This value must be explicitly set to True. +# delay_auth_decision = False +# +# cache = swift.cache +# include_service_catalog = False +# +[filter:keystoneauth] +use = egg:swift#keystoneauth +operator_roles = admin,user +# The reseller_prefix option lists account namespaces that this middleware is +# responsible for. The prefix is placed before the Keystone project id. +# For example, for project 12345678, and prefix AUTH, the account is +# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...). +# Several prefixes are allowed by specifying a comma-separated list +# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a +# single blank/empty prefix. If an empty prefix is required in a list of +# prefixes, a value of '' (two single quote characters) indicates a +# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_') +# character is appended to the value unless already present. +# reseller_prefix = AUTH +# +# The user must have at least one role named by operator_roles on a +# project in order to create, delete and modify containers and objects +# and to set and read privileged headers such as ACLs. +# If there are several reseller prefix items, you can prefix the +# parameter so it applies only to those accounts (for example +# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project> +# path). If you omit the prefix, the option applies to all reseller +# prefix items. For the blank/empty prefix, prefix with '' (do not put +# underscore after the two single quote characters). +# operator_roles = admin, swiftoperator +# +# The reseller admin role has the ability to create and delete accounts +# reseller_admin_role = ResellerAdmin +# +# This allows middleware higher in the WSGI pipeline to override auth +# processing, useful for middleware such as tempurl and formpost. If you know +# you're not going to use such middleware and you want a bit of extra security, +# you can set this to false. +# allow_overrides = true +# +# If the service_roles parameter is present, an X-Service-Token must be +# present in the request that when validated, grants at least one role listed +# in the parameter. The X-Service-Token may be scoped to any project. +# If there are several reseller prefix items, you can prefix the +# parameter so it applies only to those accounts (for example +# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project> +# path). If you omit the prefix, the option applies to all reseller +# prefix items. For the blank/empty prefix, prefix with '' (do not put +# underscore after the two single quote characters). +# By default, no service_roles are required. 
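The reseller_prefix rules above boil down to: prefix plus Keystone project id, with an underscore appended unless one is already present, and the blank prefix meaning no prefix at all. For example:

def account_path(project_id, reseller_prefix="AUTH"):
    # Append "_" unless the prefix already ends with one or is blank.
    if reseller_prefix and not reseller_prefix.endswith("_"):
        reseller_prefix += "_"
    return "/v1/%s%s" % (reseller_prefix, project_id)

print(account_path("12345678"))        # /v1/AUTH_12345678
print(account_path("12345678", ""))    # /v1/12345678 (blank/empty prefix)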
+# service_roles = +# +# For backwards compatibility, keystoneauth will match names in cross-tenant +# access control lists (ACLs) when both the requesting user and the tenant +# are in the default domain i.e the domain to which existing tenants are +# migrated. The default_domain_id value configured here should be the same as +# the value used during migration of tenants to keystone domains. +# default_domain_id = default +# +# For a new installation, or an installation in which keystone projects may +# move between domains, you should disable backwards compatible name matching +# in ACLs by setting allow_names_in_acls to false: +# allow_names_in_acls = true + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". +# This facility may be used to temporarily remove a Swift node from a load +# balancer pool during maintenance or upgrade (remove the file to allow the +# node back into the load balancer pool). +# disable_path = + +[filter:cache] +use = egg:swift#memcache +memcache_servers = {{ memcached_servers }} +# You can override the default log routing for this filter here: +# set log_name = cache +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# If not set here, the value for memcache_servers will be read from +# memcache.conf (see memcache.conf-sample) or lacking that file, it will +# default to the value below. You can specify multiple servers separated with +# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must +# follow rfc3986 section-3.2.2, i.e. [::1]:11211) +# memcache_servers = 127.0.0.1:11211 +# +# Sets how memcache values are serialized and deserialized: +# 0 = older, insecure pickle serialization +# 1 = json serialization but pickles can still be read (still insecure) +# 2 = json serialization only (secure and the default) +# If not set here, the value for memcache_serialization_support will be read +# from /etc/swift/memcache.conf (see memcache.conf-sample). +# To avoid an instant full cache flush, existing installations should +# upgrade with 0, then set to 1 and reload, then after some time (24 hours) +# set to 2 and reload. +# In the future, the ability to use pickle serialization will be removed. +# memcache_serialization_support = 2 +# +# Sets the maximum number of connections to each memcached server per worker +# memcache_max_connections = 2 +# +# More options documented in memcache.conf-sample + +[filter:ratelimit] +use = egg:swift#ratelimit +# You can override the default log routing for this filter here: +# set log_name = ratelimit +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# clock_accuracy should represent how accurate the proxy servers' system clocks +# are with each other. 1000 means that all the proxies' clock are accurate to +# each other within 1 millisecond. No ratelimit should be higher than the +# clock accuracy. +# clock_accuracy = 1000 +# +# max_sleep_time_seconds = 60 +# +# log_sleep_time_seconds of 0 means disabled +# log_sleep_time_seconds = 0 +# +# allows for slow rates (e.g. running up to 5 sec's behind) to catch up. +# rate_buffer_seconds = 5 +# +# account_ratelimit of 0 means disabled +# account_ratelimit = 0 + +# DEPRECATED- these will continue to work but will be replaced +# by the X-Account-Sysmeta-Global-Write-Ratelimit flag. 
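account_ratelimit and max_sleep_time_seconds above interact roughly as follows: requests are spaced 1/rate seconds apart, and a request that would need to wait longer than max_sleep_time_seconds is rejected instead. This is only the idea, not Swift's middleware (which keeps this state in memcache and answers 498):

import time

class SimpleRateLimiter(object):
    def __init__(self, rate_per_sec, max_sleep=60.0):
        self.interval = 1.0 / rate_per_sec
        self.max_sleep = max_sleep
        self.next_allowed = 0.0

    def wait(self):
        now = time.time()
        sleep_for = max(0.0, self.next_allowed - now)
        if sleep_for > self.max_sleep:
            raise RuntimeError("would sleep %.1fs, rejecting" % sleep_for)
        self.next_allowed = max(now, self.next_allowed) + self.interval
        time.sleep(sleep_for)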
+# Please see ratelimiting docs for details. +# these are comma separated lists of account names +# account_whitelist = a,b +# account_blacklist = c,d + +# with container_limit_x = r +# for containers of size x limit write requests per second to r. The container +# rate will be linearly interpolated from the values given. With the values +# below, a container of size 5 will get a rate of 75. +# container_ratelimit_0 = 100 +# container_ratelimit_10 = 50 +# container_ratelimit_50 = 20 + +# Similarly to the above container-level write limits, the following will limit +# container GET (listing) requests. +# container_listing_ratelimit_0 = 100 +# container_listing_ratelimit_10 = 50 +# container_listing_ratelimit_50 = 20 + +[filter:domain_remap] +use = egg:swift#domain_remap +# You can override the default log routing for this filter here: +# set log_name = domain_remap +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# storage_domain = example.com +# path_root = v1 + +# Browsers can convert a host header to lowercase, so check that reseller +# prefix on the account is the correct case. This is done by comparing the +# items in the reseller_prefixes config option to the found prefix. If they +# match except for case, the item from reseller_prefixes will be used +# instead of the found reseller prefix. When none match, the default reseller +# prefix is used. When no default reseller prefix is configured, any request +# with an account prefix not in that list will be ignored by this middleware. +# reseller_prefixes = AUTH +# default_reseller_prefix = + +[filter:catch_errors] +use = egg:swift#catch_errors +# You can override the default log routing for this filter here: +# set log_name = catch_errors +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +[filter:cname_lookup] +# Note: this middleware requires python-dnspython +use = egg:swift#cname_lookup +# You can override the default log routing for this filter here: +# set log_name = cname_lookup +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# Specify the storage_domain that match your cloud, multiple domains +# can be specified separated by a comma +# storage_domain = example.com +# +# lookup_depth = 1 + +# Note: Put staticweb just after your auth filter(s) in the pipeline +[filter:staticweb] +use = egg:swift#staticweb +# You can override the default log routing for this filter here: +# set log_name = staticweb +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline +[filter:tempurl] +use = egg:swift#tempurl +# The methods allowed with Temp URLs. +# methods = GET HEAD PUT POST DELETE +# +# The headers to remove from incoming requests. Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. incoming_allow_headers is a list of exceptions to these +# removals. +# incoming_remove_headers = x-timestamp +# +# The headers allowed as exceptions to incoming_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# incoming_allow_headers = +# +# The headers to remove from outgoing responses. 
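The container_ratelimit_* note above ("a container of size 5 will get a rate of 75") is plain linear interpolation between the configured size/rate points:

def container_ratelimit(size, points):
    # points maps container size -> max write rate, e.g. {0: 100, 10: 50, 50: 20}
    sizes = sorted(points)
    if size <= sizes[0]:
        return float(points[sizes[0]])
    if size >= sizes[-1]:
        return float(points[sizes[-1]])
    for lo, hi in zip(sizes, sizes[1:]):
        if lo <= size <= hi:
            frac = float(size - lo) / (hi - lo)
            return points[lo] + frac * (points[hi] - points[lo])

print(container_ratelimit(5, {0: 100, 10: 50, 50: 20}))  # 75.0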
Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. outgoing_allow_headers is a list of exceptions to these +# removals. +# outgoing_remove_headers = x-object-meta-* +# +# The headers allowed as exceptions to outgoing_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# outgoing_allow_headers = x-object-meta-public-* + +# Note: Put formpost just before your auth filter(s) in the pipeline +[filter:formpost] +use = egg:swift#formpost + +# Note: Just needs to be placed before the proxy-server in the pipeline. +[filter:name_check] +use = egg:swift#name_check +# forbidden_chars = '"`<> +# maximum_length = 255 +# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$ + +[filter:list-endpoints] +use = egg:swift#list_endpoints +# list_endpoints_path = /endpoints/ + +[filter:proxy-logging] +use = egg:swift#proxy_logging +# If not set, logging directives from [DEFAULT] without "access_" will be used +# access_log_name = swift +# access_log_facility = LOG_LOCAL0 +# access_log_level = INFO +# access_log_address = /dev/log +# +# If set, access_log_udp_host will override access_log_address +# access_log_udp_host = +# access_log_udp_port = 514 +# +# You can use log_statsd_* from [DEFAULT] or override them here: +# access_log_statsd_host = +# access_log_statsd_port = 8125 +# access_log_statsd_default_sample_rate = 1.0 +# access_log_statsd_sample_rate_factor = 1.0 +# access_log_statsd_metric_prefix = +# access_log_headers = false +# +# If access_log_headers is True and access_log_headers_only is set only +# these headers are logged. Multiple headers can be defined as comma separated +# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime +# access_log_headers_only = +# +# By default, the X-Auth-Token is logged. To obscure the value, +# set reveal_sensitive_prefix to the number of characters to log. +# For example, if set to 12, only the first 12 characters of the +# token appear in the log. An unauthorized access of the log file +# won't allow unauthorized usage of the token. However, the first +# 12 or so characters is unique enough that you can trace/debug +# token usage. Set to 0 to suppress the token completely (replaced +# by '...' in the log). +# Note: reveal_sensitive_prefix will not affect the value +# logged with access_log_headers=True. +# reveal_sensitive_prefix = 16 +# +# What HTTP methods are allowed for StatsD logging (comma-sep); request methods +# not in this list will have "BAD_METHOD" for the <verb> portion of the metric. +# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS +# +# Note: The double proxy-logging in the pipeline is not a mistake. The +# left-most proxy-logging is there to log requests that were handled in +# middleware and never made it through to the right-most middleware (and +# proxy server). Double logging is prevented for normal requests. See +# proxy-logging docs. + +# Note: Put before both ratelimit and auth in the pipeline. +[filter:bulk] +use = egg:swift#bulk +# max_containers_per_extraction = 10000 +# max_failed_extractions = 1000 +# max_deletes_per_request = 10000 +# max_failed_deletes = 1000 + +# In order to keep a connection active during a potentially long bulk request, +# Swift may return whitespace prepended to the actual response body. This +# whitespace will be yielded no more than every yield_frequency seconds. 
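reveal_sensitive_prefix above truncates X-Auth-Token values in the access log; a sketch of the behaviour described in that comment (the function name is mine):

def obscure_token(token, reveal_sensitive_prefix=16):
    if reveal_sensitive_prefix <= 0:
        return "..."
    if len(token) <= reveal_sensitive_prefix:
        return token
    return token[:reveal_sensitive_prefix] + "..."

print(obscure_token("gAAAAABabc123def456ghi789", 12))  # gAAAAABabc12...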
+# yield_frequency = 10 + +# Note: The following parameter is used during a bulk delete of objects and +# their container. This would frequently fail because it is very likely +# that all replicated objects have not been deleted by the time the middleware got a +# successful response. It can be configured the number of retries. And the +# number of seconds to wait between each retry will be 1.5**retry + +# delete_container_retry_count = 0 + +# Note: Put after auth and staticweb in the pipeline. +[filter:slo] +use = egg:swift#slo +# max_manifest_segments = 1000 +# max_manifest_size = 2097152 +# +# Rate limiting applies only to segments smaller than this size (bytes). +# rate_limit_under_size = 1048576 +# +# Start rate-limiting SLO segment serving after the Nth small segment of a +# segmented object. +# rate_limit_after_segment = 10 +# +# Once segment rate-limiting kicks in for an object, limit segments served +# to N per second. 0 means no rate-limiting. +# rate_limit_segments_per_sec = 1 +# +# Time limit on GET requests (seconds) +# max_get_time = 86400 + +# Note: Put after auth and staticweb in the pipeline. +# If you don't put it in the pipeline, it will be inserted for you. +[filter:dlo] +use = egg:swift#dlo +# Start rate-limiting DLO segment serving after the Nth segment of a +# segmented object. +# rate_limit_after_segment = 10 +# +# Once segment rate-limiting kicks in for an object, limit segments served +# to N per second. 0 means no rate-limiting. +# rate_limit_segments_per_sec = 1 +# +# Time limit on GET requests (seconds) +# max_get_time = 86400 + +# Note: Put after auth in the pipeline. +[filter:container-quotas] +use = egg:swift#container_quotas + +# Note: Put after auth in the pipeline. +[filter:account-quotas] +use = egg:swift#account_quotas + +[filter:gatekeeper] +use = egg:swift#gatekeeper +# Set this to false if you want to allow clients to set arbitrary X-Timestamps +# on uploaded objects. This may be used to preserve timestamps when migrating +# from a previous storage system, but risks allowing users to upload +# difficult-to-delete data. +# shunt_inbound_x_timestamp = true +# +# You can override the default log routing for this filter here: +# set log_name = gatekeeper +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +[filter:container_sync] +use = egg:swift#container_sync +# Set this to false if you want to disallow any full url values to be set for +# any new X-Container-Sync-To headers. This will keep any new full urls from +# coming in, but won't change any existing values already in the cluster. +# Updating those will have to be done manually, as knowing what the true realm +# endpoint should be cannot always be guessed. +# allow_full_urls = true +# Set this to specify this clusters //realm/cluster as "current" in /info +# current = //REALM/CLUSTER + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after catch_errors, gatekeeper and healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). 
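The bulk-delete retry note above ("the number of seconds to wait between each retry will be 1.5**retry") gives, assuming retries are numbered from 1:

def retry_waits(delete_container_retry_count):
    return [1.5 ** n for n in range(1, delete_container_retry_count + 1)]

print(retry_waits(3))  # [1.5, 2.25, 3.375]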
+# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/proxy.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false + +# Note: Put after slo, dlo in the pipeline. +# If you don't put it in the pipeline, it will be inserted automatically. +[filter:versioned_writes] +use = egg:swift#versioned_writes +# Enables using versioned writes middleware and exposing configuration +# settings via HTTP GET /info. +# WARNING: Setting this option bypasses the "allow_versions" option +# in the container configuration file, which will be eventually +# deprecated. See documentation for more details. +# allow_versioned_writes = false + + +[filter:moon] +paste.filter_factory = keystonemiddleware.moon_agent:filter_factory +authz_login=admin +authz_password={{ ADMIN_PASS }} +auth_host = {{ internal_vip.ip }} +logfile=/var/log/moon/keystonemiddleware.log diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/wsgi-keystone.conf.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/wsgi-keystone.conf.j2 new file mode 100644 index 00000000..64d864af --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/templates/wsgi-keystone.conf.j2 @@ -0,0 +1,46 @@ + {% set work_threads = (ansible_processor_vcpus + 1) // 2 %} +<VirtualHost {{ internal_ip }}:5000> + WSGIDaemonProcess keystone-public processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP} + WSGIProcessGroup keystone-public + WSGIScriptAlias / /usr/bin/keystone-wsgi-public + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/{{ http_service_name }}/keystone.log + CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> + +<VirtualHost {{ internal_ip }}:35357> + WSGIDaemonProcess keystone-admin processes={{ work_threads }} threads={{ work_threads }} user=keystone group=keystone display-name=%{GROUP} + WSGIProcessGroup keystone-admin + WSGIScriptAlias / /usr/bin/keystone-wsgi-admin + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/{{ http_service_name }}/keystone.log + CustomLog /var/log/{{ http_service_name }}/keystone_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/Debian.yml new file mode 100644 index 00000000..0da81179 --- /dev/null +++ 
b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/Debian.yml @@ -0,0 +1,168 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +packages: + - adduser + - dbconfig-common + - init-system-helpers + - python-keystone + - q-text-as-data + - sqlite3 + - ssl-cert + - debconf + - lsb-base + - python:any + - libjs-sphinxdoc + - python-pip + - unzip + - apache2 + - libapache2-mod-wsgi + +dependency_packages: + - python-cryptography + - python-dateutil + - python-dogpile.cache + - python-eventlet + - python-greenlet + - python-jsonschema + - python-keystoneclient + - python-keystonemiddleware + - python-ldap + - python-ldappool + - python-lxml + - python-memcache + - python-migrate + - python-msgpack + - python-mysqldb + - python-oauthlib + - python-openstackclient + - python-oslo.cache + - python-oslo.concurrency + - python-oslo.config + - python-oslo.context + - python-oslo.db + - python-oslo.i18n + - python-oslo.log + - python-oslo.messaging + - python-oslo.middleware + - python-oslo.policy + - python-oslo.serialization + - python-oslo.service + - python-oslo.utils + - python-pam + - python-passlib + - python-paste + - python-pastedeploy + - python-pbr + - python-pycadf + - python-pymysql + - python-pysaml2 + - python-pysqlite2 + - python-routes + - python-six + - python-sqlalchemy + - python-stevedore + - python-webob + - unzip + - python3-keystoneauth1 + - python3-keystoneclient + - python3-oslo.config + - python3-oslo.context + - python3-oslo.i18n + - python3-oslo.serialization + - python-oslo.service + - python-oslo.utils + - python-pam + - python-passlib + - python-paste + - python-pastedeploy + - python-pbr + - python-pycadf + - python-pymysql + - python-pysaml2 + - python-pysqlite2 + - python-routes + - python-six + - python-sqlalchemy + - python-stevedore + - python-webob + - unzip + - python3-keystoneauth1 + - python3-keystoneclient + - python3-oslo.config + - python3-oslo.context + - python3-oslo.i18n + - python3-oslo.serialization + - python3-oslo.utils + - apache2 + - libapache2-mod-wsgi + - python3-cryptography + - python3-dateutil + - python3-dogpile.cache + - python3-eventlet + - python3-greenlet + - python3-jsonschema + - python3-keystoneclient + - python3-keystonemiddleware + - python3-lxml + - python3-memcache + - python3-migrate + - python3-msgpack + - python3-mysqldb + - python3-oauthlib + - python3-openstackclient + - python3-oslo.cache + - python3-oslo.concurrency + - python3-oslo.config + - python3-oslo.context + - python3-oslo.db + - python3-oslo.i18n + - python3-oslo.log + - python3-oslo.messaging + - python3-oslo.middleware + - python3-oslo.policy + - python3-oslo.serialization + - python3-oslo.service + - python3-oslo.utils + - python3-pam + - python3-passlib + - python3-paste + - python3-pastedeploy + - python3-pbr + - python3-pycadf + - python3-pymysql + - python3-pysaml2 + - python3-routes + - python3-six + - python3-sqlalchemy + - python3-stevedore + - python3-webob + - python3-oslo.service + - python3-oslo.utils + - python3-pam + - python3-passlib + - python3-paste + - python3-pastedeploy + - python3-pbr + - python3-pycadf + - 
python3-pymysql + - python3-pysaml2 + - python3-routes + - python3-six + - python3-sqlalchemy + - python3-stevedore + - python3-webob + +services: + - apache2 + + +apache_config_dir: /etc/apache2 +http_service_name: apache2 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/main.yml new file mode 100644 index 00000000..cff8c7c2 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/moon/vars/main.yml @@ -0,0 +1,172 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: [] + +os_services: + - name: keystone + type: identity + region: RegionOne + description: "OpenStack Identity" + publicurl: "http://{{ public_vip.ip }}:5000/v2.0" + internalurl: "http://{{ internal_vip.ip }}:5000/v2.0" + adminurl: "http://{{ internal_vip.ip }}:35357/v2.0" + + - name: glance + type: image + region: RegionOne + description: "OpenStack Image Service" + publicurl: "http://{{ public_vip.ip }}:9292" + internalurl: "http://{{ internal_vip.ip }}:9292" + adminurl: "http://{{ internal_vip.ip }}:9292" + + - name: nova + type: compute + region: RegionOne + description: "OpenStack Compute" + publicurl: "http://{{ public_vip.ip }}:8774/v2/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8774/v2/%(tenant_id)s" + + - name: neutron + type: network + region: RegionOne + description: "OpenStack Networking" + publicurl: "http://{{ public_vip.ip }}:9696" + internalurl: "http://{{ internal_vip.ip }}:9696" + adminurl: "http://{{ internal_vip.ip }}:9696" + + - name: ceilometer + type: metering + region: RegionOne + description: "OpenStack Telemetry" + publicurl: "http://{{ public_vip.ip }}:8777" + internalurl: "http://{{ internal_vip.ip }}:8777" + adminurl: "http://{{ internal_vip.ip }}:8777" + + - name: aodh + type: alarming + region: RegionOne + description: "OpenStack Telemetry" + publicurl: "http://{{ public_vip.ip }}:8042" + internalurl: "http://{{ internal_vip.ip }}:8042" + adminurl: "http://{{ internal_vip.ip }}:8042" + +# - name: cinder +# type: volume +# region: RegionOne +# description: "OpenStack Block Storage" +# publicurl: "http://{{ public_vip.ip }}:8776/v1/%(tenant_id)s" +# internalurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s" +# adminurl: "http://{{ internal_vip.ip }}:8776/v1/%(tenant_id)s" +# +# - name: cinderv2 +# type: volumev2 +# region: RegionOne +# description: "OpenStack Block Storage v2" +# publicurl: "http://{{ public_vip.ip }}:8776/v2/%(tenant_id)s" +# internalurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s" +# adminurl: "http://{{ internal_vip.ip }}:8776/v2/%(tenant_id)s" + + - name: heat + type: orchestration + region: RegionOne + description: "OpenStack Orchestration" + publicurl: "http://{{ public_vip.ip }}:8004/v1/%(tenant_id)s" + internalurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s" + adminurl: "http://{{ internal_vip.ip }}:8004/v1/%(tenant_id)s" + + - name: heat-cfn + type: cloudformation + region: RegionOne + 
description: "OpenStack CloudFormation Orchestration" + publicurl: "http://{{ public_vip.ip }}:8000/v1" + internalurl: "http://{{ internal_vip.ip }}:8000/v1" + adminurl: "http://{{ internal_vip.ip }}:8000/v1" + +# - name: swift +# type: object-store +# region: RegionOne +# description: "OpenStack Object Storage" +# publicurl: "http://{{ public_vip.ip }}:8080/v1/AUTH_%(tenant_id)s" +# internalurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s" +# adminurl: "http://{{ internal_vip.ip }}:8080/v1/AUTH_%(tenant_id)s" + +os_users: + - user: admin + password: "{{ ADMIN_PASS }}" + email: admin@admin.com + role: admin + tenant: admin + tenant_description: "Admin Tenant" + + - user: glance + password: "{{ GLANCE_PASS }}" + email: glance@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: nova + password: "{{ NOVA_PASS }}" + email: nova@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: keystone + password: "{{ KEYSTONE_PASS }}" + email: keystone@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: neutron + password: "{{ NEUTRON_PASS }}" + email: neutron@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: ceilometer + password: "{{ CEILOMETER_PASS }}" + email: ceilometer@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: cinder + password: "{{ CINDER_PASS }}" + email: cinder@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: heat + password: "{{ HEAT_PASS }}" + email: heat@admin.com + role: admin + tenant: service + tenant_description: "Service Tenant" + + - user: demo + password: "" + email: heat@demo.com + role: heat_stack_user + tenant: demo + tenant_description: "Demo Tenant" + +# - user: swift +# password: "{{ CINDER_PASS }}" +# email: swift@admin.com +# role: admin +# tenant: service +# tenant_description: "Service Tenant" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/handlers/main.yml new file mode 100644 index 00000000..ca4e8088 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/handlers/main.yml @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart neutron compute service + service: name={{ item }} state=restarted enabled=yes + with_items: services | union(services_noarch) + +- name: restart nova-compute services + service: name=nova-compute state=restarted enabled=yes diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/tasks/main.yml new file mode 100644 index 00000000..fd3e51d3 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/tasks/main.yml @@ -0,0 +1,75 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: activate ipv4 forwarding + sysctl: name=net.ipv4.ip_forward value=1 + state=present reload=yes + +- name: deactivate ipv4 rp filter + sysctl: name=net.ipv4.conf.all.rp_filter value=0 + state=present reload=yes + +- name: deactivate ipv4 default rp filter + sysctl: name=net.ipv4.conf.default.rp_filter + value=0 state=present reload=yes + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install compute-related neutron packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: fix openstack neutron plugin config file + shell: | + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service + systemctl daemon-reload + when: ansible_os_family == 'RedHat' + +- name: fix openstack neutron plugin config file ubuntu + shell: | + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init/neutron-openvswitch-agent.conf + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent + when: ansible_os_family == "Debian" + +- name: generate neutron compute service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: config ml2 plugin + template: src=templates/ml2_conf.ini + dest=/etc/neutron/plugins/ml2/ml2_conf.ini + backup=yes + +- name: ln plugin.ini + file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link + +- name: config neutron + template: src=templates/neutron.conf + dest=/etc/neutron/neutron.conf backup=yes + notify: + - restart neutron compute service + - restart nova-compute services + +- meta: flush_handlers + +- include: ../../neutron-network/tasks/odl.yml + when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}" diff --git 
a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/vars/Debian.yml new file mode 100644 index 00000000..83d7f323 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-compute/vars/Debian.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +packages: + - neutron-common + - neutron-plugin-ml2 + - openvswitch-switch-dpdk + - openvswitch-switch + - neutron-plugin-openvswitch-agent + +services: + - neutron-openvswitch-agent diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/tasks/main.yml new file mode 100644 index 00000000..31f7f17c --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/tasks/main.yml @@ -0,0 +1,117 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: activate ipv4 forwarding + sysctl: name=net.ipv4.ip_forward value=1 + state=present reload=yes + +- name: deactivate ipv4 rp filter + sysctl: name=net.ipv4.conf.all.rp_filter value=0 + state=present reload=yes + +- name: deactivate ipv4 default rp filter + sysctl: name=net.ipv4.conf.default.rp_filter + value=0 state=present reload=yes + +- name: assert kernel support for vxlan + command: modinfo -F version vxlan + when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" + +- name: assert iproute2 suppport for vxlan + command: ip link add type vxlan help + register: iproute_out + failed_when: iproute_out.rc == 255 + when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install neutron network related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: generate neutron network service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) + +- name: fix openstack neutron plugin config file + shell: | + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service + systemctl daemon-reload + when: ansible_os_family == 'RedHat' + +- name: fix openstack neutron plugin config file ubuntu + shell: | + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' 
/etc/init/neutron-openvswitch-agent.conf + sed -i 's,plugins/ml2/openvswitch_agent.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent + when: ansible_os_family == "Debian" + +- name: config l3 agent + template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini + backup=yes + +- name: config dhcp agent + template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini + backup=yes + +- name: update dnsmasq-neutron.conf + template: src=templates/dnsmasq-neutron.conf + dest=/etc/neutron/dnsmasq-neutron.conf + +- name: config metadata agent + template: src=metadata_agent.ini + dest=/etc/neutron/metadata_agent.ini backup=yes + +- name: config ml2 plugin + template: src=templates/ml2_conf.ini + dest=/etc/neutron/plugins/ml2/ml2_conf.ini + backup=yes + +- name: ln plugin.ini + file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link + +- name: config neutron + template: src=templates/neutron.conf + dest=/etc/neutron/neutron.conf backup=yes + +- name: force mtu to 1450 for vxlan + lineinfile: + dest: /etc/neutron/dnsmasq-neutron.conf + regexp: '^dhcp-option-force' + line: 'dhcp-option-force=26,1450' + when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" + +- include: firewall.yml + when: enable_fwaas == True + +- include: vpn.yml + when: enable_vpnaas == True + +- include: odl.yml + when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: restart neutron network relation service + service: name={{ item }} state=restarted enabled=yes + with_flattened: + - services_noarch + - services + +- meta: flush_handlers diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/vars/Debian.yml new file mode 100644 index 00000000..1a78ca8c --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/neutron-network/vars/Debian.yml @@ -0,0 +1,25 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - neutron-plugin-ml2 + - openvswitch-switch-dpdk + - openvswitch-switch + - neutron-l3-agent + - neutron-dhcp-agent + - neutron-plugin-openvswitch-agent + +services: + - openvswitch-switch + - neutron-openvswitch-agent + +openvswitch_agent: neutron-plugin-openvswitch-agent + +xorp_packages: + - xorp diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/tasks/main.yml new file mode 100644 index 00000000..7bb4f347 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/tasks/main.yml @@ -0,0 +1,63 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: disable auto start + copy: + content: "#!/bin/sh\nexit 101" + dest: "/usr/sbin/policy-rc.d" + mode: 0755 + when: ansible_os_family == "Debian" + +- name: install nova-compute related packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages | union(packages_noarch) + +- name: restart virtlogd + service: name=virtlogd state=started enabled=yes + when: ansible_os_family == "Debian" + +- name: enable auto start + file: + path=/usr/sbin/policy-rc.d + state=absent + when: ansible_os_family == "Debian" + +- name: update nova-compute conf + template: src=templates/{{ item }} dest=/etc/nova/{{ item }} + with_items: + - nova.conf + notify: + - restart nova-compute services + +- name: get number of cpu support virtualization + shell: egrep -c '(vmx|svm)' /proc/cpuinfo + register: kvm_cpu_num + +- name: update nova-compute conf + template: src={{ item }} dest=/etc/nova/{{ item }} + with_items: + - nova-compute.conf + notify: + - restart nova-compute services + +- name: generate neutron control service list + lineinfile: dest=/opt/service create=yes line='{{ item }}' + with_items: services | union(services_noarch) +#' +- name: remove nova sqlite db + shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed + +- meta: flush_handlers + +- name: restart nova-compute and libvirt-bin + shell: > + service nova-compute restart; + service libvirt-bin restart; diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova-compute.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova-compute.conf new file mode 100644 index 00000000..305d408b --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-compute/templates/nova-compute.conf @@ -0,0 +1,11 @@ +[DEFAULT] +compute_driver=libvirt.LibvirtDriver +force_raw_images = true +[libvirt] +{% if kvm_cpu_num.stdout_lines[0]|int == 0 %} +virt_type=qemu +{% else %} +virt_type=kvm +{% endif %} +images_type = raw +mem_stats_period_seconds=0 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-controller/tasks/nova_config.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-controller/tasks/nova_config.yml new file mode 100644 index 00000000..f332c97a --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/nova-controller/tasks/nova_config.yml @@ -0,0 +1,21 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: nova api db sync + shell: su -s /bin/sh -c "nova-manage api_db sync" nova + ignore_errors: True + notify: + - restart nova service + +- name: nova db sync + nova_manage: action=dbsync + notify: + - restart nova service + +- meta: flush_handlers diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/files/odl-aaa-moon.tar.gz b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/odl-aaa-moon.tar.gz Binary files differindex dd03749c..dd03749c 100644 --- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/odl_cluster/files/odl-aaa-moon.tar.gz +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/odl-aaa-moon.tar.gz diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/opendaylight.service b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/opendaylight.service new file mode 100644 index 00000000..6c9e4c44 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/files/opendaylight.service @@ -0,0 +1,21 @@ +[Unit] +Description=OpenDaylight +After= + + +[Service] +User=root +Group=root +Type=simple +EnvironmentFile=-/opt/moon-environment +WorkingDirectory=/opt/opendaylight-0.3.0 +PermissionsStartOnly=true +ExecStartPre= +ExecStart=/usr/lib/jvm/java-8-oracle/bin/java -Djava.security.properties=/opt/opendaylight-0.3.0/etc/odl.java.security -server -Xms128M -Xmx2048m -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -XX:MaxPermSize=512m -Dcom.sun.management.jmxremote -Djava.endorsed.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/endorsed:/usr/lib/jvm/java-8-oracle/lib/endorsed:/opt/opendaylight-0.3.0/lib/endorsed -Djava.ext.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/ext:/usr/lib/jvm/java-8-oracle/lib/ext:/opt/opendaylight-0.3.0/lib/ext -Dkaraf.instances=/opt/opendaylight-0.3.0/instances -Dkaraf.home=/opt/opendaylight-0.3.0 -Dkaraf.base=/opt/opendaylight-0.3.0 -Dkaraf.data=/opt/opendaylight-0.3.0/data -Dkaraf.etc=/opt/opendaylight-0.3.0/etc -Djava.io.tmpdir=/opt/opendaylight-0.3.0/data/tmp -Djava.util.logging.config.file=/opt/opendaylight-0.3.0/etc/java.util.logging.properties -Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true -classpath /opt/opendaylight-0.3.0/lib/karaf-jaas-boot.jar:/opt/opendaylight-0.3.0/lib/karaf-jmx-boot.jar:/opt/opendaylight-0.3.0/lib/karaf-org.osgi.core.jar:/opt/opendaylight-0.3.0/lib/karaf.branding-1.2.2-Beryllium-SR2.jar:/opt/opendaylight-0.3.0/lib/karaf.jar org.apache.karaf.main.Main +Restart=on-failure +LimitNOFILE=65535 +TimeoutStopSec=15 + +[Install] +WantedBy=multi-user.target + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/moon-odl.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/moon-odl.yml new file mode 100644 index 00000000..25306059 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/moon-odl.yml @@ -0,0 +1,55 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: delete data journal snapshots + shell: rm -rf {{ odl_home }}/{{ item }} + with_items: + - journal + - data + - snapshots + +- name: remove aaa feature + shell: rm -rf {{ odl_home }}/system/org/opendaylight/aaa/ + +- name: download apache maven package file + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/moon/apache-maven-3.3.9-bin.tar.gz" dest=/opt/apache-maven-3.3.9-bin.tar.gz + +- name: create maven folder + shell: mkdir -p /opt/apache-maven-3.3.9/ + +- name: extract maven + command: su -s /bin/sh -c "tar zxf /opt/apache-maven-3.3.9-bin.tar.gz -C /opt/apache-maven-3.3.9/ --strip-components 1 --no-overwrite-dir -k --skip-old-files" root + +- name: install maven + shell: ln -s /opt/apache-maven-3.3.9/bin/mvn /usr/local/bin/mvn; + +- name: create m2 directory + file: path=/root/.m2/ state=directory mode=0755 + +- name: copy settings.xml + template: src=settings.xml dest=/root/.m2/settings.xml + +- name: upload swift lib + unarchive: src=odl-aaa-moon.tar.gz dest=/home/ + +- name: install aaa + shell: > + export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/usr/lib/jvm/java-8-oracle/bin:/opt/apache-maven-3.3.3/bin"; + export JAVA_HOME="/usr/lib/jvm/java-8-oracle"; + export _JAVA_OPTIONS="-Djava.net.preferIPv4Stack=true"; + export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=512m"; + cd /home/odl-aaa-moon/aaa/; + mvn clean install -DskipTests; + +- name: remove shiro ini + shell: rm -f {{ odl_home }}/etc/shiro.ini + +- name: set moon env + template: src=moon-environment dest=/opt/moon-environment diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/odl_controller.yml new file mode 100755 index 00000000..4cf7948a --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/odl_controller.yml @@ -0,0 +1,294 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: install controller packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: controller_packages | union(controller_packages_noarch) + +- name: get image http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: download oracle-jdk8 package file + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }} + +#" + +- name: upload install_jdk8 scripts + unarchive: src=install_jdk8.tar dest=/opt/ + +- name: install install_jdk8 package + command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh" + +#- name: install opendaylight packages +# apt: name={{ item }} state=present +# with_items: +# - openjdk-8-jdk + +#- name: create odl directories +# file: +# path: /opt/opendaylight-0.2.2 +# state: "directory" +# group: root +# owner: root +# mode: 0755 + +- name: create odl group + group: name=odl system=yes state=present + +- name: create odl user + user: + name: odl + group: odl + home: "{{ odl_home }}" + createhome: "yes" + system: "yes" + shell: "/bin/false" + +#- name: get image http server +# shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf +# register: http_server + +- name: download odl package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/odl/{{ odl_pkg_url }}" dest=/opt/{{ odl_pkg_name }} + +# " + +#- name: download odl package +# get_url: url={{ odl_pkg_url }} dest=/opt/{{ odl_pkg_name }} + +# TODO: unarchive doesn't support strip-component at the moment +# TODO: switch to use untar after support is added. 
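(Editor's sketch, not part of the original change: the two TODO comments above explain why the package is extracted with a raw su + tar command instead of the unarchive module. Assuming the deployed Ansible is 2.1 or newer, where unarchive accepts an extra_opts list that is passed straight to tar, the same step could look roughly like the task below; owner/group stand in for the "odl" user given to su, and extra_opts carries the tar flags the existing command uses. This is an untested illustration, not a drop-in replacement.)

- name: extract odl package (unarchive variant)
  unarchive:
    src: "/opt/{{ odl_pkg_name }}"
    dest: "{{ odl_home }}"
    copy: no                     # archive was already downloaded to the target host
    owner: odl
    group: odl
    extra_opts:                  # same tar options as the command-based task below
      - --strip-components=1
      - --no-overwrite-dir
      - --skip-old-files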
+- name: extract odl package +# unarchive: src=/opt/{{ odl_pkg_name }} dest={{ odl_home }} group=odl owner=odl mode=0775 copy=no + command: su -s /bin/sh -c "tar xzf /opt/{{ odl_pkg_name }} -C {{ odl_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" odl +# notify: +# - restart odl service + +- name: opendaylight system file + copy: + src: "{{ service_file.src }}" + dest: "{{ service_file.dst }}" + mode: 0755 + +- name: set l3 fwd enable in custom.properties + template: + src: custom.properties + dest: "{{ odl_home }}/etc/custom.properties" + owner: odl + group: odl + mode: 0775 + when: odl_l3_agent == "Enable" + +- name: create karaf config + template: + src: org.apache.karaf.features.cfg.Debian + dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg" + owner: odl + group: odl + mode: 0775 + when: ansible_os_family == "Debian" + +- name: create karaf config + template: + src: org.apache.karaf.features.cfg.Redhat + dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg" + owner: odl + group: odl + mode: 0775 + when: ansible_os_family == "RedHat" + +- name: create tomcat config + template: + src: tomcat-server.xml + dest: "{{ odl_home }}/configuration/tomcat-server.xml" + +- name: create tomcat config + template: + src: jetty.xml + dest: "{{ odl_home }}/etc/jetty.xml" + +- name: download odl pip package + get_url: url="http://{{ http_server.stdout_lines[0] }}/pip/{{ networking_odl_pkg_name }}" dest=/opt/{{ networking_odl_pkg_name }} + +#" + +- name: patch odl pip package + shell: | + cd /opt + tar xf /opt/{{ networking_odl_pkg_name }} + rm -rf /opt/{{ networking_odl_pkg_name }} + sed -i 's/^neutron-lib.*/neutron-lib/' networking-odl-2.0.0/requirements.txt + tar zcf /opt/{{ networking_odl_pkg_name }} networking-odl-2.0.0 + rm -rf networking-odl-2.0.0 + cd - + +- name: odl pip package install + shell: | + cd /opt + pip install {{ networking_odl_pkg_name }} + rm -rf {{ networking_odl_pkg_name }} + cd - + +#- name: restart odl service +# service: name=opendaylight state=started pattern="opendaylight" + +########################################################################################################## +################################# OpenDayLight Cluster Configuration ################################# +########################################################################################################## +#- name: create initial directory +# shell: > +# mkdir -p {{ odl_home }}/configuration/initial; + +#- name: create akka config +# template: +# src: akka.conf +# dest: "{{ odl_home }}/configuration/initial/akka.conf" +# notify: +# - restart odl service + + +#- name: create module-shards config +# template: +# src: module-shards.conf +# dest: "{{ odl_home }}/configuration/initial/module-shards.conf" +# notify: +# - restart odl service + +#- name: copy Jolokia-OSGi config +# shell: > +# cp -r jolokia {{ odl_home }}system/org/; + +#- name: copy Jolokia-OSGi config +# template: +# src: jolokia +# dest: "{{ odl_home }}/system/org/" +# notify: +# - restart odl service + + +#- name: mkdir Jolokia-OSGi directory +# shell: > +# mkdir -p {{ odl_home }}system/org/jolokia; +# mkdir -p {{ odl_home }}system/org/jolokia/jolokia-osgi; +# mkdir -p {{ odl_home }}system/org/jolokia/jolokia-osgi/1.1.5; + + +#- name: copy Jolokia-OSGi config +# template: src={{ item.src }} dest={{ item.dest }} +# with_items: +# - src: "jolokia-osgi-1.1.5-features.xml" +# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5-features.xml" +# - src: 
"jolokia-osgi-1.1.5.jar.sha1" +# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar.sha1" +# - src: "jolokia-osgi-1.1.5.jar" +# dest: "{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/jolokia-osgi-1.1.5.jar" + +#- name: copy Jolokia-OSGi jar config +# copy: src=roles/odl_cluster/templates/jolokia-osgi-1.1.5.jar dest="{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/" + +- name: remove karaf data directory + shell: rm -rf {{ odl_home }}/data/*; + +#- name: chown OpenDaylight Directory and Files +# shell: > +# chown -R odl:odl "{{ odl_home }}"; +# chown odl:odl "{{ service_file.dst }}"; + + +########################################################################################################## +################################ OpenDayLight connect with OpenStack ################################ +########################################################################################################## +- name: turn off neutron-server neutron-plugins-openvswitch-agent Daemon on control node + shell: > + sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ; + sed -i '/neutron-server/d' /opt/service; + sed -i '/keepalived/d' /opt/service; + +- name: turn off neutron-server on control node + service: name=neutron-server state=stopped + +- name: turn off keepalived on control node + service: name=keepalived state=stopped + when: ansible_os_family == "Debian" + + +################################################################# +########################### moon ################################ +################################################################# + +- include: moon-odl.yml + when: moon == "Enable" + +################################################################# + +- name: chown opendaylight directory and files + shell: > + chown -R odl:odl "{{ odl_home }}"; + chown odl:odl "{{ service_file.dst }}"; + +- name: start opendaylight + service: name=opendaylight state=started + when: ansible_os_family == "Debian" + +- name: set opendaylight autostart + shell: chkconfig opendaylight on + when: ansible_os_family == "RedHat" + +- name: start opendaylight + shell: service opendaylight start + when: ansible_os_family == "RedHat" + +- name: check if opendaylight running + shell: netstat -lpen --tcp | grep java | grep 6653; while [ $? 
-ne 0 ]; do sleep 10; netstat -lpen --tcp | grep java | grep 6653; done + +- name: run openvswitch script + include: openvswitch.yml + +#- name: Configure Neutron1 +# shell: > +# crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight; +# crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan; + +#- name: Create ML2 Configuration File +# template: +# src: ml2_conf.sh +# dest: "/opt/ml2_conf.sh" +# mode: 0777 + +#- name: Execute ML2 Configuration File +# command: su -s /bin/sh -c "/opt/ml2_conf.sh;" + + +- name: configure l2 configuration + shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-prv; + when: odl_l3_agent == "Disable" + +- name: configure l3 configuration + shell: crudini --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge br-ex; + when: odl_l3_agent == "Enable" + +- name: configure odl l3 driver + shell: crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin; + when: odl_l3_agent == "Enable" + +- name: configure metadata for l3 configuration + shell: crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True; + when: odl_l3_agent == "Enable" + +- name: drop and recreate neutron database + shell: mysql -e "drop database if exists neutron;"; + mysql -e "create database neutron character set utf8;"; + mysql -e "grant all on neutron.* to 'neutron'@'%' identified by '{{ NEUTRON_DBPASS }}';"; + su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron; + when: inventory_hostname == haproxy_hosts.keys()[0] + tags: + - test_odl + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/openvswitch.yml new file mode 100755 index 00000000..b8cb6c91 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/tasks/openvswitch.yml @@ -0,0 +1,158 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +#- name: Install Crudini +# apt: name={{ item }} state=present +# with_items: +# - crudini + +- name: install compute packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: compute_packages | union(compute_packages_noarch) + +- name: remove neutron-openvswitch-agent service daemon + shell: sed -i '/{{ service_ovs_agent_name }}/d' /opt/service ; + +- name: shut down and disable Neutron's openvswitch agent services + service: name={{ service_ovs_agent_name }} state=stopped enabled=no + +- name: remove Neutron's openvswitch agent services + shell: > + update-rc.d -f {{ service_ovs_agent_name }} remove; + mv /etc/init.d/{{ service_ovs_agent_name }} /home/{{ service_ovs_agent_name }}; + mv /etc/init/{{ service_ovs_agent_name }}.conf /home/{{ service_ovs_agent_name }}.conf; + when: ansible_os_family == "Debian" + + +- name: Stop the Open vSwitch service and clear existing OVSDB + shell: > + service {{ service_ovs_name }} stop ; + rm -rf /var/log/openvswitch/* ; + rm -rf /etc/openvswitch/conf.db ; + service {{ service_ovs_name }} start ; + +- name: set opendaylight as the manager + command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;" + +- name: restart keepalived to recover external IP before check br-int + shell: service keepalived restart + when: inventory_hostname in groups['odl'] + ignore_errors: True + +- name: check br-int + shell: ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done + +- name: set local ip in openvswitch + shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '}; + +#' + +################################################################## +########### Recover External network for odl l3 ################# +################################################################## + +- name: check br-ex + shell: ovs-vsctl list-br | grep br-ex; while [ $? 
-ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-ex; done + when: odl_l3_agent == "Enable" + +- name: add ovs uplink + openvswitch_port: bridge=br-ex port={{ item["interface"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Enable" + +- name: wait 10 seconds + shell: sleep 10 + when: odl_l3_agent == "Enable" + +- name: set external nic in openvswitch + shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config:provider_mappings=br-ex:{{ item["interface"] }} + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Enable" + +- name: copy recovery script + copy: src={{ item }} dest=/opt/setup_networks + with_items: + - recover_network_odl_l3.py + - setup_networks_odl_l3.py + when: odl_l3_agent == "Enable" + +- name: recover external script + shell: python /opt/setup_networks/recover_network_odl_l3.py + when: odl_l3_agent == "Enable" + +- name: update keepalived info + template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf + when: inventory_hostname in groups['odl'] and odl_l3_agent == "Enable" + +- name: modify net-init + shell: sed -i 's/setup_networks.py/setup_networks_odl_l3.py/g' /etc/init.d/net_init + when: odl_l3_agent == "Enable" + +################################################################## +########### Recover External network for odl l2 ################# +################################################################## + +- name: add ovs bridge + openvswitch_bridge: bridge={{ item["name"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Disable" + +- name: add ovs uplink + openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present + with_items: "{{ network_cfg['provider_net_mappings'] }}" + when: item["type"] == "ovs" and odl_l3_agent == "Disable" + +- name: copy recovery script + copy: src={{ item }} dest=/opt/setup_networks + with_items: + - recover_network.py + when: odl_l3_agent == "Disable" + +- name: recover external script + shell: python /opt/setup_networks/recover_network.py + when: odl_l3_agent == "Disable" + +################################################################## + + +- name: restart keepalived to recover external IP + shell: service keepalived restart + when: inventory_hostname in groups['odl'] + ignore_errors: True + + + +################################################################## +################################################################## +################################################################## +- name: configure opendaylight -> ml2 + shell: > + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True; + +#- name: Adjust Service Daemon +# shell: > +# sed -i '/neutron-openvswitch-agent/d' /opt/service ; +# echo opendaylight >> /opt/service ; + +- name: copy ml2 configuration script + template: + src: ml2_conf.sh + dest: "/opt/ml2_conf.sh" + mode: 0777 + +- name: execute ml2 configuration script + command: su -s /bin/sh -c "/opt/ml2_conf.sh;" + +#- name: change odl password for moon +# shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_odl password {{ ADMIN_PASS }} +# when: moon == "Enable" + diff --git 
a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/jetty.xml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/jetty.xml new file mode 100755 index 00000000..50ac7c35 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/jetty.xml @@ -0,0 +1,88 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. +--> +<!DOCTYPE Configure PUBLIC "-//Mort Bay Consulting// +DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd"> + +<Configure class="org.eclipse.jetty.server.Server"> + + <!-- =========================================================== --> + <!-- Set connectors --> + <!-- =========================================================== --> + <!-- One of each type! --> + <!-- =========================================================== --> + + <!-- Use this connector for many frequently idle connections and for + threadless continuations. --> + <Call name="addConnector"> + <Arg> + <New class="org.eclipse.jetty.server.nio.SelectChannelConnector"> + <Set name="host"> + <Property name="jetty.host"/> + </Set> + <Set name="port"> + <Property name="jetty.port" default="8181" /> + </Set> + <Set name="maxIdleTime">300000</Set> + <Set name="Acceptors">2</Set> + <Set name="statsOn">false</Set> + <Set name="confidentialPort">8543</Set> + <Set name="lowResourcesConnections">20000</Set> + <Set name="lowResourcesMaxIdleTime">5000</Set> + </New> + </Arg> + </Call> + + <!-- =========================================================== --> + <!-- Configure Authentication Realms --> + <!-- Realms may be configured for the entire server here, or --> + <!-- they can be configured for a specific web app in a context --> + <!-- configuration (see $(jetty.home)/contexts/test.xml for an --> + <!-- example). 
--> + <!-- =========================================================== --> + <Call name="addBean"> + <Arg> + <New class="org.eclipse.jetty.plus.jaas.JAASLoginService"> + <Set name="name">karaf</Set> + <Set name="loginModuleName">karaf</Set> + <Set name="roleClassNames"> + <Array type="java.lang.String"> + <Item>org.apache.karaf.jaas.boot.principal.RolePrincipal + </Item> + </Array> + </Set> + </New> + </Arg> + </Call> + <Call name="addBean"> + <Arg> + <New class="org.eclipse.jetty.plus.jaas.JAASLoginService"> + <Set name="name">default</Set> + <Set name="loginModuleName">karaf</Set> + <Set name="roleClassNames"> + <Array type="java.lang.String"> + <Item>org.apache.karaf.jaas.boot.principal.RolePrincipal + </Item> + </Array> + </Set> + </New> + </Arg> + </Call> + +</Configure> diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/ml2_conf.sh new file mode 100755 index 00000000..5e3627bf --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/ml2_conf.sh @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini +[ml2_odl] +password = admin +username = admin +url = http://{{ internal_vip.ip }}:8181/controller/nb/v2/neutron +EOT diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/moon-environment b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/moon-environment new file mode 100644 index 00000000..9a13da8e --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/moon-environment @@ -0,0 +1,3 @@ +MOON_SERVER_ADDR={{ internal_vip.ip }} +MOON_SERVER_PORT=5000 +no_proxy="localhost,127.0.0.1" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/settings.xml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/settings.xml new file mode 100644 index 00000000..5ba3b50c --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/settings.xml @@ -0,0 +1,82 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- vi: set et smarttab sw=2 tabstop=2: --> +<!-- + Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved. 
+ + This program and the accompanying materials are made available under the + terms of the Eclipse Public License v1.0 which accompanies this distribution, + and is available at http://www.eclipse.org/legal/epl-v10.html +--> +<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd"> + <localRepository>{{ odl_home }}/system/ </localRepository> + <profiles> + <profile> + <id>opendaylight-release</id> + <repositories> + <repository> + <id>opendaylight-mirror</id> + <name>opendaylight-mirror</name> + <url>https://nexus.opendaylight.org/content/repositories/public/</url> + <releases> + <enabled>true</enabled> + <updatePolicy>never</updatePolicy> + </releases> + <snapshots> + <enabled>false</enabled> + </snapshots> + </repository> + </repositories> + <pluginRepositories> + <pluginRepository> + <id>opendaylight-mirror</id> + <name>opendaylight-mirror</name> + <url>https://nexus.opendaylight.org/content/repositories/public/</url> + <releases> + <enabled>true</enabled> + <updatePolicy>never</updatePolicy> + </releases> + <snapshots> + <enabled>false</enabled> + </snapshots> + </pluginRepository> + </pluginRepositories> + </profile> + + <profile> + <id>opendaylight-snapshots</id> + <repositories> + <repository> + <id>opendaylight-snapshot</id> + <name>opendaylight-snapshot</name> + <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url> + <releases> + <enabled>false</enabled> + </releases> + <snapshots> + <enabled>true</enabled> + </snapshots> + </repository> + </repositories> + <pluginRepositories> + <pluginRepository> + <id>opendaylight-snapshot</id> + <name>opendaylight-snapshot</name> + <url>https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url> + <releases> + <enabled>false</enabled> + </releases> + <snapshots> + <enabled>true</enabled> + </snapshots> + </pluginRepository> + </pluginRepositories> + </profile> + </profiles> + + <activeProfiles> + <activeProfile>opendaylight-release</activeProfile> + <activeProfile>opendaylight-snapshots</activeProfile> + </activeProfiles> +</settings> diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/tomcat-server.xml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/tomcat-server.xml new file mode 100755 index 00000000..bc7ab13d --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/templates/tomcat-server.xml @@ -0,0 +1,61 @@ +<?xml version='1.0' encoding='utf-8'?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<Server> + <!--APR library loader. 
Documentation at /docs/apr.html --> + <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" /> + <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html --> + <Listener className="org.apache.catalina.core.JasperListener" /> + <!-- Prevent memory leaks due to use of particular java/javax APIs--> + <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" /> + <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" /> + <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" /> + + <Service name="Catalina"> + <Connector port="{{ odl_api_port }}" protocol="HTTP/1.1" + connectionTimeout="20000" + redirectPort="8443" /> + +<!-- + Please remove the comments around the following Connector tag to enable HTTPS Authentication support. + Remember to add a valid keystore in the configuration folder. + More info : http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration +--> + + <!-- + <Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true" + maxThreads="150" scheme="https" secure="true" + clientAuth="false" sslProtocol="TLS" + keystoreFile="configuration/keystore" + keystorePass="changeit"/> + --> + + <Engine name="Catalina" defaultHost="localhost"> + <Host name="localhost" appBase="" + unpackWARs="false" autoDeploy="false" + deployOnStartup="false" createDirs="false"> + <Realm className="org.opendaylight.controller.karafsecurity.ControllerCustomRealm" /> + <Valve className="org.apache.catalina.authenticator.SingleSignOn" /> + <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs" + prefix="web_access_log_" suffix=".txt" resolveHosts="false" + rotatable="true" fileDateFormat="yyyy-MM" + pattern="%{yyyy-MM-dd HH:mm:ss.SSS z}t - [%a] - %r"/> + </Host> + </Engine> + </Service> +</Server> + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/Debian.yml new file mode 100755 index 00000000..5e2a2e72 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/Debian.yml @@ -0,0 +1,25 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +controller_packages: +# - openjdk-7-jdk + - crudini + +compute_packages: + - crudini + +service_ovs_name: openvswitch-switch +service_ovs_agent_name: neutron-openvswitch-agent + +service_file: + src: opendaylight.service + dst: /lib/systemd/system/opendaylight.service + +networking_odl_pkg_name: networking-odl-2.0.0.tar.gz diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/main.yml new file mode 100755 index 00000000..da0c9efd --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/odl_cluster/vars/main.yml @@ -0,0 +1,29 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +odl_username: admin +odl_password: admin +odl_api_port: 8181 + +#odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.3.0-Lithium/distribution-karaf-0.3.0-Lithium.tar.gz +odl_pkg_url: karaf.tar.gz +odl_pkg_name: karaf.tar.gz +odl_home: "/opt/opendaylight-0.3.0/" +odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'odl-restconf','odl-l2switch-switch','odl-openflowplugin-all','odl-mdsal-apidocs','odl-dlux-all','odl-adsal-northbound','odl-nsf-all','odl-ovsdb-openstack','odl-ovsdb-northbound','odl-dlux-core'] +odl_extra_features: ['odl-restconf-all','odl-mdsal-clustering','odl-openflowplugin-flow-services','http','jolokia-osgi'] +odl_features: "{{ odl_base_features + odl_extra_features }}" + +jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz + +controller_packages_noarch: [] +compute_packages_noarch: [] + +odl_pip: + - networking_odl + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/handlers/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/handlers/main.yml new file mode 100755 index 00000000..e099fcf4 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/handlers/main.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart onos service + service: name=onos state=restarted enabled=yes diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/main.yml new file mode 100755 index 00000000..c8ce1155 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/main.yml @@ -0,0 +1,51 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
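odl_features above is built by concatenating the two lists with Jinja2's + operator; a throwaway debug task is enough to confirm what Karaf will be asked to boot (illustration only, not part of the change):

    - name: show the merged ODL feature list (illustration only)
      debug:
        msg: "{{ odl_base_features + odl_extra_features }}"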
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: remove neutron-plugin-openvswitch-agent auto start + shell: > + update-rc.d neutron-plugin-openvswitch-agent remove; + sed -i /neutron-plugin-openvswitch-agent/d /opt/service + when: groups['onos']|length !=0 + ignore_errors: True + +- name: shut down and disable Neutron's agent services + service: name=neutron-plugin-openvswitch-agent state=stopped + when: groups['onos']|length !=0 + ignore_errors: True + +- name: remove neutron-l3-agent auto start + shell: > + update-rc.d neutron-l3-agent remove; + sed -i /neutron-l3-agent/d /opt/service + when: inventory_hostname in groups['onos'] + ignore_errors: True + +- name: shut down and disable Neutron's l3 agent services + service: name=neutron-l3-agent state=stopped + when: inventory_hostname in groups['onos'] + ignore_errors: True + +- name: Stop the Open vSwitch service and clear existing OVSDB + shell: > + ovs-vsctl del-br br-int ; + ovs-vsctl del-br br-tun ; + ovs-vsctl del-manager ; + ip link delete onos_port1 type veth peer name onos_port2; + when: groups['onos']|length !=0 + ignore_errors: True + +- name: Install ONOS Cluster on Controller + include: onos_controller.yml + when: inventory_hostname in groups['onos'] + +- name: Config ONOS Cluster + include: openvswitch.yml + when: groups['onos']|length !=0 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/onos_controller.yml new file mode 100755 index 00000000..d51151a9 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/onos_controller.yml @@ -0,0 +1,140 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: get image http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: download onos driver packages + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_driver }}" dest=/opt/ + +- name: upload onos sfc driver package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_sfc_driver }}" dest=/opt/ + +- name: unarchive onos driver package + command: su -s /bin/sh -c "tar xvf /opt/networking-onos.tar -C /opt/" + +- name: upload onos sfc driver package + command: su -s /bin/sh -c "tar xvf /opt/networking-sfc.tar -C /opt/" + +- name: install onos driver + command: su -s /bin/sh -c "/opt/networking-onos/install_driver.sh" + +- name: install onos sfc driver + command: su -s /bin/sh -c "/opt/networking-sfc/install_driver.sh" + +- name: install onos required packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: packages + +- name: download oracle-jdk8 package file + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_pkg_name }}" dest=/opt/{{ jdk8_pkg_name }} + +- name: download oracle-jdk8 script file + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ jdk8_script_name }}" dest=/opt/ + +- name: unarchive onos driver package + command: su -s /bin/sh -c "tar xvf /opt/install_jdk8.tar -C /opt/" + +- name: install install_jdk8 package + command: su -s /bin/sh -c "/opt/install_jdk8/install_jdk8.sh" + +- name: create JAVA_HOME environment variable + shell: > + export J2SDKDIR=/usr/lib/jvm/java-8-oracle; + export J2REDIR=/usr/lib/jvm/java-8-oracle/jre; + export PATH=$PATH:/usr/lib/jvm/java-8-oracle/bin:/usr/lib/jvm/java-8-oracle/db/bin:/usr/lib/jvm/java-8-oracle/jre/bin; + export JAVA_HOME=/usr/lib/jvm/java-8-oracle; + export DERBY_HOME=/usr/lib/jvm/java-8-oracle/db; + +- name: create onos group + group: name=onos system=yes state=present + +- name: create onos user + user: + name: onos + group: onos + home: "{{ onos_home }}" + createhome: "yes" + system: "yes" + shell: "/bin/false" + +- name: download onos package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}" dest=/opt/{{ onos_pkg_name }} + +- name: create new jar repository + command: su -s /bin/sh -c "mkdir ~/.m2" + ignore_errors: True + +- name: download jar repository + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ repository }}" dest=~/.m2/ + +- name: extract jar repository + command: su -s /bin/sh -c "tar xvf ~/.m2/repository.tar -C ~/.m2/" + +- name: extract onos package + command: su -s /bin/sh -c "tar xzf /opt/{{ onos_pkg_name }} -C {{ onos_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" onos + +- name: configure onos service + shell: > + echo 'export ONOS_OPTS=debug' > {{ onos_home }}/options; + echo 'export ONOS_USER=root' >> {{ onos_home }}/options; + mkdir {{ onos_home }}/var; + mkdir {{ onos_home }}/config; + sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf; + cp -rf {{ onos_home }}/init/onos.conf /etc/init/; + cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/; + +- name: configure onos boot feature + shell: > 
+ sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg; + +- name: wait for config time + shell: "sleep 10" + +- name: start onos service + service: name=onos state=started enabled=yes + +- name: wait for onos start time + shell: "sleep 200" + +- name: add onos auto start + shell: > + echo "onos">>/opt/service + +########################################################################################################## +################################ ONOS connect with OpenStack ################################ +########################################################################################################## +- name: Configure Neutron1 + shell: > + crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins networking_sfc.services.sfc.plugin.SfcPlugin, networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin, onos_router; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan; + crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan + +- name: Create ML2 Configuration File + template: + src: ml2_conf.sh + dest: "/opt/ml2_conf.sh" + mode: 0777 + +- name: Configure Neutron2 + command: su -s /bin/sh -c "/opt/ml2_conf.sh;" + +- name: Configure Neutron3 + shell: > + mysql -e "drop database if exists neutron_ml2;"; + mysql -e "create database neutron_ml2 character set utf8;"; + mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"; + su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron; + su -s /bin/sh -c "neutron-db-manage --subproject networking-sfc upgrade head" neutron; + +- name: Restart neutron-server + service: name=neutron-server state=restarted diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/openvswitch.yml new file mode 100755 index 00000000..aac787ea --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/tasks/openvswitch.yml @@ -0,0 +1,57 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
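The controller tasks above fetch every package from the local compass HTTP server with get_url and then shell out to tar. A functionally similar sketch using the unarchive module with remote_src (an alternative pattern, not what this change does) would look like:

    - name: fetch the ONOS tarball from the compass package server
      get_url:
        url: "http://{{ http_server.stdout_lines[0] }}/packages/onos/{{ onos_pkg_name }}"
        dest: "/opt/{{ onos_pkg_name }}"

    - name: unpack it into the ONOS home directory
      unarchive:
        src: "/opt/{{ onos_pkg_name }}"
        dest: "{{ onos_home }}"
        remote_src: yes
        extra_opts: ['--strip-components=1']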
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +- name: set veth port + shell: > + ip link add onos_port1 type veth peer name onos_port2; + ifconfig onos_port1 up; + ifconfig onos_port2 up; + ignore_errors: True + +- name: add openflow-base feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'"; + when: inventory_hostname in groups['onos'] + +- name: add openflow feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'"; + when: inventory_hostname in groups['onos'] + +- name: add ovsdatabase feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'"; + when: inventory_hostname in groups['onos'] + +- name: add ovsdb-base feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-base'"; + when: inventory_hostname in groups['onos'] + +- name: add onos driver ovsdb feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-drivers-ovsdb'"; + when: inventory_hostname in groups['onos'] + +- name: add ovsdb provider host feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdb-provider-host'"; + when: inventory_hostname in groups['onos'] + +- name: add vtn feature + command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'"; + when: inventory_hostname in groups['onos'] + +- name: set public eth card start + command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n onos_port2'" + when: inventory_hostname in groups['onos'] + +- name: Set ONOS as the manager + command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;" + +- name: delete default gateway + shell: > + route delete default; + when: inventory_hostname not in groups['onos'] + ignore_errors: True diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/templates/ml2_conf.sh b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/templates/ml2_conf.sh new file mode 100755 index 00000000..8af03df4 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/templates/ml2_conf.sh @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
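openvswitch.yml above installs each Karaf feature with a one-shot onos CLI call and relies on fixed sleeps elsewhere. A hedged sketch of an explicit check that the vtn application is actually loaded before OVS is pointed at ONOS (retry counts are arbitrary, not from this change):

    - name: wait until the vtn feature reports as installed (sketch)
      command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:list -i'"
      register: onos_features
      until: "'onos-app-vtn-onosfw' in onos_features.stdout"
      retries: 10
      delay: 30
      when: inventory_hostname in groups['onos']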
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +cat <<EOT>> /etc/neutron/plugins/ml2/ml2_conf.ini +[onos] +password = admin +username = admin +url_path = http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn +EOT + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/Debian.yml new file mode 100755 index 00000000..59a4dbd9 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/Debian.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - software-properties-common + - crudini + +services: [] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/RedHat.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/RedHat.yml new file mode 100755 index 00000000..59a4dbd9 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/RedHat.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages: + - software-properties-common + - crudini + +services: [] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/main.yml new file mode 100755 index 00000000..f11f1102 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/onos_cluster/vars/main.yml @@ -0,0 +1,19 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
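ml2_conf.sh above appends the [onos] section to ml2_conf.ini with a heredoc. The same keys could also be written idempotently with the ini_file module, roughly as below (an alternative sketch, not part of this change):

    - name: write the [onos] section of ml2_conf.ini (alternative sketch)
      ini_file:
        dest: /etc/neutron/plugins/ml2/ml2_conf.ini
        section: onos
        option: "{{ item.key }}"
        value: "{{ item.value }}"
      with_dict:
        username: admin
        password: admin
        url_path: "http://{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:8181/onos/vtn"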
This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+onos_pkg_name: onos-1.6.0.tar.gz
+onos_home: /opt/onos/
+karaf_dist: apache-karaf-3.0.5
+jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
+jdk8_script_name: install_jdk8.tar
+onos_driver: networking-onos.tar
+onos_sfc_driver: networking-sfc.tar
+repository: repository.tar
+onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base, onos-openflow, onos-ovsdatabase, onos-ovsdb-base, onos-drivers-ovsdb, onos-ovsdb-provider-host, onos-app-vtn-onosfw
+
+
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
new file mode 100755
index 00000000..836cb78b
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/open-contrail/tasks/uninstall-openvswitch.yml
@@ -0,0 +1,46 @@
+---
+- name: del ovs bridge
+  shell: ovs-vsctl del-br br-int; ovs-vsctl del-br br-tun; ovs-vsctl del-br br-prv;
+
+- name: remove ovs and ovs-plugin daemon
+  shell: >
+    sed -i '/neutron-openvswitch-agent/d' /opt/service ;
+    sed -i '/openvswitch-switch/d' /opt/service ;
+
+- name: stop ovs and ovs-plugin
+  shell: service openvswitch-switch stop; service neutron-openvswitch-agent stop;
+
+- name: remove ovs and ovs-plugin files
+  shell: >
+    update-rc.d -f neutron-openvswitch-agent remove;
+    mv /etc/init.d/neutron-openvswitch-agent /home/neutron-openvswitch-agent;
+    mv /etc/init/neutron-openvswitch-agent.conf /home/neutron-openvswitch-agent.conf;
+    update-rc.d -f openvswitch-switch remove ;
+    mv /etc/init.d/openvswitch-switch /home/openvswitch-switch ;
+    mv /etc/init/openvswitch-switch.conf /home/openvswitch-switch.conf ;
+    update-rc.d -f neutron-ovs-cleanup remove ;
+    mv /etc/init.d/neutron-ovs-cleanup /home/neutron-ovs-cleanup ;
+    mv /etc/init/neutron-ovs-cleanup.conf /home/neutron-ovs-cleanup.conf ;
+
+- name: remove ovs kernel module
+  shell: rmmod vport_vxlan; rmmod openvswitch;
+  ignore_errors: True
+
+- name: copy recovery script
+  copy: src={{ item }} dest=/opt/setup_networks
+  with_items:
+#    - recover_network_opencontrail.py
+    - setup_networks_opencontrail.py
+
+#- name: recover external script
+#  shell: python /opt/setup_networks/recover_network_opencontrail.py
+
+- name: modify net-init
+  shell: sed -i 's/setup_networks.py/setup_networks_opencontrail.py/g' /etc/init.d/net_init
+
+- name: resolve dual NIC problem
+  shell: >
+    echo "net.ipv4.conf.all.arp_ignore=1" >> /etc/sysctl.conf ;
+    /sbin/sysctl -p ;
+    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore ;
+
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/neutron.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/neutron.j2
new file mode 100644
index 00000000..e7107660
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/neutron.j2
@@ -0,0 +1,7 @@
+[securitygroup]
+firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+enable_security_group = True
+
+[agent]
+prevent_arp_spoofing = False
+
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/nova.j2
b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/nova.j2 new file mode 100644 index 00000000..7dbc216a --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/templates/nova.j2 @@ -0,0 +1,3 @@ +[DEFAULT] +firewall_driver = nova.virt.firewall.NoopFirewallDriver +security_group_api = neutron diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/vars/Debian.yml new file mode 100644 index 00000000..221a3d92 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/secgroup/vars/Debian.yml @@ -0,0 +1,35 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +configs_templates: + - src: nova.j2 + dest: + - /etc/nova/nova.conf + - src: neutron.j2 + dest: + - /etc/neutron/plugins/ml2/ml2_conf.ini + - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini + - /etc/neutron/plugins/ml2/restproxy.ini + +controller_services: + - nova-api + - nova-cert + - nova-conductor + - nova-consoleauth + - nova-novncproxy + - nova-scheduler + - neutron-server + - neutron-openvswitch-agent + - neutron-l3-agent + - neutron-dhcp-agent + - neutron-metadata-agent + +compute_services: + - nova-compute + - neutron-openvswitch-agent diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/setup-network/files/setup_networks/net_init b/deploy/adapters/ansible/openstack_newton_xenial/roles/setup-network/files/setup_networks/net_init new file mode 100755 index 00000000..41ccb988 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/setup-network/files/setup_networks/net_init @@ -0,0 +1,24 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: anamon.init +# Required-Start: $network +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Starts the cobbler anamon boot notification program +# Description: anamon runs the first time a machine is booted after installation. +### END INIT INFO + + + +# +# anamon.init: Starts the cobbler post-install boot notification program +# +# chkconfig: 35 0 6 +# +# description: anamon runs the first time a machine is booted after +# installation. +# +python /opt/setup_networks/setup_networks.py diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/storage/files/storage b/deploy/adapters/ansible/openstack_newton_xenial/roles/storage/files/storage new file mode 100755 index 00000000..3acc6115 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/storage/files/storage @@ -0,0 +1,10 @@ +#! 
/bin/bash +### BEGIN INIT INFO +# Provides: Storage +# Required-Start: $remote_fs $network +# Required-Stop: $remote_fs $network +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Description: Storage +### END INIT INFO +loop_dev=`sh /opt/setup_storage/losetup.sh` diff --git a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/files/swift-lib.tar.gz b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/files/swift-lib.tar.gz Binary files differindex fdbb38fd..fdbb38fd 100644 --- a/deploy/adapters/ansible/openstack_mitaka_xenial/roles/swift/files/swift-lib.tar.gz +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/files/swift-lib.tar.gz diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/main.yml new file mode 100644 index 00000000..0f083146 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/main.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include: swift.yml + when: moon == "Enable" diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-compute1.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-compute1.yml new file mode 100644 index 00000000..be00484b --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-compute1.yml @@ -0,0 +1,80 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
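The swift role above runs only when the moon flag is "Enable"; moon itself is expected to come from the scenario configuration, which is not shown in this change. A slightly more defensive form of the same include would be:

    - include: swift.yml
      when: moon is defined and moon == "Enable"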
This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: disable auto start
+  copy:
+    content: "#!/bin/sh\nexit 101"
+    dest: "/usr/sbin/policy-rc.d"
+    mode: 0755
+  when: ansible_os_family == "Debian"
+
+- name: install swift-compute packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: compute_packages | union(compute_packages_noarch)
+
+- name: enable auto start
+  file:
+    path=/usr/sbin/policy-rc.d
+    state=absent
+  when: ansible_os_family == "Debian"
+
+- name: format devices
+  shell: >
+    dd if=/dev/zero of=/var/swift1 bs=1G count=10;
+    dd if=/dev/zero of=/var/swift2 bs=1G count=10;
+    mkfs.xfs /var/swift1;
+    mkfs.xfs /var/swift2;
+
+- name: create mount point directories
+  shell: >
+    mkdir -p /srv/node/swift1;
+    mkdir -p /srv/node/swift2;
+
+- name: edit /etc/fstab
+  shell: >
+    echo "/var/swift1 /srv/node/swift1/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
+    echo "/var/swift2 /srv/node/swift2/ xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" >> /etc/fstab;
+    mount /srv/node/swift1;
+    mount /srv/node/swift2;
+
+- name: edit /etc/default/rsync
+  shell: sed -i 's/RSYNC_ENABLE=false/RSYNC_ENABLE=true/g' /etc/default/rsync
+
+- name: restart rsync service
+  service: name=rsync state=restarted enabled=yes
+
+- name: copy scripts
+  template: src={{ item }} dest=/etc/swift/ backup=yes
+  with_items:
+    - account-server.conf
+    - container-server.conf
+    - object-server.conf
+
+- name: set directory ownership and permissions
+  shell: >
+    chown -R swift:swift /srv/node;
+    mkdir -p /var/cache/swift;
+    chown -R root:swift /var/cache/swift;
+    chmod -R 775 /var/cache/swift;
+
+#- name: copy swift lib
+#  copy: src=swift-lib.tar.gz dest=/tmp/swift-lib.tar.gz
+#
+#- name: upload swift lib
+#  unarchive: src=swift-lib.tar.gz dest=/tmp/
+#
+#- name: copy swift lib
+#  shell: command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/"
+#
+#- name: untar swift lib
+#  shell: >
+#    tar zxf /tmp/swift-lib.tar.gz;
+#    cp /tmp/swift-lib/* /usr/lib/;
+
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller1.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller1.yml
new file mode 100644
index 00000000..36d05040
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller1.yml
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: disable auto start
+  copy:
+    content: "#!/bin/sh\nexit 101"
+    dest: "/usr/sbin/policy-rc.d"
+    mode: 0755
+  when: ansible_os_family == "Debian"
+
+- name: install swift-controller packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: controller_packages | union(controller_packages_noarch)
+
+- name: enable auto start
+  file:
+    path=/usr/sbin/policy-rc.d
+    state=absent
+  when: ansible_os_family == "Debian"
+
+- name: make swift directory
+  file: path=/etc/swift state=directory mode=0755
+
+- name: update proxy-server conf
+  template: src=proxy-server.conf dest=/etc/swift/proxy-server.conf backup=yes
+
+
diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller2.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller2.yml
new file mode 100644
index 00000000..92d4ab22
--- /dev/null
+++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift-controller2.yml
@@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: create account.builder file
+  shell: >
+    cd /etc/swift ;
+    swift-ring-builder account.builder create 10 3 1;
+
+- name: add each storage node to the account ring
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift1 --weight 100 ;
+    swift-ring-builder account.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6002 --device swift2 --weight 100 ;
+  with_indexed_items: groups['compute']
+
+- name: verify the account ring contents
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder account.builder;
+
+- name: rebalance the account ring
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder account.builder rebalance;
+
+
+#####################
+- name: create container builder file
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder container.builder create 10 3 1;
+
+- name: add each storage node to the container ring
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift1 --weight 100;
+    swift-ring-builder container.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6001 --device swift2 --weight 100;
+  with_indexed_items: groups['compute']
+
+- name: verify the container ring contents
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder container.builder;
+
+- name: rebalance the container ring
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder container.builder rebalance;
+
+#############################
+
+- name: create object builder file
+  shell: >
+    cd /etc/swift;
+    swift-ring-builder object.builder create 10 3 1;
+
+- name: add each storage node to the object ring
+  shell: >
+    cd /etc/swift;
swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift1 --weight 100; + swift-ring-builder object.builder add --region 1 --zone 1 --ip {{ ip_settings[item.1]['mgmt']['ip'] }} --port 6000 --device swift2 --weight 100; + with_indexed_items: groups['compute'] + +- name: verify the ring contents + shell: > + cd /etc/swift; + swift-ring-builder object.builder; + +- name: rebalance the ring + shell: > + cd /etc/swift; + swift-ring-builder object.builder rebalance; + +########################## + +- name: distribute ring configuration files to the other controller + shell: > + cd /etc/swift; + scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/; + with_indexed_items: groups['controller'] + +- name: distribute ring configuration files to the all compute + shell: > + cd /etc/swift; + scp account.ring.gz container.ring.gz object.ring.gz root@{{ ip_settings[item.1]['mgmt']['ip'] }}:/etc/swift/; + with_indexed_items: groups['compute'] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift.yml new file mode 100644 index 00000000..4e2651a7 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/tasks/swift.yml @@ -0,0 +1,79 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- include: swift-controller1.yml + when: inventory_hostname in groups['controller'] + +- include: swift-compute1.yml + when: inventory_hostname in groups['compute'] + +- include: swift-controller2.yml + when: inventory_hostname == haproxy_hosts.keys()[0] + +- name: copy swift.conf + template: src=swift.conf dest=/etc/swift/swift.conf backup=yes + +- name: chown /etc/swift + shell: chown -R root:swift /etc/swift + +- name: restart tasks on controller + service: name={{ item }} state=restarted enabled=yes + with_items: + - memcached + - swift-proxy + when: inventory_hostname in groups['controller'] + +- name: restart tasks on compute + shell: swift-init all start + when: inventory_hostname in groups['compute'] + ignore_errors: True + +- name: restart tasks on controller + service: name={{ item }} state=restarted enabled=yes + with_items: + - rsync + when: inventory_hostname in groups['compute'] + +- name: upload swift lib + unarchive: src=swift-lib.tar.gz dest=/tmp/ + +- name: copy swift lib + command: su -s /bin/sh -c "cp /tmp/swift-lib/* /usr/lib/" + +- name: wait 30 seconds + shell: sleep 30 + +- name: create swift task script + shell: echo {{ item }} >> /opt/swift-service + with_items: + - swift-account + - swift-account-replicator + - swift-container-replicator + - swift-object + - swift-object-updater + - swift-account-auditor + - swift-container + - swift-container-sync + - swift-object-auditor + - swift-account-reaper + - swift-container-auditor + - swift-container-updater + - swift-object-replicator + when: inventory_hostname in groups['compute'] + ignore_errors: True + +- name: restart swift task 
+ shell: > + for i in `cat /opt/swift-service`; do service $i start; done; + sleep 10; + for i in `cat /opt/swift-service`; do service $i restart; done; + when: inventory_hostname in groups['compute'] + ignore_errors: True diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/account-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/account-server.conf new file mode 100644 index 00000000..ea84799f --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/account-server.conf @@ -0,0 +1,200 @@ +[DEFAULT] +bind_ip = {{ internal_ip }} +bind_port = 6002 +# bind_timeout = 30 +# backlog = 4096 +user = swift +swift_dir = /etc/swift +devices = /srv/node +mount_check = true +# disable_fallocate = false +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# If you don't mind the extra disk space usage in overhead, you can turn this +# on to preallocate disk space with SQLite databases to decrease fragmentation. +# db_preallocation = off +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes you'd like fallocate to +# reserve, whether there is space for the given file size or not. +# fallocate_reserve = 0 + +[pipeline:main] +pipeline = healthcheck recon account-server + +[app:account-server] +use = egg:swift#account +# You can override the default log routing for this app here: +# set log_name = account-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = true +# set log_address = /dev/log +# +# auto_create_account_prefix = . +# +# Configure parameter for creating specific server +# To handle all verbs, including replication verbs, do not specify +# "replication_server" (this is the default). To only handle replication, +# set to a True value (e.g. "True" or "1"). To handle only non-replication +# verbs, set to "False". Unless you have a separate replication network, you +# should not specify any value for "replication_server". Default is empty. 
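The restart step in swift.yml above shells out to a for-loop over /opt/swift-service. An equivalent sketch with the service module, shown here with a shortened service list (the full list is the one written to /opt/swift-service in the task above), would be:

    - name: restart the swift storage services (sketch, shortened list)
      service:
        name: "{{ item }}"
        state: restarted
      with_items:
        - swift-account
        - swift-container
        - swift-object
      when: inventory_hostname in groups['compute']
      ignore_errors: True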
+# replication_server = false + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" +# disable_path = + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift + +[account-replicator] +# You can override the default log routing for this app here (don't use set!): +# log_name = account-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Maximum number of database rows that will be sync'd in a single HTTP +# replication request. Databases with less than or equal to this number of +# differing rows will always be sync'd using an HTTP replication request rather +# than using rsync. +# per_diff = 1000 +# +# Maximum number of HTTP replication requests attempted on each replication +# pass for any one container. This caps how long the replicator will spend +# trying to sync a given database per pass so the other databases don't get +# starved. +# max_diffs = 100 +# +# Number of replication workers to spawn. +# concurrency = 8 +# +# Time in seconds to wait between replication passes +# interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# +# node_timeout = 10 +# conn_timeout = 0.5 +# +# The replicator also performs reclamation +# reclaim_age = 604800 +# +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# rsync_compress = no +# +# Format of the rysnc module where the replicator will send data. See +# etc/rsyncd.conf-sample for some usage examples. +# rsync_module = {replication_ip}::account +# +# recon_cache_path = /var/cache/swift + +[account-auditor] +# You can override the default log routing for this app here (don't use set!): +# log_name = account-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Will audit each account at most once per interval +# interval = 1800 +# +# accounts_per_second = 200 +# recon_cache_path = /var/cache/swift + +[account-reaper] +# You can override the default log routing for this app here (don't use set!): +# log_name = account-reaper +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# concurrency = 25 +# interval = 3600 +# node_timeout = 10 +# conn_timeout = 0.5 +# +# Normally, the reaper begins deleting account information for deleted accounts +# immediately; you can set this to delay its work however. The value is in +# seconds; 2592000 = 30 days for example. +# delay_reaping = 0 +# +# If the account fails to be be reaped due to a persistent error, the +# account reaper will log a message such as: +# Account <name> has not been reaped since <date> +# You can search logs for this message if space is not being reclaimed +# after you delete account(s). +# Default is 2592000 seconds (30 days). This is in addition to any time +# requested by delay_reaping. +# reap_warn_after = 2592000 + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. 
+# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/account.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/container-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/container-server.conf new file mode 100644 index 00000000..88cd2ebb --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/container-server.conf @@ -0,0 +1,229 @@ +[DEFAULT] +bind_ip = {{ internal_ip }} +bind_port = 6001 +# bind_timeout = 30 +# backlog = 4096 +user = swift +swift_dir = /etc/swift +devices = /srv/node +mount_check = true +# disable_fallocate = false +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# This is a comma separated list of hosts allowed in the X-Container-Sync-To +# field for containers. This is the old-style of using container sync. It is +# strongly recommended to use the new style of a separate +# container-sync-realms.conf -- see container-sync-realms.conf-sample +# allowed_sync_hosts = 127.0.0.1 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# If you don't mind the extra disk space usage in overhead, you can turn this +# on to preallocate disk space with SQLite databases to decrease fragmentation. +# db_preallocation = off +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes you'd like fallocate to +# reserve, whether there is space for the given file size or not. 
+# fallocate_reserve = 0 + +[pipeline:main] +pipeline = healthcheck recon container-server + +[app:container-server] +use = egg:swift#container +# You can override the default log routing for this app here: +# set log_name = container-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = true +# set log_address = /dev/log +# +# node_timeout = 3 +# conn_timeout = 0.5 +# allow_versions = false +# auto_create_account_prefix = . +# +# Configure parameter for creating specific server +# To handle all verbs, including replication verbs, do not specify +# "replication_server" (this is the default). To only handle replication, +# set to a True value (e.g. "True" or "1"). To handle only non-replication +# verbs, set to "False". Unless you have a separate replication network, you +# should not specify any value for "replication_server". +# replication_server = false + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" +# disable_path = + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift + +[container-replicator] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Maximum number of database rows that will be sync'd in a single HTTP +# replication request. Databases with less than or equal to this number of +# differing rows will always be sync'd using an HTTP replication request rather +# than using rsync. +# per_diff = 1000 +# +# Maximum number of HTTP replication requests attempted on each replication +# pass for any one container. This caps how long the replicator will spend +# trying to sync a given database per pass so the other databases don't get +# starved. +# max_diffs = 100 +# +# Number of replication workers to spawn. +# concurrency = 8 +# +# Time in seconds to wait between replication passes +# interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# +# node_timeout = 10 +# conn_timeout = 0.5 +# +# The replicator also performs reclamation +# reclaim_age = 604800 +# +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# rsync_compress = no +# +# Format of the rysnc module where the replicator will send data. See +# etc/rsyncd.conf-sample for some usage examples. 
+# rsync_module = {replication_ip}::container +# +# recon_cache_path = /var/cache/swift + +[container-updater] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-updater +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# interval = 300 +# concurrency = 4 +# node_timeout = 3 +# conn_timeout = 0.5 +# +# slowdown will sleep that amount between containers +# slowdown = 0.01 +# +# Seconds to suppress updating an account that has generated an error +# account_suppression_time = 60 +# +# recon_cache_path = /var/cache/swift + +[container-auditor] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Will audit each container at most once per interval +# interval = 1800 +# +# containers_per_second = 200 +# recon_cache_path = /var/cache/swift + +[container-sync] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-sync +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# If you need to use an HTTP Proxy, set it here; defaults to no proxy. +# You can also set this to a comma separated list of HTTP Proxies and they will +# be randomly used (simple load balancing). +# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888 +# +# Will sync each container at most once per interval +# interval = 300 +# +# Maximum amount of time to spend syncing each container per pass +# container_time = 60 +# +# Maximum amount of time in seconds for the connection attempt +# conn_timeout = 5 +# Server errors from requests will be retried by default +# request_tries = 3 +# +# Internal client config file path +# internal_client_conf_path = /etc/swift/internal-client.conf + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/container.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. 
+# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/object-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/object-server.conf new file mode 100644 index 00000000..effd4f22 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/object-server.conf @@ -0,0 +1,347 @@ +[DEFAULT] +bind_ip = {{ internal_ip }} +bind_port = 6000 +# bind_timeout = 30 +# backlog = 4096 +user = swift +swift_dir = /etc/swift +devices = /srv/node +mount_check = true +# disable_fallocate = false +# expiring_objects_container_divisor = 86400 +# expiring_objects_account_name = expiring_objects +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. NOTE: if servers_per_port is set, this setting is +# ignored. +# workers = auto +# +# Make object-server run this many worker processes per unique port of +# "local" ring devices across all storage policies. This can help provide +# the isolation of threads_per_disk without the severe overhead. The default +# value of 0 disables this feature. +# servers_per_port = 0 +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes you'd like fallocate to +# reserve, whether there is space for the given file size or not. +# fallocate_reserve = 0 +# +# Time to wait while attempting to connect to another backend node. +# conn_timeout = 0.5 +# Time to wait while sending each chunk of data to another backend node. +# node_timeout = 3 +# Time to wait while sending a container update on object update. +# container_update_timeout = 1.0 +# Time to wait while receiving each chunk of data from a client or another +# backend node. +# client_timeout = 60 +# +# network_chunk_size = 65536 +# disk_chunk_size = 65536 + +[pipeline:main] +pipeline = healthcheck recon object-server + +[app:object-server] +use = egg:swift#object +# You can override the default log routing for this app here: +# set log_name = object-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = true +# set log_address = /dev/log +# +# max_upload_time = 86400 +# +# slow is the total amount of seconds an object PUT/DELETE request takes at +# least. If it is faster, the object server will sleep this amount of time minus +# the already passed transaction time. This is only useful for simulating slow +# devices on storage nodes during testing and development. 
+# slow = 0 +# +# Objects smaller than this are not evicted from the buffercache once read +# keep_cache_size = 5242880 +# +# If true, objects for authenticated GET requests may be kept in buffer cache +# if small enough +# keep_cache_private = false +# +# on PUTs, sync data every n MB +# mb_per_sync = 512 +# +# Comma separated list of headers that can be set in metadata on an object. +# This list is in addition to X-Object-Meta-* headers and cannot include +# Content-Type, etag, Content-Length, or deleted +# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object +# +# auto_create_account_prefix = . +# +# A value of 0 means "don't use thread pools". A reasonable starting point is +# 4. +# threads_per_disk = 0 +# +# Configure parameter for creating specific server +# To handle all verbs, including replication verbs, do not specify +# "replication_server" (this is the default). To only handle replication, +# set to a True value (e.g. "True" or "1"). To handle only non-replication +# verbs, set to "False". Unless you have a separate replication network, you +# should not specify any value for "replication_server". +# replication_server = false +# +# Set to restrict the number of concurrent incoming SSYNC requests +# Set to 0 for unlimited +# Note that SSYNC requests are only used by the object reconstructor or the +# object replicator when configured to use ssync. +# replication_concurrency = 4 +# +# Restricts incoming SSYNC requests to one per device, +# replication_currency above allowing. This can help control I/O to each +# device, but you may wish to set this to False to allow multiple SSYNC +# requests (up to the above replication_concurrency setting) per device. +# replication_one_per_device = True +# +# Number of seconds to wait for an existing replication device lock before +# giving up. +# replication_lock_timeout = 15 +# +# These next two settings control when the SSYNC subrequest handler will +# abort an incoming SSYNC attempt. An abort will occur if there are at +# least threshold number of failures and the value of failures / successes +# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100 +# failures have to occur and there have to be more failures than successes for +# an abort to occur. +# replication_failure_threshold = 100 +# replication_failure_ratio = 1.0 +# +# Use splice() for zero-copy object GETs. This requires Linux kernel +# version 3.0 or greater. If you set "splice = yes" but the kernel +# does not support it, error messages will appear in the object server +# logs at startup, but your object servers should continue to function. 
+# +# splice = no + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" +# disable_path = + +[filter:recon] +use = egg:swift#recon +recon_cache_path = /var/cache/swift +recon_lock_path = /var/lock + +[object-replicator] +# You can override the default log routing for this app here (don't use set!): +# log_name = object-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# daemonize = on +# +# Time in seconds to wait between replication passes +# interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# +# concurrency = 1 +# stats_interval = 300 +# +# default is rsync, alternative is ssync +# sync_method = rsync +# +# max duration of a partition rsync +# rsync_timeout = 900 +# +# bandwidth limit for rsync in kB/s. 0 means unlimited +# rsync_bwlimit = 0 +# +# passed to rsync for io op timeout +# rsync_io_timeout = 30 +# +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might +# slow down the syncing process. +# rsync_compress = no +# +# Format of the rysnc module where the replicator will send data. See +# etc/rsyncd.conf-sample for some usage examples. +# rsync_module = {replication_ip}::object +# +# node_timeout = <whatever's in the DEFAULT section or 10> +# max duration of an http request; this is for REPLICATE finalization calls and +# so should be longer than node_timeout +# http_timeout = 60 +# +# attempts to kill all workers if nothing replicates for lockup_timeout seconds +# lockup_timeout = 1800 +# +# The replicator also performs reclamation +# reclaim_age = 604800 +# +# ring_check_interval = 15 +# recon_cache_path = /var/cache/swift +# +# limits how long rsync error log lines are +# 0 means to log the entire line +# rsync_error_log_line_length = 0 +# +# handoffs_first and handoff_delete are options for a special case +# such as disk full in the cluster. These two options SHOULD NOT BE +# CHANGED, except for such an extreme situations. (e.g. disks filled up +# or are about to fill up. Anyway, DO NOT let your drives fill up) +# handoffs_first is the flag to replicate handoffs prior to canonical +# partitions. It allows to force syncing and deleting handoffs quickly. +# If set to a True value(e.g. "True" or "1"), partitions +# that are not supposed to be on the node will be replicated first. +# handoffs_first = False +# +# handoff_delete is the number of replicas which are ensured in swift. +# If the number less than the number of replicas is set, object-replicator +# could delete local handoffs even if all replicas are not ensured in the +# cluster. Object-replicator would remove local handoff partition directories +# after syncing partition when the number of successful responses is greater +# than or equal to this number. By default(auto), handoff partitions will be +# removed when it has successfully replicated to all the canonical nodes. 
+# handoff_delete = auto + +[object-reconstructor] +# You can override the default log routing for this app here (don't use set!): +# Unless otherwise noted, each setting below has the same meaning as described +# in the [object-replicator] section, however these settings apply to the EC +# reconstructor +# +# log_name = object-reconstructor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# daemonize = on +# +# Time in seconds to wait between reconstruction passes +# interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# +# concurrency = 1 +# stats_interval = 300 +# node_timeout = 10 +# http_timeout = 60 +# lockup_timeout = 1800 +# reclaim_age = 604800 +# ring_check_interval = 15 +# recon_cache_path = /var/cache/swift +# handoffs_first = False + +[object-updater] +# You can override the default log routing for this app here (don't use set!): +# log_name = object-updater +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# interval = 300 +# concurrency = 1 +# node_timeout = <whatever's in the DEFAULT section or 10> +# slowdown will sleep that amount between objects +# slowdown = 0.01 +# +# recon_cache_path = /var/cache/swift + +[object-auditor] +# You can override the default log routing for this app here (don't use set!): +# log_name = object-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Time in seconds to wait between auditor passes +# interval = 30 +# +# You can set the disk chunk size that the auditor uses making it larger if +# you like for more efficient local auditing of larger objects +# disk_chunk_size = 65536 +# files_per_second = 20 +# concurrency = 1 +# bytes_per_second = 10000000 +# log_time = 3600 +# zero_byte_files_per_second = 50 +# recon_cache_path = /var/cache/swift + +# Takes a comma separated list of ints. If set, the object auditor will +# increment a counter for every object whose size is <= to the given break +# points and report the result after a full scan. +# object_size_stats = + +# The auditor will cleanup old rsync tempfiles after they are "old +# enough" to delete. You can configure the time elapsed in seconds +# before rsync tempfiles will be unlinked, or the default value of +# "auto" try to use object-replicator's rsync_timeout + 900 and fallback +# to 86400 (1 day). +# rsync_tempfile_timeout = auto + +# Note: Put it at the beginning of the pipleline to profile all middleware. But +# it is safer to put this after healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/object.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. 
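+#
+# For example (illustrative; the exact file name format may differ): with the
+# override prefix suggested above (/var/log/swift/profile/object.profile) and
+# dump_timestamp enabled, files such as object.profile-<pid>-<timestamp>
+# would pile up, one batch per dump_interval.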
+# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/proxy-server.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/proxy-server.conf new file mode 100644 index 00000000..b76796cf --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/proxy-server.conf @@ -0,0 +1,764 @@ +{% set memcached_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} +[DEFAULT] +bind_ip = {{ internal_ip }} +bind_port = 8080 +# bind_timeout = 30 +# backlog = 4096 +swift_dir = /etc/swift +user = swift + +# Enables exposing configuration settings via HTTP GET /info. +# expose_info = true + +# Key to use for admin calls that are HMAC signed. Default is empty, +# which will disable admin calls to /info. +# admin_key = secret_admin_key +# +# Allows the ability to withhold sections from showing up in the public calls +# to /info. You can withhold subsections by separating the dict level with a +# ".". The following would cause the sections 'container_quotas' and 'tempurl' +# to not be listed, and the key max_failed_deletes would be removed from +# bulk_delete. Default value is 'swift.valid_api_versions' which allows all +# registered features to be listed via HTTP GET /info except +# swift.valid_api_versions information +# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl + +# Use an integer to override the number of pre-forked processes that will +# accept connections. Should default to the number of effective cpu +# cores in the system. It's worth noting that individual workers will +# use many eventlet co-routines to service multiple concurrent requests. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# Set the following two lines to enable SSL. This is for testing only. +# cert_file = /etc/swift/proxy.crt +# key_file = /etc/swift/proxy.key +# +# expiring_objects_container_divisor = 86400 +# expiring_objects_account_name = expiring_objects +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_headers = false +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# This optional suffix (default is empty) that would be appended to the swift transaction +# id allows one to easily figure out from which cluster that X-Trans-Id belongs to. +# This is very useful when one is managing more than one swift cluster. +# trans_id_suffix = +# +# comma separated list of functions to call to setup custom log handlers. 
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar) +# cors_allow_origin = +# strict_cors_mode = True +# +# client_timeout = 60 +# eventlet_debug = false + +[pipeline:main] +# This sample pipeline uses tempauth and is used for SAIO dev work and +# testing. See below for a pipeline using keystone. +#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server + +# The following pipeline shows keystone integration. Comment out the one +# above and uncomment this one. Additional steps for integrating keystone are +# covered further below in the filter sections for authtoken and keystoneauth. +#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server + +[app:proxy-server] +use = egg:swift#proxy +account_autocreate = True +# You can override the default log routing for this app here: +# set log_name = proxy-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_address = /dev/log +# +# log_handoffs = true +# recheck_account_existence = 60 +# recheck_container_existence = 60 +# object_chunk_size = 65536 +# client_chunk_size = 65536 +# +# How long the proxy server will wait on responses from the a/c/o servers. +# node_timeout = 10 +# +# How long the proxy server will wait for an initial response and to read a +# chunk of data from the object servers while serving GET / HEAD requests. +# Timeouts from these requests can be recovered from so setting this to +# something lower than node_timeout would provide quicker error recovery +# while allowing for a longer timeout for non-recoverable requests (PUTs). +# Defaults to node_timeout, should be overriden if node_timeout is set to a +# high number to prevent client timeouts from firing before the proxy server +# has a chance to retry. +# recoverable_node_timeout = node_timeout +# +# conn_timeout = 0.5 +# +# How long to wait for requests to finish after a quorum has been established. +# post_quorum_timeout = 0.5 +# +# How long without an error before a node's error count is reset. This will +# also be how long before a node is reenabled after suppression is triggered. +# error_suppression_interval = 60 +# +# How many errors can accumulate before a node is temporarily ignored. +# error_suppression_limit = 10 +# +# If set to 'true' any authorized user may create and delete accounts; if +# 'false' no one, even authorized, can. +# allow_account_management = false +# +# Set object_post_as_copy = false to turn on fast posts where only the metadata +# changes are stored anew and the original data file is kept in place. This +# makes for quicker posts. 
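+#
+# In other words (illustrative summary): with the default below a POST is
+# handled as a fresh copy of the object carrying the new metadata, while
+# object_post_as_copy = false
+# rewrites only the metadata and leaves the original data file in place.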
+# object_post_as_copy = true +# +# If set to 'true' authorized accounts that do not yet exist within the Swift +# cluster will be automatically created. +# account_autocreate = false +# +# If set to a positive value, trying to create a container when the account +# already has at least this maximum containers will result in a 403 Forbidden. +# Note: This is a soft limit, meaning a user might exceed the cap for +# recheck_account_existence before the 403s kick in. +# max_containers_per_account = 0 +# +# This is a comma separated list of account hashes that ignore the +# max_containers_per_account cap. +# max_containers_whitelist = +# +# Comma separated list of Host headers to which the proxy will deny requests. +# deny_host_headers = +# +# Prefix used when automatically creating accounts. +# auto_create_account_prefix = . +# +# Depth of the proxy put queue. +# put_queue_depth = 10 +# +# Storage nodes can be chosen at random (shuffle), by using timing +# measurements (timing), or by using an explicit match (affinity). +# Using timing measurements may allow for lower overall latency, while +# using affinity allows for finer control. In both the timing and +# affinity cases, equally-sorting nodes are still randomly chosen to +# spread load. +# The valid values for sorting_method are "affinity", "shuffle", or "timing". +# sorting_method = shuffle +# +# If the "timing" sorting_method is used, the timings will only be valid for +# the number of seconds configured by timing_expiry. +# timing_expiry = 300 +# +# By default on a GET/HEAD swift will connect to a storage node one at a time +# in a single thread. There is smarts in the order they are hit however. If you +# turn on concurrent_gets below, then replica count threads will be used. +# With addition of the concurrency_timeout option this will allow swift to send +# out GET/HEAD requests to the storage nodes concurrently and answer with the +# first to respond. With an EC policy the parameter only affects HEAD requests. +# concurrent_gets = off +# +# This parameter controls how long to wait before firing off the next +# concurrent_get thread. A value of 0 would be fully concurrent, any other +# number will stagger the firing of the threads. This number should be +# between 0 and node_timeout. The default is what ever you set for the +# conn_timeout parameter. +# concurrency_timeout = 0.5 +# +# Set to the number of nodes to contact for a normal request. You can use +# '* replicas' at the end to have it use the number given times the number of +# replicas for the ring being used for the request. +# request_node_count = 2 * replicas +# +# Which backend servers to prefer on reads. Format is r<N> for region +# N or r<N>z<M> for region N, zone M. The value after the equals is +# the priority; lower numbers are higher priority. +# +# Example: first read from region 1 zone 1, then region 1 zone 2, then +# anything in region 2, then everything else: +# read_affinity = r1z1=100, r1z2=200, r2=300 +# Default is empty, meaning no preference. +# read_affinity = +# +# Which backend servers to prefer on writes. Format is r<N> for region +# N or r<N>z<M> for region N, zone M. If this is set, then when +# handling an object PUT request, some number (see setting +# write_affinity_node_count) of local backend servers will be tried +# before any nonlocal ones. +# +# Example: try to write to regions 1 and 2 before writing to any other +# nodes: +# write_affinity = r1, r2 +# Default is empty, meaning no preference. 
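+#
+# A mixed form is also possible (illustrative), e.g.:
+# write_affinity = r1z1, r2
+# which would treat region 1 zone 1 plus all of region 2 as the local set
+# tried first for object PUTs.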
+# write_affinity = +# +# The number of local (as governed by the write_affinity setting) +# nodes to attempt to contact first, before any non-local ones. You +# can use '* replicas' at the end to have it use the number given +# times the number of replicas for the ring being used for the +# request. +# write_affinity_node_count = 2 * replicas +# +# These are the headers whose values will only be shown to swift_owners. The +# exact definition of a swift_owner is up to the auth system in use, but +# usually indicates administrative responsibilities. +# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control + +[filter:tempauth] +use = egg:swift#tempauth +# You can override the default log routing for this filter here: +# set log_name = tempauth +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# The reseller prefix will verify a token begins with this prefix before even +# attempting to validate it. Also, with authorization, only Swift storage +# accounts with this prefix will be authorized by this middleware. Useful if +# multiple auth systems are in use for one Swift cluster. +# The reseller_prefix may contain a comma separated list of items. The first +# item is used for the token as mentioned above. If second and subsequent +# items exist, the middleware will handle authorization for an account with +# that prefix. For example, for prefixes "AUTH, SERVICE", a path of +# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty +# (blank) reseller prefix is required, it must be first in the list. Two +# single quote characters indicates an empty (blank) reseller prefix. +# reseller_prefix = AUTH + +# +# The require_group parameter names a group that must be presented by +# either X-Auth-Token or X-Service-Token. Usually this parameter is +# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah). +# By default, no group is needed. Do not use .admin. +# require_group = + +# The auth prefix will cause requests beginning with this prefix to be routed +# to the auth subsystem, for granting tokens, etc. +# auth_prefix = /auth/ +# token_life = 86400 +# +# This allows middleware higher in the WSGI pipeline to override auth +# processing, useful for middleware such as tempurl and formpost. If you know +# you're not going to use such middleware and you want a bit of extra security, +# you can set this to false. +# allow_overrides = true +# +# This specifies what scheme to return with storage urls: +# http, https, or default (chooses based on what the server is running as) +# This can be useful with an SSL load balancer in front of a non-SSL server. +# storage_url_scheme = default +# +# Lastly, you need to list all the accounts/users you want here. The format is: +# user_<account>_<user> = <key> [group] [group] [...] [storage_url] +# or if you want underscores in <account> or <user>, you can base64 encode them +# (with no equal signs) and use this format: +# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] 
[storage_url] +# There are special groups of: +# .reseller_admin = can do anything to any account for this auth +# .admin = can do anything within the account +# If neither of these groups are specified, the user can only access containers +# that have been explicitly allowed for them by a .admin or .reseller_admin. +# The trailing optional storage_url allows you to specify an alternate url to +# hand back to the user upon authentication. If not specified, this defaults to +# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve +# to what the requester would need to use to reach this host. +# Here are example entries, required for running the tests: +user_admin_admin = admin .admin .reseller_admin +user_test_tester = testing .admin +user_test2_tester2 = testing2 .admin +user_test_tester3 = testing3 +user_test5_tester5 = testing5 service + +# To enable Keystone authentication you need to have the auth token +# middleware first to be configured. Here is an example below, please +# refer to the keystone's documentation for details about the +# different settings. +# +# You'll also need to have the keystoneauth middleware enabled and have it in +# your main pipeline, as show in the sample pipeline at the top of this file. +# +# Following parameters are known to work with keystonemiddleware v2.3.0 +# (above v2.0.0), but checking the latest information in the wiki page[1] +# is recommended. +# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration +# +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +auth_uri = http://{{ internal_vip.ip }}:5000 +auth_url = http://{{ internal_vip.ip }}:35357 +identity_uri = http://{{ internal_vip.ip }}:35357 +memcached_servers = {{ memcached_servers }} +#auth_plugin = password +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = swift +password = {{ CINDER_PASS }} +delay_auth_decision = True +# +# delay_auth_decision defaults to False, but leaving it as false will +# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from +# working. This value must be explicitly set to True. +# delay_auth_decision = False +# +# cache = swift.cache +# include_service_catalog = False +# +[filter:keystoneauth] +use = egg:swift#keystoneauth +operator_roles = admin,user +# The reseller_prefix option lists account namespaces that this middleware is +# responsible for. The prefix is placed before the Keystone project id. +# For example, for project 12345678, and prefix AUTH, the account is +# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...). +# Several prefixes are allowed by specifying a comma-separated list +# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a +# single blank/empty prefix. If an empty prefix is required in a list of +# prefixes, a value of '' (two single quote characters) indicates a +# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_') +# character is appended to the value unless already present. +# reseller_prefix = AUTH +# +# The user must have at least one role named by operator_roles on a +# project in order to create, delete and modify containers and objects +# and to set and read privileged headers such as ACLs. +# If there are several reseller prefix items, you can prefix the +# parameter so it applies only to those accounts (for example +# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project> +# path). 
If you omit the prefix, the option applies to all reseller +# prefix items. For the blank/empty prefix, prefix with '' (do not put +# underscore after the two single quote characters). +# operator_roles = admin, swiftoperator +# +# The reseller admin role has the ability to create and delete accounts +# reseller_admin_role = ResellerAdmin +# +# This allows middleware higher in the WSGI pipeline to override auth +# processing, useful for middleware such as tempurl and formpost. If you know +# you're not going to use such middleware and you want a bit of extra security, +# you can set this to false. +# allow_overrides = true +# +# If the service_roles parameter is present, an X-Service-Token must be +# present in the request that when validated, grants at least one role listed +# in the parameter. The X-Service-Token may be scoped to any project. +# If there are several reseller prefix items, you can prefix the +# parameter so it applies only to those accounts (for example +# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project> +# path). If you omit the prefix, the option applies to all reseller +# prefix items. For the blank/empty prefix, prefix with '' (do not put +# underscore after the two single quote characters). +# By default, no service_roles are required. +# service_roles = +# +# For backwards compatibility, keystoneauth will match names in cross-tenant +# access control lists (ACLs) when both the requesting user and the tenant +# are in the default domain i.e the domain to which existing tenants are +# migrated. The default_domain_id value configured here should be the same as +# the value used during migration of tenants to keystone domains. +# default_domain_id = default +# +# For a new installation, or an installation in which keystone projects may +# move between domains, you should disable backwards compatible name matching +# in ACLs by setting allow_names_in_acls to false: +# allow_names_in_acls = true + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". +# This facility may be used to temporarily remove a Swift node from a load +# balancer pool during maintenance or upgrade (remove the file to allow the +# node back into the load balancer pool). +# disable_path = + +[filter:cache] +use = egg:swift#memcache +memcache_servers = {{ memcached_servers }} +# You can override the default log routing for this filter here: +# set log_name = cache +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# If not set here, the value for memcache_servers will be read from +# memcache.conf (see memcache.conf-sample) or lacking that file, it will +# default to the value below. You can specify multiple servers separated with +# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must +# follow rfc3986 section-3.2.2, i.e. [::1]:11211) +# memcache_servers = 127.0.0.1:11211 +# +# Sets how memcache values are serialized and deserialized: +# 0 = older, insecure pickle serialization +# 1 = json serialization but pickles can still be read (still insecure) +# 2 = json serialization only (secure and the default) +# If not set here, the value for memcache_serialization_support will be read +# from /etc/swift/memcache.conf (see memcache.conf-sample). 
+# To avoid an instant full cache flush, existing installations should +# upgrade with 0, then set to 1 and reload, then after some time (24 hours) +# set to 2 and reload. +# In the future, the ability to use pickle serialization will be removed. +# memcache_serialization_support = 2 +# +# Sets the maximum number of connections to each memcached server per worker +# memcache_max_connections = 2 +# +# More options documented in memcache.conf-sample + +[filter:ratelimit] +use = egg:swift#ratelimit +# You can override the default log routing for this filter here: +# set log_name = ratelimit +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# clock_accuracy should represent how accurate the proxy servers' system clocks +# are with each other. 1000 means that all the proxies' clock are accurate to +# each other within 1 millisecond. No ratelimit should be higher than the +# clock accuracy. +# clock_accuracy = 1000 +# +# max_sleep_time_seconds = 60 +# +# log_sleep_time_seconds of 0 means disabled +# log_sleep_time_seconds = 0 +# +# allows for slow rates (e.g. running up to 5 sec's behind) to catch up. +# rate_buffer_seconds = 5 +# +# account_ratelimit of 0 means disabled +# account_ratelimit = 0 + +# DEPRECATED- these will continue to work but will be replaced +# by the X-Account-Sysmeta-Global-Write-Ratelimit flag. +# Please see ratelimiting docs for details. +# these are comma separated lists of account names +# account_whitelist = a,b +# account_blacklist = c,d + +# with container_limit_x = r +# for containers of size x limit write requests per second to r. The container +# rate will be linearly interpolated from the values given. With the values +# below, a container of size 5 will get a rate of 75. +# container_ratelimit_0 = 100 +# container_ratelimit_10 = 50 +# container_ratelimit_50 = 20 + +# Similarly to the above container-level write limits, the following will limit +# container GET (listing) requests. +# container_listing_ratelimit_0 = 100 +# container_listing_ratelimit_10 = 50 +# container_listing_ratelimit_50 = 20 + +[filter:domain_remap] +use = egg:swift#domain_remap +# You can override the default log routing for this filter here: +# set log_name = domain_remap +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# storage_domain = example.com +# path_root = v1 + +# Browsers can convert a host header to lowercase, so check that reseller +# prefix on the account is the correct case. This is done by comparing the +# items in the reseller_prefixes config option to the found prefix. If they +# match except for case, the item from reseller_prefixes will be used +# instead of the found reseller prefix. When none match, the default reseller +# prefix is used. When no default reseller prefix is configured, any request +# with an account prefix not in that list will be ignored by this middleware. 
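+#
+# For example (illustrative): if a browser lowercases the Host header so the
+# account prefix arrives as "auth" rather than "AUTH", it still matches the
+# "AUTH" entry in reseller_prefixes below and that canonical form is used
+# instead of the lowercased one.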
+# reseller_prefixes = AUTH +# default_reseller_prefix = + +[filter:catch_errors] +use = egg:swift#catch_errors +# You can override the default log routing for this filter here: +# set log_name = catch_errors +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +[filter:cname_lookup] +# Note: this middleware requires python-dnspython +use = egg:swift#cname_lookup +# You can override the default log routing for this filter here: +# set log_name = cname_lookup +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# Specify the storage_domain that match your cloud, multiple domains +# can be specified separated by a comma +# storage_domain = example.com +# +# lookup_depth = 1 + +# Note: Put staticweb just after your auth filter(s) in the pipeline +[filter:staticweb] +use = egg:swift#staticweb +# You can override the default log routing for this filter here: +# set log_name = staticweb +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline +[filter:tempurl] +use = egg:swift#tempurl +# The methods allowed with Temp URLs. +# methods = GET HEAD PUT POST DELETE +# +# The headers to remove from incoming requests. Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. incoming_allow_headers is a list of exceptions to these +# removals. +# incoming_remove_headers = x-timestamp +# +# The headers allowed as exceptions to incoming_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# incoming_allow_headers = +# +# The headers to remove from outgoing responses. Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. outgoing_allow_headers is a list of exceptions to these +# removals. +# outgoing_remove_headers = x-object-meta-* +# +# The headers allowed as exceptions to outgoing_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# outgoing_allow_headers = x-object-meta-public-* + +# Note: Put formpost just before your auth filter(s) in the pipeline +[filter:formpost] +use = egg:swift#formpost + +# Note: Just needs to be placed before the proxy-server in the pipeline. 
+[filter:name_check]
+use = egg:swift#name_check
+# forbidden_chars = '"`<>
+# maximum_length = 255
+# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
+
+[filter:list-endpoints]
+use = egg:swift#list_endpoints
+# list_endpoints_path = /endpoints/
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+# If not set, logging directives from [DEFAULT] without "access_" will be used
+# access_log_name = swift
+# access_log_facility = LOG_LOCAL0
+# access_log_level = INFO
+# access_log_address = /dev/log
+#
+# If set, access_log_udp_host will override access_log_address
+# access_log_udp_host =
+# access_log_udp_port = 514
+#
+# You can use log_statsd_* from [DEFAULT] or override them here:
+# access_log_statsd_host =
+# access_log_statsd_port = 8125
+# access_log_statsd_default_sample_rate = 1.0
+# access_log_statsd_sample_rate_factor = 1.0
+# access_log_statsd_metric_prefix =
+# access_log_headers = false
+#
+# If access_log_headers is True and access_log_headers_only is set only
+# these headers are logged. Multiple headers can be defined as comma separated
+# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
+# access_log_headers_only =
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters are unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 16
+#
+# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
+# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
+# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
+# Note: The double proxy-logging in the pipeline is not a mistake. The
+# left-most proxy-logging is there to log requests that were handled in
+# middleware and never made it through to the right-most middleware (and
+# proxy server). Double logging is prevented for normal requests. See
+# proxy-logging docs.
+
+# Note: Put before both ratelimit and auth in the pipeline.
+[filter:bulk]
+use = egg:swift#bulk
+# max_containers_per_extraction = 10000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# max_failed_deletes = 1000
+
+# In order to keep a connection active during a potentially long bulk request,
+# Swift may return whitespace prepended to the actual response body. This
+# whitespace will be yielded no more than every yield_frequency seconds.
+# yield_frequency = 10
+
+# Note: The following parameter is used during a bulk delete of objects and
+# their container. Such a delete would frequently fail because it is very
+# likely that not all replicated objects have been deleted by the time the
+# middleware gets a successful response. The number of retries can be
+# configured, and the number of seconds to wait between each retry will be
+# 1.5**retry
+
+# delete_container_retry_count = 0
+
+# Note: Put after auth and staticweb in the pipeline.
+[filter:slo]
+use = egg:swift#slo
+# max_manifest_segments = 1000
+# max_manifest_size = 2097152
+#
+# Rate limiting applies only to segments smaller than this size (bytes).
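+#
+# For example (illustrative): the default below is 1048576 bytes (1 MiB), so
+# only segments smaller than 1 MiB are subject to the per-second segment
+# limits that follow; larger segments are served without this rate limit.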
+# rate_limit_under_size = 1048576 +# +# Start rate-limiting SLO segment serving after the Nth small segment of a +# segmented object. +# rate_limit_after_segment = 10 +# +# Once segment rate-limiting kicks in for an object, limit segments served +# to N per second. 0 means no rate-limiting. +# rate_limit_segments_per_sec = 1 +# +# Time limit on GET requests (seconds) +# max_get_time = 86400 + +# Note: Put after auth and staticweb in the pipeline. +# If you don't put it in the pipeline, it will be inserted for you. +[filter:dlo] +use = egg:swift#dlo +# Start rate-limiting DLO segment serving after the Nth segment of a +# segmented object. +# rate_limit_after_segment = 10 +# +# Once segment rate-limiting kicks in for an object, limit segments served +# to N per second. 0 means no rate-limiting. +# rate_limit_segments_per_sec = 1 +# +# Time limit on GET requests (seconds) +# max_get_time = 86400 + +# Note: Put after auth in the pipeline. +[filter:container-quotas] +use = egg:swift#container_quotas + +# Note: Put after auth in the pipeline. +[filter:account-quotas] +use = egg:swift#account_quotas + +[filter:gatekeeper] +use = egg:swift#gatekeeper +# Set this to false if you want to allow clients to set arbitrary X-Timestamps +# on uploaded objects. This may be used to preserve timestamps when migrating +# from a previous storage system, but risks allowing users to upload +# difficult-to-delete data. +# shunt_inbound_x_timestamp = true +# +# You can override the default log routing for this filter here: +# set log_name = gatekeeper +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +[filter:container_sync] +use = egg:swift#container_sync +# Set this to false if you want to disallow any full url values to be set for +# any new X-Container-Sync-To headers. This will keep any new full urls from +# coming in, but won't change any existing values already in the cluster. +# Updating those will have to be done manually, as knowing what the true realm +# endpoint should be cannot always be guessed. +# allow_full_urls = true +# Set this to specify this clusters //realm/cluster as "current" in /info +# current = //REALM/CLUSTER + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after catch_errors, gatekeeper and healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/proxy.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. 
+# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false + +# Note: Put after slo, dlo in the pipeline. +# If you don't put it in the pipeline, it will be inserted automatically. +[filter:versioned_writes] +use = egg:swift#versioned_writes +# Enables using versioned writes middleware and exposing configuration +# settings via HTTP GET /info. +# WARNING: Setting this option bypasses the "allow_versions" option +# in the container configuration file, which will be eventually +# deprecated. See documentation for more details. +# allow_versioned_writes = false diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/rsyncd.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/rsyncd.conf new file mode 100644 index 00000000..703c55eb --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/rsyncd.conf @@ -0,0 +1,23 @@ +uid = swift +gid = swift +log file = /var/log/rsyncd.log +pid file = /var/run/rsyncd.pid +address = {{ internal_ip }} + +[account] +max connections = 2 +path = /srv/node/ +read only = False +lock file = /var/lock/account.lock + +[container] +max connections = 2 +path = /srv/node/ +read only = False +lock file = /var/lock/container.lock + +[object] +max connections = 2 +path = /srv/node/ +read only = False +lock file = /var/lock/object.lock diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/swift.conf b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/swift.conf new file mode 100644 index 00000000..9a31501b --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/templates/swift.conf @@ -0,0 +1,183 @@ +[swift-hash] + +# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the +# the hashing algorithm when determining data placement in the cluster. +# These values should remain secret and MUST NOT change +# once a cluster has been deployed. +# Use only printable chars (python -c "import string; print(string.printable)") + +swift_hash_path_suffix = 7c6a7cd34d07aed5 +swift_hash_path_prefix = 0c4629166f4de441 + +# storage policies are defined here and determine various characteristics +# about how objects are stored and treated. Policies are specified by name on +# a per container basis. Names are case-insensitive. The policy index is +# specified in the section header and is used internally. The policy with +# index 0 is always used for legacy containers and can be given a name for use +# in metadata however the ring file name will always be 'object.ring.gz' for +# backwards compatibility. If no policies are defined a policy with index 0 +# will be automatically created for backwards compatibility and given the name +# Policy-0. A default policy is used when creating new containers when no +# policy is specified in the request. If no other policies are defined the +# policy with index 0 will be declared the default. If multiple policies are +# defined you must define a policy with index 0 and you must specify a +# default. It is recommended you always define a section for +# storage-policy:0. Aliases are not required when defining a storage policy. +# +# A 'policy_type' argument is also supported but is not mandatory. Default +# policy type 'replication' is used when 'policy_type' is unspecified. 
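+#
+# As an illustration of the naming rule above: the policy with index 0 below
+# is always served by object.ring.gz, while a policy added later as
+# [storage-policy:1] would use object-1.ring.gz, and so on for higher indexes.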
+[storage-policy:0] +name = Policy-0 +default = yes +#policy_type = replication +aliases = yellow, orange + +# the following section would declare a policy called 'silver', the number of +# replicas will be determined by how the ring is built. In this example the +# 'silver' policy could have a lower or higher # of replicas than the +# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You +# may only specify one storage policy section as the default. If you changed +# this section to specify 'silver' as the default, when a client created a new +# container w/o a policy specified, it will get the 'silver' policy because +# this config has specified it as the default. However if a legacy container +# (one created with a pre-policy version of swift) is accessed, it is known +# implicitly to be assigned to the policy with index 0 as opposed to the +# current default. Note that even without specifying any aliases, a policy +# always has at least the default name stored in aliases because this field is +# used to contain all human readable names for a storage policy. +# +#[storage-policy:1] +#name = silver +#policy_type = replication + +# The following declares a storage policy of type 'erasure_coding' which uses +# Erasure Coding for data reliability. Please refer to Swift documentation for +# details on how the 'erasure_coding' storage policy is implemented. +# +# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode +# operations. Please refer to Swift documentation for details on how to +# install PyECLib. +# +# When defining an EC policy, 'policy_type' needs to be 'erasure_coding' and +# EC configuration parameters 'ec_type', 'ec_num_data_fragments' and +# 'ec_num_parity_fragments' must be specified. 'ec_type' is chosen from the +# list of EC backends supported by PyECLib. The ring configured for the +# storage policy must have it's "replica" count configured to +# 'ec_num_data_fragments' + 'ec_num_parity_fragments' - this requirement is +# validated when services start. 'ec_object_segment_size' is the amount of +# data that will be buffered up before feeding a segment into the +# encoder/decoder. More information about these configuration options and +# supported `ec_type` schemes is available in the Swift documentation. Please +# refer to Swift documentation for details on how to configure EC policies. +# +# The example 'deepfreeze10-4' policy defined below is a _sample_ +# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity' +# fragments. 'ec_type' defines the Erasure Coding scheme. +# 'liberasurecode_rs_vand' (Reed-Solomon Vandermonde) is used as an example +# below. +# +#[storage-policy:2] +#name = deepfreeze10-4 +#aliases = df10-4 +#policy_type = erasure_coding +#ec_type = liberasurecode_rs_vand +#ec_num_data_fragments = 10 +#ec_num_parity_fragments = 4 +#ec_object_segment_size = 1048576 + + +# The swift-constraints section sets the basic constraints on data +# saved in the swift cluster. These constraints are automatically +# published by the proxy server in responses to /info requests. + +[swift-constraints] + +# max_file_size is the largest "normal" object that can be saved in +# the cluster. This is also the limit on the size of each segment of +# a "large" object when using the large object manifest support. +# This value is set in bytes. Setting it to lower than 1MiB will cause +# some tests to fail. It is STRONGLY recommended to leave this value at +# the default (5 * 2**30 + 2). 
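+# For reference, 5 * 2**30 + 2 = 5368709122 bytes, i.e. 5 GiB plus 2 bytes,
+# which matches the commented default below.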
+ +#max_file_size = 5368709122 + + +# max_meta_name_length is the max number of bytes in the utf8 encoding +# of the name portion of a metadata header. + +#max_meta_name_length = 128 + + +# max_meta_value_length is the max number of bytes in the utf8 encoding +# of a metadata value + +#max_meta_value_length = 256 + + +# max_meta_count is the max number of metadata keys that can be stored +# on a single account, container, or object + +#max_meta_count = 90 + + +# max_meta_overall_size is the max number of bytes in the utf8 encoding +# of the metadata (keys + values) + +#max_meta_overall_size = 4096 + +# max_header_size is the max number of bytes in the utf8 encoding of each +# header. Using 8192 as default because eventlet use 8192 as max size of +# header line. This value may need to be increased when using identity +# v3 API tokens including more than 7 catalog entries. +# See also include_service_catalog in proxy-server.conf-sample +# (documented in overview_auth.rst) + +#max_header_size = 8192 + + +# By default the maximum number of allowed headers depends on the number of max +# allowed metadata settings plus a default value of 32 for regular http +# headers. If for some reason this is not enough (custom middleware for +# example) it can be increased with the extra_header_count constraint. + +#extra_header_count = 0 + + +# max_object_name_length is the max number of bytes in the utf8 encoding +# of an object name + +#max_object_name_length = 1024 + + +# container_listing_limit is the default (and max) number of items +# returned for a container listing request + +#container_listing_limit = 10000 + + +# account_listing_limit is the default (and max) number of items returned +# for an account listing request +#account_listing_limit = 10000 + + +# max_account_name_length is the max number of bytes in the utf8 encoding +# of an account name + +#max_account_name_length = 256 + + +# max_container_name_length is the max number of bytes in the utf8 encoding +# of a container name + +#max_container_name_length = 256 + + +# By default all REST API calls should use "v1" or "v1.0" as the version string, +# for example "/v1/account". This can be manually overridden to make this +# backward-compatible, in case a different version string has been used before. +# Use a comma-separated list in case of multiple allowed versions, for example +# valid_api_versions = v0,v1,v2 +# This is only enforced for account, container and object requests. The allowed +# api versions are by default excluded from /info. + +# valid_api_versions = v1,v1.0 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/Debian.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/Debian.yml new file mode 100644 index 00000000..39aea32d --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/Debian.yml @@ -0,0 +1,27 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- + +controller_packages: + - swift + - swift-proxy + - python-swiftclient + - python-keystoneclient + - memcached + +compute_packages: + - xfsprogs + - rsync + - swift + - swift-account + - swift-container + - swift-object + + +services: [] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/main.yml b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/main.yml new file mode 100644 index 00000000..540068da --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/swift/vars/main.yml @@ -0,0 +1,15 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: [] + +services_noarch: [] + +controller_packages_noarch: [] +compute_packages_noarch: [] diff --git a/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2 new file mode 100644 index 00000000..f1d9125b --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/roles/tacker/templates/tacker.j2 @@ -0,0 +1,426 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = True + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = True + +# Where to store Tacker state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/tacker + +# Where to store lock files +lock_path = $state_path/lock + +auth_strategy = keystone +policy_file = /usr/local/etc/tacker/policy.json + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +# log_dir = + +# publish_errors = False + +# Address to bind the API server to +bind_host = {{ internal_ip }} + +# Port the bind the API server to +bind_port = 8888 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of tacker.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Tacker core plugin entrypoint to be loaded from the +# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the tacker source distribution. 
For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +# core_plugin = +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the tacker source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering + +service_plugins = vnfm,nfvo + +# Paste configuration file +# api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +# auth_strategy = keystone + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Tacker is +# being used in conjunction with nova security groups +# allow_overlapping_ips = False +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = tacker.openstack.common.rpc.impl_kombu +# Size of RPC thread pool +# rpc_thread_pool_size = 64 +# Size of RPC connection pool +# rpc_conn_pool_size = 30 +# Seconds to wait for a response from call or multicall +# rpc_response_timeout = 60 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# rpc_cast_timeout = 30 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = tacker + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# IP address of the RabbitMQ installation +# rabbit_host = localhost +# Password of the RabbitMQ server +# rabbit_password = guest +# Port where RabbitMQ server is running/listening +# rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +# rabbit_userid = guest +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. 
(boolean value) +# rabbit_ha_queues = false + +# QPID +# rpc_backend=tacker.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=tacker.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = tacker.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = tacker.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = tacker.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +# default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +# notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# tacker server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to tacker server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. 
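+#
+# For example (illustrative): setting
+# dhcp_agents_per_network = 2
+# would have each network scheduled to two DHCP agents, providing the
+# redundancy described above.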
+# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +# api_workers = 0 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +# rpc_workers = 0 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== tacker nova interactions ========== +# Send notification to nova when port status is active. +# notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +# notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +# nova_url = http://127.0.0.1:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +# nova_region_name = + +# Username for connection to nova in admin context +# nova_admin_username = + +# The uuid of the admin nova tenant +# nova_admin_tenant_id = + +# Password for connection to nova in admin context. +# nova_admin_password = + +# Authorization URL for connection to nova in admin context. +# nova_admin_auth_url = + +# CA file for novaclient to verify server certificates +# nova_ca_certificates_file = + +# Boolean to control ignoring SSL errors on the nova url +# nova_api_insecure = False + +# Number of seconds between sending events to nova if there are any events to send +# send_events_interval = 2 + +# ======== end of tacker nova interactions ========== + +[agent] +# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real +# root filter facility. 
+# Change to "sudo" to skip the filtering and just run the command directly +root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +# report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +signing_dir = /var/cache/tacker +#cafile = /opt/stack/data/ca-bundle.pem +#project_domain_id = default +project_name = service +#user_domain_id = default +password = console +username = tacker +auth_url = http://{{ internal_vip.ip }}:35357 +auth_plugin = password +identity_uri = http://{{ internal_vip.ip }}:5000 +auth_uri = http://{{ internal_vip.ip }}:5000 + + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/tacker +connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8 +# Replace 127.0.0.1 above with the IP address of the database used by the +# main tacker server. (Leave it as is if the database runs on this host.) +# connection = sqlite:// +# NOTE: In deployment the [database] section and its connection attribute may +# be set in the corresponding core plugin '.ini' file. However, it is suggested +# to put the [database] section and its connection attribute in this +# configuration file. + +# Database engine for which script will be generated when using offline +# migration +# engine = + +# The SQLAlchemy connection string used to connect to the slave database +# slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +# max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +# retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +# min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# max_pool_size = 10 + +# Timeout in seconds before idle sql connections are reaped +# idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +# max_overflow = 20 + +# Verbosity of SQL debugging information.
0=None, 100=Everything +# connection_debug = 0 + +# Add python stack traces to SQL as comment strings +# connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# pool_timeout = 10 + +[tacker] +# Specify drivers for hosting device +# infra_driver = heat,nova,noop + +# Specify drivers for mgmt +# mgmt_driver = noop,openwrt + +# Specify drivers for monitoring +# monitor_driver = ping, http_ping + +[nfvo_vim] +# Supported VIM drivers, resource orchestration controllers such as OpenStack, kvm +#Default VIM driver is OpenStack +#vim_drivers = openstack +#Default VIM placement if vim id is not provided +default_vim = VIM0 + +[vim_keys] +#openstack = /etc/tacker/vim/fernet_keys +[tacker_nova] +# parameters for novaclient to talk to nova +region_name = RegionOne +#project_domain_id = default +project_name = service +#user_domain_id = default +password = console +username = nova +auth_url = http://{{ internal_vip.ip }}:35357 +auth_plugin = password + +[tacker_heat] +heat_uri = http://{{ internal_vip.ip }}:8004/v1 +stack_retries = 60 +stack_retry_wait = 5 diff --git a/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf b/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf new file mode 100644 index 00000000..33231ed5 --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/templates/neutron.conf @@ -0,0 +1,486 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = {{ VERBOSE }} + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = {{ VERBOSE }} + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +notify_nova_on_port_status_changes = True +notify_nova_on_port_data_changes = True + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +log_dir = /var/log/neutron + +# publish_errors = False + +# Address to bind the API server to +bind_host = {{ network_server_host }} + +# Port the bind the API server to +bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin +core_plugin = ml2 +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. 
For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering +service_plugins = router + +# Paste configuration file +api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4th octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option.
(boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. +notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ internal_vip.ip }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = RegionOne + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant +{% if NOVA_ADMIN_TENANT_ID|default('') %} +nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} +{% endif %} +# Password for connection to nova in admin context. +nova_admin_password = {{ NOVA_PASS }} + +# Authorization URL for connection to nova in admin context. 
+nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0 + +# Number of seconds between sending events to nova if there are any events to send +send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +quota_network = 100 + +# Number of subnets allowed per tenant. A negative value means unlimited. +quota_subnet = 100 + +# Number of ports allowed per tenant. A negative value means unlimited. +quota_port = 8000 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +quota_security_group = 1000 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +quota_security_group_rule = 1000 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a health monitor is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the command directly +root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} +signing_dir = $state_path/keystone-signing + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite +connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron + +# The SQLAlchemy connection string used to connect to the slave database +slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +max_pool_size = 100 + +# Timeout in seconds before idle sql connections are reaped +idle_timeout = 30 +use_db_reconnect = True + +# If set, use this value for max_overflow with sqlalchemy +max_overflow = 100 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +connection_debug = 0 + +# Add python stack traces to SQL as comment strings +connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=<service_type>:<name>:<driver>[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of <service type> and <name> must be unique; <driver> must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default + +{% if enable_fwaas %} +[fwaas] +driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver +enabled = True +{% endif %} + +[nova] +auth_url = http://{{ internal_vip.ip }}:35357 +auth_type = password +project_domain_name = default +user_domain_name = default +project_name = service +username = nova +password = {{ NOVA_PASS }} + diff --git a/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf b/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf new file mode 100644 index 00000000..3a5735cf --- /dev/null +++ b/deploy/adapters/ansible/openstack_newton_xenial/templates/nova.conf @@ -0,0 +1,96 @@ +{% set memcached_servers = [] %} +{% for host in haproxy_hosts.values() %} +{% set _ = memcached_servers.append('%s:11211'% host) %} +{% endfor %} +{% set memcached_servers = memcached_servers|join(',') %} + +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lib/nova/tmp +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose={{ VERBOSE }} +debug={{ DEBUG }} +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes +enabled_apis=osapi_compute,metadata + +default_floating_pool={{ public_net_info.network }} +auth_strategy = keystone + +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} + +osapi_compute_listen={{ internal_ip }} +metadata_listen={{ internal_ip }} + +my_ip = {{ internal_ip }} +vnc_enabled = True +vncserver_listen = {{ internal_ip }} +vncserver_proxyclient_address = {{ internal_ip }} +novncproxy_base_url = http://{{ public_vip.ip }}:6080/vnc_auto.html + +novncproxy_host = {{ internal_ip }} +novncproxy_port = 6080 + +network_api_class = nova.network.neutronv2.api.API +linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver +security_group_api = neutron + +instance_usage_audit = True +instance_usage_audit_period = hour +notify_on_state_change = vm_and_task_state +notification_driver = nova.openstack.common.notifier.rpc_notifier +notification_driver = ceilometer.compute.nova_notifier + +memcached_servers = {{ memcached_servers }} + +[database] +# The SQLAlchemy connection string used to connect to the database +connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova +idle_timeout = 30 +use_db_reconnect = True +pool_timeout = 10 + +[api_database] +connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova_api +idle_timeout = 30 +use_db_reconnect = True +pool_timeout = 10 + +[keystone_authtoken] +auth_uri = http://{{ internal_vip.ip }}:5000/v2.0 +identity_uri = http://{{ internal_vip.ip }}:35357 +admin_tenant_name = service +admin_user = nova +admin_password = {{ NOVA_PASS }} +memcached_servers = {{ memcached_servers }} + +[glance] +host = {{ internal_vip.ip }} + +[neutron] +url = http://{{ internal_vip.ip }}:9696 +auth_strategy = keystone +admin_tenant_name = service +admin_username = neutron +admin_password = {{ NEUTRON_PASS }} +admin_auth_url = http://{{ internal_vip.ip }}:35357/v2.0 +service_metadata_proxy = True +metadata_proxy_shared_secret = {{ METADATA_SECRET }} +auth_type = password
+auth_url = http://{{ internal_vip.ip }}:35357 +password = {{ NEUTRON_PASS }} +username = neutron +project_domain_name = default +user_domain_name = default diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml index d9258ef5..374c4e13 100755 --- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml +++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml @@ -221,10 +221,17 @@ ###################### nova plugin workaround ####################### ####################################################################### -- name: "copy nova plugs on compute" - copy: - src: "../../templates/nova_contrail_vif.tar.gz" - dest: "/opt/nova_contrail_vif.tar.gz" +#- name: "copy nova plugs on compute" +# copy: +# src: "../../templates/nova_contrail_vif.tar.gz" +# dest: "/opt/nova_contrail_vif.tar.gz" + +- name: get image http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: download nova plugin package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ nova_plugin }}" dest=/opt/ - name: "unzip nova plugs" command: su -s /bin/sh -c "tar xzf /opt/nova_contrail_vif.tar.gz -C /opt/" diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml index 32142471..615ac281 100755 --- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml +++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml @@ -247,10 +247,17 @@ ############# neutron plugins workaround ################## ########################################################### -- name: "copy neutron plugs on controller" - copy: - src: "../../templates/neutron_plugin_contrail.tar.gz" - dest: "/opt/neutron_plugin_contrail.tar.gz" +#- name: "copy neutron plugs on controller" +# copy: +# src: "../../templates/neutron_plugin_contrail.tar.gz" +# dest: "/opt/neutron_plugin_contrail.tar.gz" + +- name: get image http server + shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf + register: http_server + +- name: download neutron_plugin_contrail package + get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ neutron_plugin }}" dest=/opt/ - name: "unzip neutron plugs" command: su -s /bin/sh -c "tar xzf /opt/neutron_plugin_contrail.tar.gz -C /opt/" diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/neutron_plugin_contrail.tar.gz b/deploy/adapters/ansible/roles/open-contrail/templates/neutron_plugin_contrail.tar.gz Binary files differdeleted file mode 100644 index 08077049..00000000 --- a/deploy/adapters/ansible/roles/open-contrail/templates/neutron_plugin_contrail.tar.gz +++ /dev/null diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/nova_contrail_vif.tar.gz b/deploy/adapters/ansible/roles/open-contrail/templates/nova_contrail_vif.tar.gz Binary files differdeleted file mode 100644 index 78dac037..00000000 --- a/deploy/adapters/ansible/roles/open-contrail/templates/nova_contrail_vif.tar.gz +++ /dev/null diff --git a/deploy/adapters/ansible/roles/open-contrail/vars/main.yml b/deploy/adapters/ansible/roles/open-contrail/vars/main.yml index 6facb475..582e41e8 100755 --- a/deploy/adapters/ansible/roles/open-contrail/vars/main.yml +++ 
b/deploy/adapters/ansible/roles/open-contrail/vars/main.yml @@ -17,6 +17,9 @@ contrail_keystone_address: "{{ public_vip.ip }}" contrail_admin_user: "admin" contrail_admin_password: "console" +neutron_plugin: neutron_plugin_contrail.tar.gz +nova_plugin: nova_contrail_vif.tar.gz + # network info adapter for compass # contrail_address: "{{ internal_ip }}"
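
The open-contrail task changes above follow the same fetch-and-unpack pattern used throughout this patch: read the compass server address out of /etc/compass.conf, download the tarball from its packages/ tree with get_url, and untar it on the target node. A minimal standalone sketch of that pattern is shown below; the compute host group, the pkg_name value and the /opt destination are illustrative assumptions rather than part of the change.

---
# Sketch only: pkg_name and the target host group are placeholders
# chosen to mirror the nova-plugin task above.
- hosts: compute
  vars:
    pkg_name: nova_contrail_vif.tar.gz
  tasks:
    - name: get image http server
      shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
      register: http_server

    - name: download the package from the compass server
      get_url:
        url: "http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ pkg_name }}"
        dest: "/opt/{{ pkg_name }}"

    - name: unpack the archive that is already on the target node
      unarchive:
        src: "/opt/{{ pkg_name }}"
        dest: /opt/
        copy: no

The explicit su/tar commands in the actual tasks do the same job; unarchive with copy: no (remote_src: yes on newer Ansible) is just a more module-oriented way to express the final step. Either way the large binary tarballs no longer live in the adapter tree, and the compass server acts as the single package mirror.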
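
Further back in this patch, the new nova.conf template builds its memcached_servers string with a Jinja preamble that appends ':11211' to every value in haproxy_hosts and joins the results with commas. A throwaway playbook like the one below, with two invented controller addresses, prints the value that preamble would render; nothing in it is part of the change.

---
# Illustration only: haproxy_hosts is a made-up two-controller dictionary.
- hosts: localhost
  gather_facts: false
  vars:
    haproxy_hosts:
      host1: 10.1.0.50
      host2: 10.1.0.51
  tasks:
    - name: render memcached_servers the same way the nova.conf template does
      debug:
        msg: "{% set servers = [] %}{% for host in haproxy_hosts.values() %}{% set _ = servers.append('%s:11211' % host) %}{% endfor %}{{ servers | join(',') }}"

With those sample values the task prints 10.1.0.50:11211,10.1.0.51:11211 (ordering follows the dictionary), which is the string reused by both the [DEFAULT] and [keystone_authtoken] memcached_servers settings.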