83 files changed, 1709 insertions, 163 deletions
@@ -142,3 +142,4 @@ process_param $* prepare_env download_packages build_tar + diff --git a/build/build.yaml b/build/build.yaml index 4719c2c7..93cb796d 100644 --- a/build/build.yaml +++ b/build/build.yaml @@ -15,10 +15,15 @@ packages: get_method: docker url: opnfv/compass-deck:latest - - name: compass-tasks - description: "compass task container integrated with openstack-ansible and kubespray" + - name: compass-tasks-osa + description: "compass task container integrated with openstack-ansible" get_method: docker - url: opnfv/compass-tasks:latest + url: huangxiangyu/compass-tasks-osa:latest + + - name: compass-tasks-k8s + description: "compass task container integrated with kubespray" + get_method: docker + url: huangxiangyu/compass-tasks-k8s:latest - name: compass-cobbler description: "cobbler container for compass" diff --git a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml index eb80066e..bfdc8958 100755 --- a/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml +++ b/deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml @@ -25,6 +25,12 @@ roles: - install-k8s-dependence +- hosts: ha + remote_user: root + max_fail_percentage: 0 + roles: + - ha + - hosts: localhost remote_user: root max_fail_percentage: 0 diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/files/chk_k8s_master.sh b/deploy/adapters/ansible/kubernetes/roles/ha/files/chk_k8s_master.sh new file mode 100644 index 00000000..62e79b3b --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/files/chk_k8s_master.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +count=`ss -tnl | grep 6443 | wc -l` + +if [ $count = 0 ]; then + exit 1 +else + exit 0 +fi diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/handlers/main.yml b/deploy/adapters/ansible/kubernetes/roles/ha/handlers/main.yml new file mode 100644 index 00000000..03ed82ec --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/handlers/main.yml @@ -0,0 +1,14 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: restart haproxy + service: name=haproxy state=restarted enabled=yes + +- name: restart keepalived + service: name=keepalived state=restarted enabled=yes diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/ha/tasks/main.yml new file mode 100644 index 00000000..c7e58376 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/tasks/main.yml @@ -0,0 +1,83 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- include_vars: "{{ ansible_os_family }}.yml" + +- name: install keepalived haproxy + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: "{{ packages | union(packages_noarch) }}" + +- name: generate ha service list + lineinfile: dest=/opt/service create=yes line= '{{ item }}' + with_items: "{{ services | union(services_noarch) }}" + +- name: install pexpect + pip: name=pexpect state=present extra_args='--pre' + +- name: activate ip_nonlocal_bind + sysctl: name=net.ipv4.ip_nonlocal_bind value=1 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_intvl + sysctl: name=net.ipv4.tcp_keepalive_intvl value=1 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_probes + sysctl: name=net.ipv4.tcp_keepalive_probes value=5 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_time + sysctl: name=net.ipv4.tcp_keepalive_time value=5 + state=present reload=yes + +- name: update haproxy cfg + template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg + notify: restart haproxy + +- name: set haproxy enable flag + lineinfile: dest=/etc/default/haproxy state=present + regexp="ENABLED=*" + line="ENABLED=1" + notify: restart haproxy + when: ansible_os_family == "Debian" + +- name: set haproxy log + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="local0.* /var/log/haproxy.log" + line="local0.* /var/log/haproxy.log" + +- name: set rsyslog udp module + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="^#$ModLoad imudp" + line="$ModLoad imudp" + +- name: set rsyslog udp port + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="^#$UDPServerRun 514" + line="$UDPServerRun 514" + +- name: set keepalived start param + lineinfile: dest=/etc/default/keepalived state=present + regexp="^DAEMON_ARGS=*" + line="DAEMON_ARGS=\"-D -d -S 1\"" + when: ansible_os_family == "Debian" + +- name: set keepalived log + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="local1.* /var/log/keepalived.log" + line="local1.* /var/log/keepalived.log" + +- name: update keepalived info + template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf + notify: restart keepalived + +- name: restart rsyslog + shell: service rsyslog restart + +- meta: flush_handlers diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/kubernetes/roles/ha/templates/haproxy.cfg new file mode 100644 index 00000000..5cd240c0 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/templates/haproxy.cfg @@ -0,0 +1,48 @@ + +global + #chroot /var/run/haproxy + daemon + user haproxy + group haproxy + maxconn 4000 + pidfile /var/run/haproxy/haproxy.pid + #log 127.0.0.1 local0 + tune.bufsize 1000000 + stats socket /var/run/haproxy.sock + stats timeout 2m + +defaults + log global + maxconn 8000 + option redispatch + option dontlognull + option splice-auto + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 50s + timeout server 50s + timeout check 10s + retries 3 + +listen kubernetes-apiserver-https + bind {{ public_vip.ip }}:8383 + option ssl-hello-chk + mode tcp + option tcpka + option tcplog + timeout client 3h + timeout server 3h + balance roundrobin +{% for host,ip in 
haproxy_hosts.items() %} + server {{ host }} {{ ip }}:6443 weight 1 check inter 2000 rise 2 fall 5 +{% endfor %} + +listen stats + mode http + bind 0.0.0.0:9999 + stats enable + stats refresh 30s + stats uri / + stats realm Global\ statistics + stats auth admin:admin diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/templates/keepalived.conf b/deploy/adapters/ansible/kubernetes/roles/ha/templates/keepalived.conf new file mode 100644 index 00000000..c649bed5 --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/templates/keepalived.conf @@ -0,0 +1,49 @@ +global_defs { + router_id {{ inventory_hostname }} +} + +vrrp_sync_group VG1 { + group { + internal_vip + public_vip + } +} + +vrrp_instance internal_vip { + interface {{ sys_intf_mappings.mgmt.interface }} + virtual_router_id {{ vrouter_id_internal }} + state BACKUP + nopreempt + advert_int 1 + priority {{ 50 + (host_index[inventory_hostname] * 50) }} + + authentication { + auth_type PASS + auth_pass 1234 + } + + + virtual_ipaddress { + {{ internal_vip.ip }}/{{ internal_vip.netmask }} dev {{ sys_intf_mappings.mgmt.interface }} + } +} + +vrrp_instance public_vip { + interface {{ sys_intf_mappings.external.interface }} + virtual_router_id {{ vrouter_id_public }} + state BACKUP + nopreempt + advert_int 1 + priority {{ 50 + (host_index[inventory_hostname] * 50) }} + + authentication { + auth_type PASS + auth_pass 4321 + } + + virtual_ipaddress { + {{ network_cfg.public_vip.ip }}/{{ network_cfg.public_vip.netmask }} dev {{ sys_intf_mappings.external.interface }} + } + +} + diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/vars/Debian.yml b/deploy/adapters/ansible/kubernetes/roles/ha/vars/Debian.yml new file mode 100644 index 00000000..b9f46bdf --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/vars/Debian.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +services: [] +packages: [] diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/vars/RedHat.yml b/deploy/adapters/ansible/kubernetes/roles/ha/vars/RedHat.yml new file mode 100644 index 00000000..b9f46bdf --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/vars/RedHat.yml @@ -0,0 +1,11 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +services: [] +packages: [] diff --git a/deploy/adapters/ansible/kubernetes/roles/ha/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/ha/vars/main.yml new file mode 100644 index 00000000..77735d1e --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/ha/vars/main.yml @@ -0,0 +1,16 @@ +############################################################################## +# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others. 
+# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +packages_noarch: + - keepalived + - haproxy + +services_noarch: + - keepalived + - haproxy diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml index 6487e4ef..e683a3fe 100644 --- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/tasks/main.yml @@ -9,6 +9,10 @@ --- - include_vars: "{{ ansible_os_family }}.yml" +- name: Install yum epel-release + command: yum -y install epel-release + when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7' + - name: Install yum packages yum: pkg: "{{ item }}" diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml index e016b855..8ced18b4 100644 --- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml +++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/Debian.yml @@ -2,6 +2,7 @@ packages: - ubuntu-cloud-keyring - python-dev + - python-pip - openvswitch-switch - openvswitch-switch-dpdk - python-memcache diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml index 3ec18e7f..b7e1d3dc 100644 --- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml +++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/RedHat.yml @@ -1,6 +1,7 @@ --- packages: - python-devel + - python-pip - gcc - redhat-lsb-core - python-crypto diff --git a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml index 713b6b5f..7158325a 100644 --- a/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/install-k8s-dependence/vars/main.yml @@ -8,7 +8,6 @@ ############################################################################## --- packages_noarch: - - python-pip - ntp services_noarch: [] diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2 b/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2 new file mode 100644 index 00000000..d998d4cb --- /dev/null +++ b/deploy/adapters/ansible/kubernetes/roles/kargo/files/openssl.conf.j2 @@ -0,0 +1,34 @@ +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names +[alt_names] +DNS.1 = kubernetes +DNS.2 = kubernetes.default +DNS.3 = kubernetes.default.svc +DNS.4 = kubernetes.default.svc.{{ dns_domain }} +DNS.5 = localhost +{% for host in groups['kube-master'] %} +DNS.{{ 5 + loop.index }} = {{ host }} +{% endfor %} +{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %} +{% set idx = groups['kube-master'] | length | int + 5 + 
1 %} +DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }} +{% endif %} +{% for host in groups['kube-master'] %} +IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} +IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} +{% endfor %} +{% set idx = groups['kube-master'] | length | int * 2 + 1 %} +IP.{{ idx }} = {{ kube_apiserver_ip }} +IP.{{ idx + 1 }} = 127.0.0.1 +{% if supplementary_addresses_in_ssl_keys is defined %} +{% set is = idx + 1 %} +{% for addr in supplementary_addresses_in_ssl_keys %} +IP.{{ is + loop.index }} = {{ addr }} +{% endfor %} +{% endif %} diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml index 2763e53e..af52ad04 100644 --- a/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/kargo/tasks/main.yml @@ -67,7 +67,7 @@ - name: copy inventoriy.json file copy: - src: /var/ansible/run/kubernetes-opnfv2/inventories/inventory.json + src: "{{ run_dir }}/inventories/inventory.json" dest: /tmp/inventory.json tags: - ansible @@ -96,6 +96,51 @@ regexp: '^helm_enabled:' line: 'helm_enabled: {{ helm_flag }}' +- name: enable external lb | set lb domain_nam + lineinfile: + dest: /opt/kargo_k8s/inventory/group_vars/all.yml + regexp: '^## apiserver_loadbalancer_domain_name:' + line: 'apiserver_loadbalancer_domain_name: {{ apiserver_loadbalancer_domain_name }}' + +- name: enable external lb | + lineinfile: + dest: /opt/kargo_k8s/inventory/group_vars/all.yml + regexp: '^#loadbalancer_apiserver:' + line: 'loadbalancer_apiserver:' + +- name: enable external lb | set vip address + lineinfile: + dest: /opt/kargo_k8s/inventory/group_vars/all.yml + regexp: '^# address: 1.2.3.4' + line: ' address: {{ vipaddress }}' + +- name: enable external lb | set vip port + lineinfile: + dest: /opt/kargo_k8s/inventory/group_vars/all.yml + regexp: '^# port: 1234' + line: ' port: {{ exlb_port }}' + +- name: enable internal lb + lineinfile: + dest: /opt/kargo_k8s/inventory/group_vars/all.yml + regexp: '^#loadbalancer_apiserver_localhost: true' + line: 'loadbalancer_apiserver_localhost: true' + +- name: add vip to ssl keys + lineinfile: + dest: /opt/kargo_k8s/inventory/group_vars/k8s-cluster.yml + line: 'supplementary_addresses_in_ssl_keys: [{{ vipaddress }}]' + +- name: rm openssl file + file: + path: /opt/kargo_k8s/roles/kubernetes/secrets/templates/openssl.conf.j2 + state: absent + +- name: copy openssl.conf.j2 + copy: + src: openssl.conf.j2 + dest: /opt/kargo_k8s/roles/kubernetes/secrets/templates/openssl.conf.j2 + - name: copy overrided variables copy: src: "{{ item }}" diff --git a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml index 2d396d06..b73056e5 100644 --- a/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml +++ b/deploy/adapters/ansible/kubernetes/roles/kargo/vars/main.yml @@ -1,2 +1,5 @@ --- helm_flag: true +apiserver_loadbalancer_domain_name: "{{ public_vip.ip }}" +vipaddress: "{{ public_vip.ip }}" +exlb_port: 8383 diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml index f923d0d3..6ea57c04 100644 --- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml +++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml @@ -45,6 +45,8 
@@ - compute - utility - neutron_server + - ceilometer_all + - horizon_all remote_user: root roles: - post-osa @@ -56,6 +58,13 @@ - collectd - hosts: + - controller + remote_user: root + roles: + - influxdb + - grafana + +- hosts: - neutron_openvswitch_agent - compute remote_user: root diff --git a/deploy/adapters/ansible/roles/config-osa/files/collect-log.sh b/deploy/adapters/ansible/roles/config-osa/files/collect-log.sh new file mode 100755 index 00000000..7f51a2c3 --- /dev/null +++ b/deploy/adapters/ansible/roles/config-osa/files/collect-log.sh @@ -0,0 +1,17 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +set -o errexit +set -o nounset +set -o pipefail + +SCRIPT_PATH="$(dirname $(realpath ${BASH_SOURCE[0]}))" + +openstack-ansible $SCRIPT_PATH/collect-log.yml diff --git a/deploy/adapters/ansible/roles/config-osa/files/collect-log.yml b/deploy/adapters/ansible/roles/config-osa/files/collect-log.yml new file mode 100644 index 00000000..6e6f9e5a --- /dev/null +++ b/deploy/adapters/ansible/roles/config-osa/files/collect-log.yml @@ -0,0 +1,32 @@ +--- +############################################################################## +# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +- name: Collect log + hosts: rsyslog[0] + user: root + tasks: + - name: ensure log directory exist + stat: + path: /var/log/log-storage + register: log_dir + + - name: compress log directory + archive: + path: "{{ log_dir.stat.path }}" + dest: "{{ log_dir.stat.path }}/log.tar.gz" + format: gz + when: log_dir.stat.exists is defined and log_dir.stat.exists + + - name: fetch the log tarball + fetch: + src: "{{ log_dir.stat.path }}/log.tar.gz" + dest: /opt/log.tar.gz + flat: "yes" + when: log_dir.stat.exists is defined and log_dir.stat.exists diff --git a/deploy/adapters/ansible/roles/config-osa/files/haproxy.yml b/deploy/adapters/ansible/roles/config-osa/files/haproxy.yml index 3085f6aa..a0a09e49 100644 --- a/deploy/adapters/ansible/roles/config-osa/files/haproxy.yml +++ b/deploy/adapters/ansible/roles/config-osa/files/haproxy.yml @@ -261,3 +261,14 @@ haproxy_default_services: haproxy_backend_options: - "httpchk GET /" haproxy_whitelist_networks: "{{ haproxy_octavia_whitelist_networks }}" + + - service: + haproxy_service_name: tacker + haproxy_backend_nodes: "{{ groups['tacker_all'] | default([]) }}" + haproxy_ssl: "{{ haproxy_ssl }}" + haproxy_port: 9890 + haproxy_balance_type: http + haproxy_backend_options: + - "forwardfor" + - "httpchk" + - "httplog" diff --git a/deploy/adapters/ansible/roles/config-osa/files/polling.yaml b/deploy/adapters/ansible/roles/config-osa/files/polling.yaml new file mode 100644 index 00000000..631e3687 --- /dev/null +++ b/deploy/adapters/ansible/roles/config-osa/files/polling.yaml @@ -0,0 +1,6 @@ +--- +sources: + - 
name: all_pollsters + interval: 300 + meters: + - "*" diff --git a/deploy/adapters/ansible/roles/config-osa/files/user_ceph.yml b/deploy/adapters/ansible/roles/config-osa/files/user_ceph.yml index 9d5f13a9..6daa1436 100644 --- a/deploy/adapters/ansible/roles/config-osa/files/user_ceph.yml +++ b/deploy/adapters/ansible/roles/config-osa/files/user_ceph.yml @@ -14,3 +14,13 @@ cinder_backends: rbd_user: cinder rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}" report_discard_supported: true + +gnocchi_storage_driver: ceph +gnocchi_ceph_pool: "metrics" +ceph_extra_components: + - component: gnocchi_api + package: + - "{{ python_ceph_package }}" + client: + - '{{ gnocchi_ceph_client }}' + service: '{{ ceph_gnocchi_service_names }}' diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml index f9eef749..8bb56656 100755 --- a/deploy/adapters/ansible/roles/config-osa/tasks/main.yml +++ b/deploy/adapters/ansible/roles/config-osa/tasks/main.yml @@ -332,6 +332,16 @@ when: - "{{ hostvars[inventory_hostname]['groups']['controller'] | length < 2 }}" +- name: copy collect-log.sh to /opt + copy: + src: collect-log.sh + dest: /opt/collect-log.sh + +- name: copy collect-log.yml to /opt + copy: + src: collect-log.yml + dest: /opt/collect-log.yml + # - name: change repore build # lineinfile: # dest: /etc/ansible/roles/repo_build/tasks/main.yml @@ -345,3 +355,9 @@ - include: fix_pip_version.yml - include: fix_rescue.yml + +- name: include tacker in setup-openstack + lineinfile: + dest: /opt/openstack-ansible/playbooks/setup-openstack.yml + insertafter: "^- include: os-trove" + line: "- include: os-tacker-install.yml" diff --git a/deploy/adapters/ansible/roles/config-osa/tasks/meters.yml b/deploy/adapters/ansible/roles/config-osa/tasks/meters.yml index 8f06a884..2b3bce5f 100644 --- a/deploy/adapters/ansible/roles/config-osa/tasks/meters.yml +++ b/deploy/adapters/ansible/roles/config-osa/tasks/meters.yml @@ -69,3 +69,15 @@ [database]{% raw %} connection = mysql+pymysql://{{ ceilometer_galera_user }}:{{ ceilometer_container_db_password }}@{{ceilometer_galera_address }}/{{ ceilometer_galera_database }}?charset=utf86{% endraw %} # yamllint enable rule:line-length + +- name: make sure the directory exist + file: + dest: /etc/openstack_deploy/ceilometer + state: directory + mode: 0755 + +- name: copy the polling.yml + copy: + dest: /etc/openstack_deploy/ceilometer/polling.yaml + src: polling.yaml + mode: 0644 diff --git a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 index 130b5ad1..03e3a2af 100644 --- a/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 +++ b/deploy/adapters/ansible/roles/config-osa/templates/user_variables.yml.j2 @@ -45,6 +45,11 @@ neutron_plugin_type: ml2.ovs neutron_ml2_drivers_type: "local,flat,{{ tenant_net_info['type'] }}" +neutron_plugin_base: + - router + - metering + - trunk + neutron_provider_networks: network_flat_networks: "*" network_types: "{{ tenant_net_info['type'] }}" diff --git a/deploy/adapters/ansible/roles/post-osa/handlers/main.yml b/deploy/adapters/ansible/roles/post-osa/handlers/main.yml index 3d979e6a..d685edca 100755 --- a/deploy/adapters/ansible/roles/post-osa/handlers/main.yml +++ b/deploy/adapters/ansible/roles/post-osa/handlers/main.yml @@ -9,3 +9,17 @@ - name: restart network service shell: "/sbin/ifconfig eth0 0 &&/sbin/ifdown -a && \ /sbin/ifup --ignore-errors -a" + +- name: 
Restart ceilometer services + service: + name: "{{ item.0.service_name }}" + enabled: "yes" + state: "restarted" + with_subelements: + - "{{ ceilometer_services }}" + - group + when: inventory_hostname in groups[item.1] + register: _restart + until: _restart | success + retries: 5 + delay: 2 diff --git a/deploy/adapters/ansible/roles/post-osa/tasks/ceilometer-upgrade.yml b/deploy/adapters/ansible/roles/post-osa/tasks/ceilometer-upgrade.yml new file mode 100755 index 00000000..3d991b74 --- /dev/null +++ b/deploy/adapters/ansible/roles/post-osa/tasks/ceilometer-upgrade.yml @@ -0,0 +1,28 @@ +############################################################################# +# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +- name: check for gnocchi resource file + stat: + path: "/etc/ceilometer/gnocchi_resources.yaml" + register: gnocchi_resource_file + +- name: get the path of ceilometer-upgrade + shell: "find / -name ceilometer-upgrade 2>/dev/null || true" + register: ceilometer_upgrade_path + +- name: Initialize Gnocchi database by creating ceilometer resources + command: "{{ ceilometer_upgrade_path.stdout }} --skip-metering-database" + become: "yes" + when: gnocchi_resource_file.stat.exists + register: _upgrade + until: _upgrade | success + retries: 10 + delay: 2 + notify: + - Restart ceilometer services diff --git a/deploy/adapters/ansible/roles/post-osa/tasks/install_networking_sfc.yml b/deploy/adapters/ansible/roles/post-osa/tasks/install_networking_sfc.yml index fbed5815..d5a04e78 100644 --- a/deploy/adapters/ansible/roles/post-osa/tasks/install_networking_sfc.yml +++ b/deploy/adapters/ansible/roles/post-osa/tasks/install_networking_sfc.yml @@ -3,7 +3,7 @@ - name: install networking-sfc pip: name: networking-sfc - virtualenv: /openstack/venvs/neutron-15.1.4 + virtualenv: /openstack/venvs/neutron-{{ os_ver }} when: - inventory_hostname in groups['neutron_server'] @@ -11,14 +11,12 @@ package: name: crudini state: latest - when: - - inventory_hostname in groups['neutron_server'] - name: Install networking-sfc for CLI pip: name: networking-sfc when: - - inventory_hostname in groups['utility'] + - inventory_hostname not in groups['neutron_server'] - name: turn off neutron-server on control node service: name=neutron-server state=stopped @@ -35,13 +33,18 @@ shell: crudini --merge /etc/neutron/neutron.conf < /opt/sfc.conf when: inventory_hostname in groups['neutron_server'] +- name: Configure SFC extension on compute nodes + shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \ + agent extensions sfc; + when: inventory_hostname in groups['compute'] + - name: delete sfc.conf shell: rm -rf {{ sfc_plugins.dst }} when: inventory_hostname in groups['neutron_server'] - name: Perform a Neutron DB online upgrade command: | - /openstack/venvs/neutron-15.1.4/bin/neutron-db-manage + /openstack/venvs/neutron-{{ os_ver }}/bin/neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade --expand @@ -51,7 +54,7 @@ - name: Perform a Neutron DB offline upgrade command: | - /openstack/venvs/neutron-15.1.4/bin/neutron-db-manage + /openstack/venvs/neutron-{{ os_ver }}/bin/neutron-db-manage --config-file 
/etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade --contract @@ -61,7 +64,7 @@ - name: SFC DB upgrade command: | - /openstack/venvs/neutron-15.1.4/bin/neutron-db-manage + /openstack/venvs/neutron-{{ os_ver }}/bin/neutron-db-manage --subproject networking-sfc upgrade head become: "yes" diff --git a/deploy/adapters/ansible/roles/post-osa/tasks/main.yml b/deploy/adapters/ansible/roles/post-osa/tasks/main.yml index bd83b50f..fed3842f 100644 --- a/deploy/adapters/ansible/roles/post-osa/tasks/main.yml +++ b/deploy/adapters/ansible/roles/post-osa/tasks/main.yml @@ -13,13 +13,19 @@ - ansible_distribution == 'Ubuntu' # install networking-sfc for non odl scenarios -# - include: install_networking_sfc.yml -# when: -# - odl_sfc is not defined or odl_sfc == "Disable" -# - inventory_hostname not in groups['compute'] -# when: ansible_distribution == 'Ubuntu' +- include: install_networking_sfc.yml + when: + - opendaylight is not defined or opendaylight == "Disable" + - inventory_hostname not in groups['horizon_all'] - include: "{{ ansible_os_family }}.yml" when: - ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7' - inventory_hostname in groups['compute'] + +- include: ceilometer-upgrade.yml + when: + - inventory_hostname in groups['ceilometer_all'] + +- include: tacker_horizon.yml + when: inventory_hostname in groups['horizon_all'] diff --git a/deploy/adapters/ansible/roles/post-osa/tasks/tacker_horizon.yml b/deploy/adapters/ansible/roles/post-osa/tasks/tacker_horizon.yml new file mode 100644 index 00000000..2c1d2d87 --- /dev/null +++ b/deploy/adapters/ansible/roles/post-osa/tasks/tacker_horizon.yml @@ -0,0 +1,33 @@ +--- + +- name: remove tacker-horizon directory + file: + path: "{{ tacker_horizon_dir }}" + state: absent + +- name: get tacker horizon (online) + git: + repo: "{{ tacker_horizon_repo }}" + dest: "{{ tacker_horizon_dir }}" + version: "{{ tacker_horizon_branch }}" + when: offline_deployment is defined and offline_deployment == "Disable" + +- name: copy installation script (offline) + get_url: + url: "http://{{ offline_repo_ip }}:{{ offline_repo_port }}/tacker-horizon.tar.gz" + dest: "/opt/" + when: offline_deployment is defined and offline_deployment == "Enable" + +- name: untar tacker-horizon tarball + command: su -s /bin/sh -c "tar xzf /opt/tacker-horizon.tar.gz -C /opt/" + when: offline_deployment is defined and offline_deployment == "Enable" + +- name: install tacker-horizon + shell: | + cd /opt/tacker-horizon/; + pip install -r requirements.txt; + sleep 30; + python setup.py install; + cp tacker_horizon/enabled/* \ + {{ tacker_horizon_enable_path }}/; + service apache2 restart diff --git a/deploy/adapters/ansible/roles/post-osa/vars/main.yml b/deploy/adapters/ansible/roles/post-osa/vars/main.yml index da886028..95f51530 100644 --- a/deploy/adapters/ansible/roles/post-osa/vars/main.yml +++ b/deploy/adapters/ansible/roles/post-osa/vars/main.yml @@ -1,5 +1,17 @@ --- +os_ver: 16.0.5 +os_name: pike + +# yamllint disable rule:line-length +tacker_horizon_repo: https://github.com/openstack/tacker-horizon.git +tacker_horizon_dir: /opt/tacker-horizon +tacker_horizon_branch: "stable/{{ os_name }}" +openstack_release: "{{ os_ver }}" +tacker_horizon_venv: "/openstack/venvs/horizon-{{ openstack_release }}" +tacker_horizon_enable_path: "{{ tacker_horizon_venv }}/lib/python2.7/site-packages/openstack_dashboard/enabled" +# yamllint enable rule:line-length + sfc_plugins: src: sfc.conf dst: /opt/sfc.conf @@ -24,3 +36,14 @@ intf_storage: |- {%- 
set intf_storage = intf_storage + '.' + compu_sys_mappings["storage"]["vlan_tag"]|string %} {%- endif %} {{- intf_storage }} + +ceilometer_services: + ceilometer-agent-notification: + group: + - ceilometer_agent_notification + service_name: ceilometer-agent-notification + ceilometer-polling: + group: + - ceilometer_agent_central + - ceilometer_agent_compute + service_name: ceilometer-polling diff --git a/deploy/adapters/ansible/roles/setup-host/tasks/main.yml b/deploy/adapters/ansible/roles/setup-host/tasks/main.yml index 4eba3d00..0a63f7f5 100644 --- a/deploy/adapters/ansible/roles/setup-host/tasks/main.yml +++ b/deploy/adapters/ansible/roles/setup-host/tasks/main.yml @@ -10,7 +10,7 @@ --- - name: openstack-hosts-setup - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd /opt/openstack-ansible/playbooks; \ openstack-ansible openstack-hosts-setup.yml \ @@ -25,7 +25,7 @@ when: openstack_hosts_setup_result.stdout.find('Mark openstack-hosts-setup completed') == -1 - name: security-hardening - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd /opt/openstack-ansible/playbooks; \ openstack-ansible security-hardening.yml \ @@ -40,7 +40,7 @@ when: security_hardening_result.stdout.find('Mark security-hardening completed') == -1 - name: lxc-hosts-setup - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd /opt/openstack-ansible/playbooks; \ openstack-ansible lxc-hosts-setup.yml \ @@ -55,7 +55,7 @@ when: lxc_hosts_setup_result.stdout.find('Mark lxc-hosts-setup completed') == -1 - name: lxc-containers-create - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd /opt/openstack-ansible/playbooks; \ openstack-ansible lxc-containers-create.yml \ @@ -66,7 +66,7 @@ register: failed_container - name: destroy the failed_container - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd /opt/openstack-ansible/playbooks; \ openstack-ansible lxc-containers-destroy.yml \ @@ -77,7 +77,7 @@ ignore_errors: "True" - name: retry to setup failed_container - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd /opt/openstack-ansible/playbooks; \ openstack-ansible lxc-containers-create.yml --limit {{item}} \ diff --git a/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml b/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml index 7cf5c86f..4e3a926f 100644 --- a/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml +++ b/deploy/adapters/ansible/roles/setup-infrastructure/tasks/main.yml @@ -8,7 +8,7 @@ ############################################################################## --- - name: setup infrastructure - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd 
/opt/openstack-ansible/playbooks; \ openstack-ansible setup-infrastructure.yml \ diff --git a/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml b/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml index a6ecb82f..c572936d 100644 --- a/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml +++ b/deploy/adapters/ansible/roles/setup-openstack/tasks/main.yml @@ -8,7 +8,7 @@ ############################################################################## --- - name: setup openstack - shell: "export ANSIBLE_LOG_PATH=/var/ansible/run/openstack_pike-opnfv2/ansible.log; \ + shell: "export ANSIBLE_LOG_PATH={{ run_dir }}/ansible.log; \ export ANSIBLE_SCP_IF_SSH=y; \ cd /opt/openstack-ansible/playbooks; \ openstack-ansible setup-openstack.yml \ diff --git a/deploy/adapters/ansible/roles/storage/vars/main.yml b/deploy/adapters/ansible/roles/storage/vars/main.yml index cbee9c1e..d6c5961a 100644 --- a/deploy/adapters/ansible/roles/storage/vars/main.yml +++ b/deploy/adapters/ansible/roles/storage/vars/main.yml @@ -7,7 +7,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## --- -host_loopback_ceph_size: "100G" +host_loopback_ceph_size: "500G" rc_local_insert_before: "^exit 0$" bootstrap_host_data_disk_device: null bootstrap_host_data_disk_device_force: "no" diff --git a/deploy/playbook_done.py b/deploy/ansible_plugins/callback/playbook_done.py index 6b1043d4..4784ff63 100644 --- a/deploy/playbook_done.py +++ b/deploy/ansible_plugins/callback/playbook_done.py @@ -17,6 +17,8 @@ """Ansible playbook callback after a playbook run has completed.""" import sys +from distutils.version import LooseVersion +from ansible import __version__ as __ansible_version__ from ansible.plugins.callback import CallbackBase compass_bin = "/opt/compass/bin" @@ -85,7 +87,10 @@ class CallbackModule(CallbackBase): return def v2_playbook_on_stats(self, stats): - all_vars = self.play.get_variable_manager().get_vars(self.loader) + if LooseVersion(__ansible_version__) < LooseVersion("2.4"): + all_vars = self.play.get_variable_manager().get_vars(self.loader) + else: + all_vars = self.play.get_variable_manager().get_vars() host_vars = all_vars["hostvars"] hosts = sorted(stats.processed.keys()) cluster_name = host_vars[hosts[0]]['cluster_name'] diff --git a/deploy/status_callback.py b/deploy/ansible_plugins/callback/status_callback.py index 6169b87f..b87d2094 100644 --- a/deploy/status_callback.py +++ b/deploy/ansible_plugins/callback/status_callback.py @@ -11,6 +11,8 @@ import httplib import simplejson as json import sys # noqa:F401 +from distutils.version import LooseVersion +from ansible import __version__ as __ansible_version__ from ansible.plugins.callback import CallbackBase COMPASS_HOST = "compass-deck" @@ -101,7 +103,10 @@ class CallbackModule(CallbackBase): def v2_playbook_on_stats(self, stats): self._display.display("playbook_on_stats enter") - all_vars = self.play.get_variable_manager().get_vars(self.loader) + if LooseVersion(__ansible_version__) < LooseVersion("2.4"): + all_vars = self.play.get_variable_manager().get_vars(self.loader) + else: + all_vars = self.play.get_variable_manager().get_vars() host_vars = all_vars["hostvars"] hosts = sorted(stats.processed.keys()) cluster_name = host_vars[hosts[0]]['cluster_name'] diff --git a/deploy/compass_conf/flavor/kubernetes.conf b/deploy/compass_conf/flavor/kubernetes.conf index 35c43155..71acadff 100755 --- a/deploy/compass_conf/flavor/kubernetes.conf +++ 
b/deploy/compass_conf/flavor/kubernetes.conf @@ -4,7 +4,7 @@ FLAVORS = [{ 'display_name': 'ansible-kubernetes', 'template': 'ansible-kubernetes.tmpl', 'roles': [ - 'kube_master', 'etcd', 'kube_node' + 'kube_master', 'etcd', 'kube_node', 'ha' ], }] diff --git a/deploy/compass_conf/package_installer/ansible-kubernetes.conf b/deploy/compass_conf/package_installer/ansible-kubernetes.conf index 32590c82..820691b7 100755 --- a/deploy/compass_conf/package_installer/ansible-kubernetes.conf +++ b/deploy/compass_conf/package_installer/ansible-kubernetes.conf @@ -7,7 +7,7 @@ SETTINGS = { 'playbook_file': 'site.yml', 'inventory_file': 'inventory.py', 'inventory_json_file': 'inventory.json', - 'inventory_group': ['kube_master', 'etcd', 'kube_node'], + 'inventory_group': ['kube_master', 'etcd', 'kube_node', 'ha'], 'group_variable': 'all', 'etc_hosts_path': 'roles/pre-k8s/templates/hosts', 'runner_dirs': ['roles','kubernetes/roles'] diff --git a/deploy/compass_conf/role/kubernetes_ansible.conf b/deploy/compass_conf/role/kubernetes_ansible.conf index ae096f47..c27779ad 100755 --- a/deploy/compass_conf/role/kubernetes_ansible.conf +++ b/deploy/compass_conf/role/kubernetes_ansible.conf @@ -11,5 +11,10 @@ ROLES = [{ 'role': 'kube_node', 'display_name': 'kube node', 'description': 'kube Node' -} +}, { + 'role': 'ha', + 'display_name': 'ha', + 'description': 'ha' +} + ] diff --git a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl index 440bf7d7..f132365a 100644 --- a/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/kubernetes/vars/ansible-kubernetes.tmpl @@ -23,6 +23,8 @@ #set kube_masters = $getVar('kube_master', []) #set kube_nodes = $getVar('kube_node', []) +run_dir: $getVar('run_dir', '') + enable_secgroup: $getVar('enable_secgroup', True) enable_fwaas: $getVar('enable_fwaas', True) enable_vpnaas: $getVar('enable_vpnaas', True) @@ -82,7 +84,7 @@ dashboard_host: "{{ internal_ip }}" haproxy_hosts: #for $item in $has #set $hostname=$item["hostname"] - $hostname: $ip_settings[$hostname]["mgmt"]["ip"] + $hostname: $ip_settings[$hostname]["external"]["ip"] #end for host_index: diff --git a/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl b/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl index cd8c8d30..1d0d6478 100755 --- a/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl +++ b/deploy/compass_conf/templates/ansible_installer/openstack_pike/ansible_cfg/HA-ansible-multinodes.tmpl @@ -3,7 +3,7 @@ log_path = /var/ansible/run/openstack_pike-$cluster_name/ansible.log host_key_checking = False callback_whitelist = playbook_done, status_callback -callback_plugins = /opt/ansible_callbacks +callback_plugins = /opt/ansible_plugins/callback forks=100 [ssh_connection] diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh index cf215f3b..17171578 100755 --- a/deploy/compass_vm.sh +++ b/deploy/compass_vm.sh @@ -23,7 +23,7 @@ function check_container_alive() { docker exec -it compass-mq bash -c "exit" 1>/dev/null 2>&1 local mq_state=$? 
- if [ $((deck_state||tasks_state||cobbler_state||db_state||mq-state)) == 0 ]; then + if [ $((deck_state||tasks_state||cobbler_state||db_state||mq_state)) == 0 ]; then echo "true" else echo "false" diff --git a/deploy/conf/compass.conf b/deploy/conf/compass.conf index 9d9145f1..33fd6200 100644 --- a/deploy/conf/compass.conf +++ b/deploy/conf/compass.conf @@ -17,4 +17,11 @@ export NTP_SERVER="$COMPASS_SERVER" export NAMESERVERS=${USER_NAMESERVER:-"$COMPASS_SERVER"} export COMPASS_REPO_PORT="5151" export OFFLINE_DEPLOY=${OFFLINE_DEPLOY:-'Disable'} -export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks,compass-cobbler]" + +if [[ "x"$COMPOSE_IMAGES == "x" && "x"$OPENSTACK_VERSION != "x" ]]; then + export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks-osa,compass-cobbler]" +fi + +if [[ "x"$COMPOSE_IMAGES == "x" && "x"$KUBERNETES_VERSION != "x" ]]; then + export COMPOSE_IMAGES="[compass-db,compass-mq,compass-deck,compass-tasks-k8s,compass-cobbler]" +fi diff --git a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml index 995d0107..2cedcf4d 100644 --- a/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml +++ b/deploy/conf/hardware_environment/huawei-pod1/k8-nosdn-nofeature-ha.yml @@ -25,6 +25,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host2 mac: 'D8:49:0B:DA:5A:B7' @@ -35,6 +36,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host3 mac: '78:D7:52:A0:B1:99' @@ -45,6 +47,7 @@ hosts: roles: - kube_master - etcd + - ha - name: host4 mac: 'D8:49:0B:DA:5B:5D' diff --git a/deploy/conf/virtual.conf b/deploy/conf/virtual.conf index 1e9034f3..1d47603d 100644 --- a/deploy/conf/virtual.conf +++ b/deploy/conf/virtual.conf @@ -7,3 +7,4 @@ export SWITCH_IPS="1.1.1.1" export SWITCH_CREDENTIAL="version=2c,community=public" export DEPLOYMENT_TIMEOUT="300" export POLL_SWITCHES_FLAG="nopoll_switches" +export NAT_EXTERNAL=${NAT_EXTERNAL:true} diff --git a/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml b/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml index 003f41be..42262057 100644 --- a/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml +++ b/deploy/conf/vm_environment/k8-nosdn-nofeature-ha.yml @@ -16,16 +16,19 @@ hosts: roles: - kube_master - etcd + - ha - name: host2 roles: - kube_master - etcd + - ha - name: host3 roles: - kube_master - etcd + - ha - name: host4 roles: diff --git a/deploy/host_virtual.sh b/deploy/host_virtual.sh index 03a1230f..d955b747 100755 --- a/deploy/host_virtual.sh +++ b/deploy/host_virtual.sh @@ -52,6 +52,11 @@ function launch_host_vms() { vm_template_file="$vm_template_dir/host.xml" vm_template_arch="$vm_template_dir/host-$COMPASS_ARCH.xml" [ -f $vm_template_arch ] && vm_template_file=$vm_template_arch + if [[ "$NAT_EXTERNAL" == "false" ]]; then + NET_IAAS="external" + else + NET_IAAS="external_nat" + fi log_info "bringing up pxe boot vms" i=0 @@ -67,7 +72,7 @@ function launch_host_vms() { -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \ -e "s/REPLACE_BOOT_MAC/${mac_array[i]}/g" \ -e "s/REPLACE_NET_INSTALL/install/g" \ - -e "s/REPLACE_NET_IAAS/external_nat/g" \ + -e "s/REPLACE_NET_IAAS/$NET_IAAS/g" \ "$vm_template_file" \ > $vm_dir/libvirt.xml diff --git a/deploy/network.sh b/deploy/network.sh index 698771b3..eea62277 100755 --- a/deploy/network.sh +++ b/deploy/network.sh @@ -76,9 +76,9 @@ function setup_bridge_external() sudo virsh net-destroy external sudo virsh net-undefine external - #save_network_info + 
save_network_info sed -e "s/REPLACE_NAME/external/g" \ - -e "s/REPLACE_OVS/br-external_nat/g" \ + -e "s/REPLACE_OVS/br-external/g" \ $COMPASS_DIR/deploy/template/network/bridge_ovs.xml \ > $WORK_DIR/network/external.xml @@ -86,14 +86,12 @@ function setup_bridge_external() sudo virsh net-start external sudo virsh net-autostart external - python $COMPASS_DIR/deploy/setup_vnic.py } function recover_bridge_external() { sudo virsh net-start external - python $COMPASS_DIR/deploy/setup_vnic.py } function setup_nat_net() { @@ -128,7 +126,12 @@ function recover_nat_net() { function setup_virtual_net() { setup_nat_net install $INSTALL_GW $INSTALL_NETMASK - setup_nat_net external_nat $EXT_NAT_GW $EXT_NAT_MASK $EXT_NAT_IP_START $EXT_NAT_IP_END + + if [[ "$NAT_EXTERNAL" == "false" ]]; then + setup_bridge_external + else + setup_nat_net external_nat $EXT_NAT_GW $EXT_NAT_MASK $EXT_NAT_IP_START $EXT_NAT_IP_END + fi } function recover_virtual_net() { diff --git a/plugins/barometer/plugin.desc b/plugins/barometer/plugin.desc index 896d6f27..3d976c2e 100644 --- a/plugins/barometer/plugin.desc +++ b/plugins/barometer/plugin.desc @@ -15,6 +15,7 @@ plugin: maintainers: - john.hinman@intel.com + - ramamani.yeleswarapu@intel.com # host os type: ubuntu/centos os_version: ubuntu @@ -50,3 +51,12 @@ plugin: inventory: - compute + - role: influxdb + phrase: post_openstack + inventory: + - controller + + - role: grafana + phrase: post_openstack + inventory: + - controller diff --git a/plugins/barometer/roles/collectd/files/install_docker.sh b/plugins/barometer/roles/collectd/files/install_docker.sh new file mode 100644 index 00000000..f0c08bae --- /dev/null +++ b/plugins/barometer/roles/collectd/files/install_docker.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# + +apt-get update +apt-get install -y apt-transport-https ca-certificates curl software-properties-common +sleep 3 + +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +apt-key fingerprint 0EBFCD88 + +add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + +apt-get update +sleep 3 + +apt-get install -y docker-ce +sleep 5 diff --git a/plugins/barometer/roles/collectd/tasks/collectd.yml b/plugins/barometer/roles/collectd/tasks/collectd.yml index 48e15989..4167e71b 100644 --- a/plugins/barometer/roles/collectd/tasks/collectd.yml +++ b/plugins/barometer/roles/collectd/tasks/collectd.yml @@ -1,5 +1,5 @@ # ############################################################################# -# Copyright (c) 2017 Intel Corp. +# Copyright (c) 2017-18 Intel Corp. # # All rights reserved. 
This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 @@ -7,120 +7,162 @@ # http://www.apache.org/licenses/LICENSE-2.0 # ############################################################################# --- -- name: install dependencies - apt: - name: "{{ item }}" - state: present - with_items: - - libltdl7 - - init-system-helpers - - mcelog - - rrdtool - - libc6 - - librrd4 - - libvirt-bin - - libvirt-dev - - gcc - - git - - python3-pip - -- name: create workspace directory - file: - path: "{{ workspace }}" - state: directory - mode: 0755 - -- name: download, unarchive and install collectd packages +- name: copy install_docker script + remote_user: root + copy: + src: install_docker.sh + dest: /opt/install_docker.sh + mode: 0777 + +- name: install docker + command: su -s /bin/sh -c "/opt/install_docker.sh" + +- name: create collectd_sample_configs dir + remote_user: root shell: | - cd "{{ workspace }}"; - wget "{{ artifacts_collectd_pkg }}"; - su -s /bin/sh -c \ - "tar xzf collectd_pkg.tar.gz -C {{ workspace }} --strip-components 1"; - apt-get install ./libcollectd*.deb -y; - apt-get install ./collectd*.deb -y - -- name: make stack dir - file: - path: /opt/stack - state: directory - mode: 0755 - -- name: check plugin dir exists - stat: - path: /opt/stack/collectd-ceilometer-plugin - register: stat_result + rm -rf /root/collectd_sample_configs; + mkdir /root/collectd_sample_configs; -- name: fetch collectd plugin source code +- name: docker pull opnfv/barometer-collectd + remote_user: root shell: | - cd /opt/stack; - git clone https://github.com/openstack/collectd-ceilometer-plugin.git; - cd /opt/stack/collectd-ceilometer-plugin; - git checkout stable/pike - when: stat_result.stat.exists == False + docker pull opnfv/barometer-collectd; + sleep 5 - name: configure logfile conf + remote_user: root template: src: logfile.conf.j2 - dest: /etc/collectd/collectd.conf.d/logfile.conf + dest: /root/collectd_sample_configs/logfile.conf + +- name: configure csv conf + remote_user: root + template: + src: csv.conf.j2 + dest: /root/collectd_sample_configs/csv.conf + +- name: check if vswitchd on host + shell: ps -ef | grep vswitchd | grep -v grep > /dev/null + register: vswitchd_result + ignore_errors: "true" + +- name: check if db.sock exists + stat: + path: /var/run/openvswitch/db.sock + register: dbsock_result + ignore_errors: "true" + +- name: configure ovs_stats conf and ovs_events conf + remote_user: root + template: + src: "{{ item }}.conf.j2" + dest: "/root/collectd_sample_configs/{{ item }}.conf" + when: vswitchd_result|succeeded and dbsock_result|succeeded + with_items: + - ovs_stats + - ovs_events + +- name: check if mcelog running on host + shell: ps -ef | grep mcelog | grep -v grep > /dev/null + register: mcelog_running + ignore_errors: "true" + +- name: check if mcelog exists + shell: which mcelog > /dev/null + register: mcelog_exists + ignore_errors: "true" + +- name: check if mcelog-client exists + stat: + path: /var/run/mcelog-client + register: mcelog_client_exists + ignore_errors: "true" + +- name: configure mcelog conf + remote_user: root + template: + src: mcelog.conf.j2 + dest: /root/collectd_sample_configs/mcelog.conf + when: mcelog_running|succeeded and mcelog_exists|succeeded and mcelog_client_exists|succeeded - name: configure collectd-aodh plugin conf + remote_user: root template: src: collectd-aodh.conf.j2 - dest: /etc/collectd/collectd.conf.d/collectd-aodh-plugin.conf + dest: 
/root/collectd_sample_configs/collectd-aodh-plugin.conf - name: configure collectd-gnocchi plugin conf + remote_user: root template: src: collectd-gnocchi.conf.j2 - dest: /etc/collectd/collectd.conf.d/collectd-gnocchi-plugin.conf + dest: /root/collectd_sample_configs/collectd-gnocchi-plugin.conf + +- name: check if hugepages folders exist on host + stat: + path: "{{ item }}" + register: hugepages_result + ignore_errors: "true" + with_items: + /sys/devices/system/node + /sys/kernel/mm/hugepages - name: configure hugepages conf + remote_user: root template: src: hugepages.conf.j2 - dest: /etc/collectd/collectd.conf.d/hugepages.conf + dest: /root/collectd_sample_configs/hugepages.conf + when: hugepages_result|succeeded -- name: configure mcelog conf +- name: check if rdt on host + shell: | + grep -q cqm* "/proc/cpuinfo" + register: rdt_result + ignore_errors: "true" + +- name: configure rdt conf + remote_user: root template: - src: mcelog.conf.j2 - dest: /etc/collectd/collectd.conf.d/mcelog.conf + src: rdt.conf.j2 + dest: /root/collectd_sample_configs/rdt.conf + when: rdt_result|succeeded -- name: configure ovs_stats conf +- name: load msr kernel module + modprobe: + name: msr + state: present + +- name: check if libvirtd on host + shell: ps -ef | grep libvirtd | grep -v grep > /dev/null + register: libvirt_result + ignore_errors: "true" + +- name: configure virt conf + remote_user: root template: - src: ovs_stats.conf.j2 - dest: /etc/collectd/collectd.conf.d/ovs_stats.conf + src: virt.conf.j2 + dest: /root/collectd_sample_configs/virt.conf + when: libvirt_result|succeeded -- name: configure ovs_events conf +- name: configure intel_pmu conf + remote_user: root template: - src: ovs_events.conf.j2 - dest: /etc/collectd/collectd.conf.d/ovs_events.conf - -- name: configure collectd conf - lineinfile: - dest: /etc/collectd/collectd.conf - regexp: '{{ item.regexp }}' - line: '{{ item.line }}' - with_items: - - regexp: '#LoadPlugin numa' - line: 'LoadPlugin numa' - - regexp: '#LoadPlugin virt' - line: 'LoadPlugin virt' - - regexp: '#LoadPlugin cpufreq' - line: 'LoadPlugin cpufreq' - - regexp: '#LoadPlugin cpusleep' - line: 'LoadPlugin cpusleep' + src: intel_pmu.conf.j2 + dest: /root/collectd_sample_configs/intel_pmu.conf -- name: configure mcelog conf - lineinfile: - dest: /etc/mcelog/mcelog.conf - regexp: '{{ item.regexp }}' - line: '{{ item.line }}' - with_items: - - regexp: '#socket-path = /var/run/mcelog-client' - line: 'socket-path = /var/run/mcelog-client' +- name: configure network conf + remote_user: root + template: + src: network.conf.j2 + dest: /root/collectd_sample_configs/network.conf + +- name: configure default plugins + remote_user: root + template: + src: default_plugins.conf.j2 + dest: /root/collectd_sample_configs/default_plugins.conf -- name: install gnocchiclient, aodhclient, set ovs manager, restart mcelog, collectd +- name: run barometer collectd container + remote_user: root shell: | - pip install gnocchiclient; - pip install aodhclient; - ovs-vsctl set-manager ptcp:6640:127.0.0.1; - systemctl restart mcelog; - systemctl restart collectd + docker run -dti --net=host -v /root/collectd_sample_configs:/opt/collectd/etc/collectd.conf.d \ + -v /var/run:/var/run -v /tmp:/tmp --privileged opnfv/barometer-collectd /run_collectd.sh diff --git a/plugins/barometer/roles/collectd/tasks/main.yml b/plugins/barometer/roles/collectd/tasks/main.yml index d33823ed..88602ea2 100644 --- a/plugins/barometer/roles/collectd/tasks/main.yml +++ b/plugins/barometer/roles/collectd/tasks/main.yml @@ 
-1,5 +1,5 @@ # ############################################################################# -# Copyright (c) 2017 Intel Corp. +# Copyright (c) 2017-18 Intel Corp. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 diff --git a/plugins/barometer/roles/collectd/templates/collectd-aodh.conf.j2 b/plugins/barometer/roles/collectd/templates/collectd-aodh.conf.j2 index 301ba25a..b7fcfb60 100644 --- a/plugins/barometer/roles/collectd/templates/collectd-aodh.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/collectd-aodh.conf.j2 @@ -3,7 +3,7 @@ </LoadPlugin> <Plugin python> - ModulePath "/opt/stack/collectd-ceilometer-plugin" + ModulePath "/src/barometer/src/collectd-openstack-plugins/collectd-openstack-plugins" LogTraces true Interactive false Import "collectd_ceilometer.aodh.plugin" diff --git a/plugins/barometer/roles/collectd/templates/collectd-gnocchi.conf.j2 b/plugins/barometer/roles/collectd/templates/collectd-gnocchi.conf.j2 index b54e9d5a..79e2872e 100644 --- a/plugins/barometer/roles/collectd/templates/collectd-gnocchi.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/collectd-gnocchi.conf.j2 @@ -3,7 +3,7 @@ </LoadPlugin> <Plugin python> - ModulePath "/opt/stack/collectd-ceilometer-plugin" + ModulePath "/src/barometer/src/collectd-openstack-plugins/collectd-openstack-plugins" LogTraces true Interactive false Import "collectd_ceilometer.gnocchi.plugin" diff --git a/plugins/barometer/roles/collectd/templates/csv.conf.j2 b/plugins/barometer/roles/collectd/templates/csv.conf.j2 new file mode 100644 index 00000000..06abf4ba --- /dev/null +++ b/plugins/barometer/roles/collectd/templates/csv.conf.j2 @@ -0,0 +1,20 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +LoadPlugin csv + +<Plugin csv> + DataDir "/tmp/collectd/csv" + StoreRates false +</Plugin> + diff --git a/plugins/barometer/roles/collectd/templates/default_plugins.conf.j2 b/plugins/barometer/roles/collectd/templates/default_plugins.conf.j2 new file mode 100644 index 00000000..bd1850e6 --- /dev/null +++ b/plugins/barometer/roles/collectd/templates/default_plugins.conf.j2 @@ -0,0 +1,31 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#Hostname "" + +LoadPlugin cpufreq +LoadPlugin disk +#LoadPlugin ethstat +#LoadPlugin ipc +#LoadPlugin ipmi +LoadPlugin load +LoadPlugin memory +LoadPlugin numa +LoadPlugin processes +#LoadPlugin df +#LoadPlugin turbostat +#LoadPlugin uptime +#LoadPlugin contextswitch +LoadPlugin irq +LoadPlugin swap + diff --git a/plugins/barometer/roles/collectd/templates/hugepages.conf.j2 b/plugins/barometer/roles/collectd/templates/hugepages.conf.j2 index cefcc25d..9378c1f2 100644 --- a/plugins/barometer/roles/collectd/templates/hugepages.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/hugepages.conf.j2 @@ -1,3 +1,16 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. LoadPlugin hugepages <Plugin hugepages> diff --git a/plugins/barometer/roles/collectd/templates/intel_pmu.conf.j2 b/plugins/barometer/roles/collectd/templates/intel_pmu.conf.j2 new file mode 100644 index 00000000..caba8825 --- /dev/null +++ b/plugins/barometer/roles/collectd/templates/intel_pmu.conf.j2 @@ -0,0 +1,23 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +LoadPlugin intel_pmu + +<Plugin intel_pmu> + ReportHardwareCacheEvents true + ReportKernelPMUEvents true + ReportSoftwareEvents true +# EventList "/var/cache/pmu/GenuineIntel-6-2D-core.json" +# HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" "L2_RQSTS.ALL_CODE_RD" +</Plugin> + diff --git a/plugins/barometer/roles/collectd/templates/logfile.conf.j2 b/plugins/barometer/roles/collectd/templates/logfile.conf.j2 index 77d86a99..bf53ae8f 100644 --- a/plugins/barometer/roles/collectd/templates/logfile.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/logfile.conf.j2 @@ -1,3 +1,16 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
LoadPlugin "logfile" <Plugin "logfile"> diff --git a/plugins/barometer/roles/collectd/templates/mcelog.conf.j2 b/plugins/barometer/roles/collectd/templates/mcelog.conf.j2 index 3a043ec8..f9ae8aa0 100644 --- a/plugins/barometer/roles/collectd/templates/mcelog.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/mcelog.conf.j2 @@ -1,9 +1,25 @@ -LoadPlugin mcelog +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +<LoadPlugin mcelog> + Interval 1 +</LoadPlugin> <Plugin mcelog> <Memory> - McelogClientSocket "/var/run/mcelog-client" - PersistentNotification false + McelogClientSocket "/var/run/mcelog-client" + PersistentNotification false </Memory> +## McelogLogfile "/var/log/mcelog" </Plugin> diff --git a/plugins/barometer/roles/collectd/templates/network.conf.j2 b/plugins/barometer/roles/collectd/templates/network.conf.j2 new file mode 100644 index 00000000..e9343e66 --- /dev/null +++ b/plugins/barometer/roles/collectd/templates/network.conf.j2 @@ -0,0 +1,19 @@ +# Copyright 2017-18 OPNFV, Intel Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +LoadPlugin network + +<Plugin network> + Server "{{ internal_vip.ip }}" "25826" +</Plugin> + diff --git a/plugins/barometer/roles/collectd/templates/ovs_events.conf.j2 b/plugins/barometer/roles/collectd/templates/ovs_events.conf.j2 index d72e2004..2d71fa18 100644 --- a/plugins/barometer/roles/collectd/templates/ovs_events.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/ovs_events.conf.j2 @@ -1,8 +1,23 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
<LoadPlugin ovs_events> Interval 1 </LoadPlugin> + <Plugin "ovs_events"> Port 6640 + Address "127.0.0.1" Socket "/var/run/openvswitch/db.sock" Interfaces "br0" "veth0" SendNotification false diff --git a/plugins/barometer/roles/collectd/templates/ovs_stats.conf.j2 b/plugins/barometer/roles/collectd/templates/ovs_stats.conf.j2 index 945c4e92..b7e4d5c8 100644 --- a/plugins/barometer/roles/collectd/templates/ovs_stats.conf.j2 +++ b/plugins/barometer/roles/collectd/templates/ovs_stats.conf.j2 @@ -1,6 +1,20 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License <LoadPlugin ovs_stats> Interval 1 </LoadPlugin> + <Plugin ovs_stats> Port "6640" Address "127.0.0.1" diff --git a/plugins/barometer/roles/collectd/templates/rdt.conf.j2 b/plugins/barometer/roles/collectd/templates/rdt.conf.j2 new file mode 100644 index 00000000..96d5eb34 --- /dev/null +++ b/plugins/barometer/roles/collectd/templates/rdt.conf.j2 @@ -0,0 +1,21 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +<LoadPlugin intel_rdt> + Interval 1 +</LoadPlugin> + +<Plugin "intel_rdt"> + Cores "" +</Plugin> + diff --git a/plugins/barometer/roles/collectd/templates/virt.conf.j2 b/plugins/barometer/roles/collectd/templates/virt.conf.j2 new file mode 100644 index 00000000..8048bc13 --- /dev/null +++ b/plugins/barometer/roles/collectd/templates/virt.conf.j2 @@ -0,0 +1,32 @@ +# Copyright 2017-18 OPNFV +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +LoadPlugin virt + +<Plugin virt> +# Connection "xen:///" + RefreshInterval 60 +# Domain "name" +# BlockDevice "name:device" +# BlockDeviceFormat target +# BlockDeviceFormatBasename false +# InterfaceDevice "name:device" +# IgnoreSelected false +# HostnameFormat name +# InterfaceFormat name +# PluginInstanceFormat name +# Instances 1 + ExtraStats "cpu_util disk disk_err domain_state fs_info job_stats_background pcpu perf vcpupin" +</Plugin> + diff --git a/plugins/barometer/roles/collectd/vars/main.yml b/plugins/barometer/roles/collectd/vars/main.yml index 9fc0687d..b3a0b3b7 100644 --- a/plugins/barometer/roles/collectd/vars/main.yml +++ b/plugins/barometer/roles/collectd/vars/main.yml @@ -1,5 +1,5 @@ # ############################################################################# -# Copyright (c) 2017 Intel Corp. +# Copyright (c) 2017-18 Intel Corp. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 @@ -8,9 +8,3 @@ # ############################################################################# --- openstack_passwd_file: /etc/openstack_deploy/user_secrets.yml - -workspace: /tmp/plugin - -artifacts_collectd_pkg: http://artifacts.opnfv.org/compass4nfv/package/master/collectd_pkg.tar.gz - -collectd_ver: 5.8.0.23.g576797d-1~xenial_amd64.deb diff --git a/plugins/barometer/roles/grafana/files/configure_grafana.sh b/plugins/barometer/roles/grafana/files/configure_grafana.sh new file mode 100644 index 00000000..39fcc82b --- /dev/null +++ b/plugins/barometer/roles/grafana/files/configure_grafana.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright 2017-2018 OPNFV, Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ -z "${influxdb_host}" ] +then + influxdb_host=localhost +fi + + +while [ -z "$RETURN" ] +do + sleep 1 + RETURN=$(curl -u admin:admin -X POST -H 'content-type: application/json'\ + http://127.0.0.1:3000/api/datasources -d \ + '{"name":"collectd","type":"influxdb","url":"http://'"${influxdb_host}"':8086","access":"proxy","isDefault":true,"database":"collectd","user":"admin","password":"admin","basicAuth":false}') +done + +FILES=/opt/barometer/docker/barometer-grafana/dashboards/*.json +for f in $FILES +do + curl -u admin:admin -X POST -H 'content-type: application/json' \ + http://127.0.0.1:3000/api/dashboards/db -d @$f ; +done diff --git a/plugins/barometer/roles/grafana/files/install_grafana.sh b/plugins/barometer/roles/grafana/files/install_grafana.sh new file mode 100644 index 00000000..0b9f4ec5 --- /dev/null +++ b/plugins/barometer/roles/grafana/files/install_grafana.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright 2017-2018 OPNFV, Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo "deb https://packagecloud.io/grafana/stable/debian/ wheezy main" | tee /etc/apt/sources.list.d/grafana.list + +curl https://packagecloud.io/gpg.key | apt-key add - + +apt-get update + +apt-get install -y grafana + +sleep 5 + +service grafana-server start diff --git a/plugins/barometer/roles/grafana/tasks/grafana.yml b/plugins/barometer/roles/grafana/tasks/grafana.yml new file mode 100644 index 00000000..9c01920f --- /dev/null +++ b/plugins/barometer/roles/grafana/tasks/grafana.yml @@ -0,0 +1,34 @@ +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# +--- +- name: copy install_grafana script + remote_user: root + copy: + src: install_grafana.sh + dest: /opt/install_grafana.sh + mode: 0777 + +- name: install grafana + command: su -s /bin/sh -c "/opt/install_grafana.sh" + +- name: fetch barometer source code + remote_user: root + shell: | + cd /opt && rm -rf barometer; + git clone https://github.com/opnfv/barometer.git; + +- name: copy configure_grafana script + remote_user: root + copy: + src: configure_grafana.sh + dest: /opt/configure_grafana.sh + mode: 0777 + +- name: configure grafana + command: su -s /bin/sh -c "/opt/configure_grafana.sh" diff --git a/plugins/barometer/roles/grafana/tasks/main.yml b/plugins/barometer/roles/grafana/tasks/main.yml new file mode 100644 index 00000000..50d15b49 --- /dev/null +++ b/plugins/barometer/roles/grafana/tasks/main.yml @@ -0,0 +1,11 @@ +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# +--- +- include: grafana.yml + when: barometer is defined and barometer == "Enable" diff --git a/plugins/barometer/roles/influxdb/files/install_influxdb.sh b/plugins/barometer/roles/influxdb/files/install_influxdb.sh new file mode 100644 index 00000000..7fd1e046 --- /dev/null +++ b/plugins/barometer/roles/influxdb/files/install_influxdb.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copyright 2017-2018 OPNFV, Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +curl -sL https://repos.influxdata.com/influxdb.key | apt-key add - + +source /etc/lsb-release + +echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | tee /etc/apt/sources.list.d/influxdb.list + +apt-get update + +apt-get install -y influxdb + +sleep 5 + +# install types.db +mkdir /opt/collectd && mkdir /opt/collectd/share && mkdir /opt/collectd/share/collectd && cd /opt/collectd/share/collectd + +wget https://raw.githubusercontent.com/collectd/collectd/master/src/types.db && cd - diff --git a/plugins/barometer/roles/influxdb/tasks/influxdb.yml b/plugins/barometer/roles/influxdb/tasks/influxdb.yml new file mode 100644 index 00000000..8e1f4ca1 --- /dev/null +++ b/plugins/barometer/roles/influxdb/tasks/influxdb.yml @@ -0,0 +1,28 @@ +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# +--- +- name: copy install_influxdb script + remote_user: root + copy: + src: install_influxdb.sh + dest: /opt/install_influxdb.sh + mode: 0777 + +- name: install influxdb + command: su -s /bin/sh -c "/opt/install_influxdb.sh" + +- name: configure /etc/influxdb/influxdb.conf + remote_user: root + template: + src: influxdb.conf.j2 + dest: /etc/influxdb/influxdb.conf + +- name: start influxdb service + remote_user: root + shell: service influxdb start diff --git a/plugins/barometer/roles/influxdb/tasks/main.yml b/plugins/barometer/roles/influxdb/tasks/main.yml new file mode 100644 index 00000000..b6da783b --- /dev/null +++ b/plugins/barometer/roles/influxdb/tasks/main.yml @@ -0,0 +1,11 @@ +# ############################################################################# +# Copyright (c) 2018 Intel Corp. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# ############################################################################# +--- +- include: influxdb.yml + when: barometer is defined and barometer == "Enable" diff --git a/plugins/barometer/roles/influxdb/templates/influxdb.conf.j2 b/plugins/barometer/roles/influxdb/templates/influxdb.conf.j2 new file mode 100644 index 00000000..d35e93a7 --- /dev/null +++ b/plugins/barometer/roles/influxdb/templates/influxdb.conf.j2 @@ -0,0 +1,478 @@ +### Welcome to the InfluxDB configuration file. + +# The values in this file override the default values used by the system if +# a config option is not specified. The commented out lines are the configuration +# field and the default value used. Uncommenting a line and changing the value +# will change the value used at runtime when the process is restarted. + +# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com +# The data includes a random ID, os, arch, version, the number of series and other +# usage data. No data from user databases is ever transmitted. +# Change this option to true to disable reporting. 
+# reporting-disabled = false + +# Bind address to use for the RPC service for backup and restore. +# bind-address = "127.0.0.1:8088" + +### +### [meta] +### +### Controls the parameters for the Raft consensus group that stores metadata +### about the InfluxDB cluster. +### + +[meta] + # Where the metadata/raft database is stored + dir = "/var/lib/influxdb/meta" + + # Automatically create a default retention policy when creating a database. + # retention-autocreate = true + + # If log messages are printed for the meta service + # logging-enabled = true + +### +### [data] +### +### Controls where the actual shard data for InfluxDB lives and how it is +### flushed from the WAL. "dir" may need to be changed to a suitable place +### for your system, but the WAL settings are an advanced configuration. The +### defaults should work for most systems. +### + +[data] + # The directory where the TSM storage engine stores TSM files. + dir = "/var/lib/influxdb/data" + + # The directory where the TSM storage engine stores WAL files. + wal-dir = "/var/lib/influxdb/wal" + + # The amount of time that a write will wait before fsyncing. A duration + # greater than 0 can be used to batch up multiple fsync calls. This is useful for slower + # disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL. + # Values in the range of 0-100ms are recommended for non-SSD disks. + # wal-fsync-delay = "0s" + + + # The type of shard index to use for new shards. The default is an in-memory index that is + # recreated at startup. A value of "tsi1" will use a disk based index that supports higher + # cardinality datasets. + # index-version = "inmem" + + # Trace logging provides more verbose output around the tsm engine. Turning + # this on can provide more useful output for debugging tsm engine issues. + # trace-logging-enabled = false + + # Whether queries should be logged before execution. Very useful for troubleshooting, but will + # log any sensitive data contained within a query. + # query-log-enabled = true + + # Settings for the TSM engine + + # CacheMaxMemorySize is the maximum size a shard's cache can + # reach before it starts rejecting writes. + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Vaues without a size suffix are in bytes. + # cache-max-memory-size = "1g" + + # CacheSnapshotMemorySize is the size at which the engine will + # snapshot the cache and write it to a TSM file, freeing up memory + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. + # cache-snapshot-memory-size = "25m" + + # CacheSnapshotWriteColdDuration is the length of time at + # which the engine will snapshot the cache and write it to + # a new TSM file if the shard hasn't received writes or deletes + # cache-snapshot-write-cold-duration = "10m" + + # CompactFullWriteColdDuration is the duration at which the engine + # will compact all TSM files in a shard if it hasn't received a + # write or delete + # compact-full-write-cold-duration = "4h" + + # The maximum number of concurrent full and level compactions that can run at one time. A + # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater + # than 0 limits compactions to that value. This setting does not apply + # to cache snapshotting. + # max-concurrent-compactions = 0 + + # The maximum series allowed per database before writes are dropped. This limit can prevent + # high cardinality issues at the database level. 
This limit can be disabled by setting it to + # 0. + # max-series-per-database = 1000000 + + # The maximum number of tag values per tag that are allowed before writes are dropped. This limit + # can prevent high cardinality tag values from being written to a measurement. This limit can be + # disabled by setting it to 0. + # max-values-per-tag = 100000 + +### +### [coordinator] +### +### Controls the clustering service configuration. +### + +[coordinator] + # The default time a write request will wait until a "timeout" error is returned to the caller. + # write-timeout = "10s" + + # The maximum number of concurrent queries allowed to be executing at one time. If a query is + # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled + # by setting it to 0. + # max-concurrent-queries = 0 + + # The maximum time a query will is allowed to execute before being killed by the system. This limit + # can help prevent run away queries. Setting the value to 0 disables the limit. + # query-timeout = "0s" + + # The time threshold when a query will be logged as a slow query. This limit can be set to help + # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging. + # log-queries-after = "0s" + + # The maximum number of points a SELECT can process. A value of 0 will make + # the maximum point count unlimited. This will only be checked every second so queries will not + # be aborted immediately when hitting the limit. + # max-select-point = 0 + + # The maximum number of series a SELECT can run. A value of 0 will make the maximum series + # count unlimited. + # max-select-series = 0 + + # The maxium number of group by time bucket a SELECT can create. A value of zero will max the maximum + # number of buckets unlimited. + # max-select-buckets = 0 + +### +### [retention] +### +### Controls the enforcement of retention policies for evicting old data. +### + +[retention] + # Determines whether retention policy enforcement enabled. + # enabled = true + + # The interval of time when retention policy enforcement checks run. + # check-interval = "30m" + +### +### [shard-precreation] +### +### Controls the precreation of shards, so they are available before data arrives. +### Only shards that, after creation, will have both a start- and end-time in the +### future, will ever be created. Shards are never precreated that would be wholly +### or partially in the past. + +[shard-precreation] + # Determines whether shard pre-creation service is enabled. + # enabled = true + + # The interval of time when the check to pre-create new shards runs. + # check-interval = "10m" + + # The default period ahead of the endtime of a shard group that its successor + # group is created. + # advance-period = "30m" + +### +### Controls the system self-monitoring, statistics and diagnostics. +### +### The internal database for monitoring data is created automatically if +### if it does not already exist. The target retention within this database +### is called 'monitor' and is also created with a retention period of 7 days +### and a replication factor of 1, if it does not exist. In all cases the +### this retention policy is configured as the default for the database. + +[monitor] + # Whether to record statistics internally. 
+ # store-enabled = true + + # The destination database for recorded statistics + # store-database = "_internal" + + # The interval at which to record statistics + # store-interval = "10s" + +### +### [http] +### +### Controls how the HTTP endpoints are configured. These are the primary +### mechanism for getting data into and out of InfluxDB. +### + +[http] + # Determines whether HTTP endpoint is enabled. + # enabled = true + + # The bind address used by the HTTP service. + # bind-address = ":8086" + + # Determines whether user authentication is enabled over HTTP/HTTPS. + # auth-enabled = false + + # The default realm sent back when issuing a basic auth challenge. + # realm = "InfluxDB" + + # Determines whether HTTP request logging is enabled. + # log-enabled = true + + # Determines whether detailed write logging is enabled. + # write-tracing = false + + # Determines whether the pprof endpoint is enabled. This endpoint is used for + # troubleshooting and monitoring. + # pprof-enabled = true + + # Determines whether HTTPS is enabled. + # https-enabled = false + + # The SSL certificate to use when HTTPS is enabled. + # https-certificate = "/etc/ssl/influxdb.pem" + + # Use a separate private key location. + # https-private-key = "" + + # The JWT auth shared secret to validate requests using JSON web tokens. + # shared-secret = "" + + # The default chunk size for result sets that should be chunked. + # max-row-limit = 0 + + # The maximum number of HTTP connections that may be open at once. New connections that + # would exceed this limit are dropped. Setting this value to 0 disables the limit. + # max-connection-limit = 0 + + # Enable http service over unix domain socket + # unix-socket-enabled = false + + # The path of the unix domain socket. + # bind-socket = "/var/run/influxdb.sock" + + # The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit. + # max-body-size = 25000000 + + +### +### [ifql] +### +### Configures the ifql RPC API. +### + +[ifql] + # Determines whether the RPC service is enabled. + # enabled = true + + # Determines whether additional logging is enabled. + # log-enabled = true + + # The bind address used by the ifql RPC service. + # bind-address = ":8082" + + +### +### [subscriber] +### +### Controls the subscriptions, which can be used to fork a copy of all data +### received by the InfluxDB host. +### + +[subscriber] + # Determines whether the subscriber service is enabled. + # enabled = true + + # The default timeout for HTTP writes to subscribers. + # http-timeout = "30s" + + # Allows insecure HTTPS connections to subscribers. This is useful when testing with self- + # signed certificates. + # insecure-skip-verify = false + + # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used + # ca-certs = "" + + # The number of writer goroutines processing the write channel. + # write-concurrency = 40 + + # The number of in-flight writes buffered in the write channel. + # write-buffer-size = 1000 + + +### +### [[graphite]] +### +### Controls one or many listeners for Graphite data. +### + +[[graphite]] + # Determines whether the graphite endpoint is enabled. + # enabled = false + # database = "graphite" + # retention-policy = "" + # bind-address = ":2003" + # protocol = "tcp" + # consistency-level = "one" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. 
Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # udp-read-buffer = 0 + + ### This string joins multiple matching 'measurement' values providing more control over the final measurement name. + # separator = "." + + ### Default tags that will be added to all metrics. These can be overridden at the template level + ### or by tags extracted from metric + # tags = ["region=us-east", "zone=1c"] + + ### Each template line requires a template pattern. It can have an optional + ### filter before the template and separated by spaces. It can also have optional extra + ### tags following the template. Multiple tags should be separated by commas and no spaces + ### similar to the line protocol format. There can be only one default template. + # templates = [ + # "*.app env.service.resource.measurement", + # # Default template + # "server.*", + # ] + +### +### [collectd] +### +### Controls one or many listeners for collectd data. +### + +[[collectd]] + enabled = true + bind-address = ":25826" # the bind address + database = "collectd" # Name of the database that will be written to + retention-policy = "" + batch-size = 5000 # will flush if this many points get buffered + batch-pending = 10 # number of batches that may be pending in memory + batch-timeout = "10s" + read-buffer = 0 # UDP read buffer size, 0 means to use OS default + typesdb = "/opt/collectd/share/collectd/types.db" + security-level = "none" # "none", "sign", or "encrypt" + + # enabled = false + # bind-address = ":25826" + # database = "collectd" + # retention-policy = "" + # + # The collectd service supports either scanning a directory for multiple types + # db files, or specifying a single db file. + # typesdb = "/usr/local/share/collectd" + # + # security-level = "none" + # auth-file = "/etc/collectd/auth_file" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "10s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 + + # Multi-value plugins can be handled two ways. + # "split" will parse and store the multi-value plugin data into separate measurements + # "join" will parse and store the multi-value plugin as a single multi-value measurement. + # "split" is the default behavior for backward compatability with previous versions of influxdb. + # parse-multivalue-plugin = "split" +### +### [opentsdb] +### +### Controls one or many listeners for OpenTSDB data. +### + +[[opentsdb]] + # enabled = false + # bind-address = ":4242" + # database = "opentsdb" + # retention-policy = "" + # consistency-level = "one" + # tls-enabled = false + # certificate= "/etc/ssl/influxdb.pem" + + # Log an error for every malformed point. + # log-point-errors = true + + # These next lines control how batching works. 
You should have this enabled + # otherwise you could get dropped metrics or poor performance. Only points + # metrics received over the telnet protocol undergo batching. + + # Flush if this many points get buffered + # batch-size = 1000 + + # Number of batches that may be pending in memory + # batch-pending = 5 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + +### +### [[udp]] +### +### Controls the listeners for InfluxDB line protocol data via UDP. +### + +[[udp]] + # enabled = false + # bind-address = ":8089" + # database = "udp" + # retention-policy = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Will flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 + +### +### [continuous_queries] +### +### Controls how continuous queries are run within InfluxDB. +### + +[continuous_queries] + # Determines whether the continuous query service is enabled. + # enabled = true + + # Controls whether queries are logged when executed by the CQ service. + # log-enabled = true + + # Controls whether queries are logged to the self-monitoring data store. + # query-stats-enabled = false + + # interval for how often continuous queries will be checked if they need to run + # run-interval = "1s" + + diff --git a/plugins/odl_cluster/roles/setup-odl/files/opendaylight.service b/plugins/odl_cluster/roles/setup-odl/files/opendaylight.service index a6966d82..fe125ccf 100755 --- a/plugins/odl_cluster/roles/setup-odl/files/opendaylight.service +++ b/plugins/odl_cluster/roles/setup-odl/files/opendaylight.service @@ -10,7 +10,7 @@ Type=simple WorkingDirectory=/opt/opendaylight PermissionsStartOnly=true ExecStartPre= -ExecStart=/usr/lib/jvm/java-8-oracle/bin/java -Djava.security.properties=/opt/opendaylight/etc/odl.java.security -server -Xms128M -Xmx2048m -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -XX:+HeapDumpOnOutOfMemoryError -Dcom.sun.management.jmxremote -Djava.security.egd=file:/dev/./urandom -Djava.endorsed.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/endorsed:/usr/lib/jvm/java-8-oracle/lib/endorsed:/opt/opendaylight/lib/endorsed -Djava.ext.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/ext:/usr/lib/jvm/java-8-oracle/lib/ext:/opt/opendaylight/lib/ext -Dkaraf.instances=/opt/opendaylight/instances -Dkaraf.home=/opt/opendaylight -Dkaraf.base=/opt/opendaylight -Dkaraf.data=/opt/opendaylight/data -Dkaraf.etc=/opt/opendaylight/etc -Dkaraf.restart.jvm.supported=true -Djava.io.tmpdir=/opt/opendaylight/data/tmp -Djava.util.logging.config.file=/opt/opendaylight/etc/java.util.logging.properties -Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true -classpath /opt/opendaylight/lib/boot/org.apache.karaf.diagnostic.boot-4.0.9.jar:/opt/opendaylight/lib/boot/org.apache.karaf.jaas.boot-4.0.9.jar:/opt/opendaylight/lib/boot/org.apache.karaf.main-4.0.9.jar:/opt/opendaylight/lib/boot/org.osgi.core-6.0.0.jar org.apache.karaf.main.Main +ExecStart=/usr/lib/jvm/java-8-oracle/bin/java -Djava.security.properties=/opt/opendaylight/etc/odl.java.security -server -Xms128M -Xmx2048m -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass 
-XX:+HeapDumpOnOutOfMemoryError -Dcom.sun.management.jmxremote -Djava.security.egd=file:/dev/./urandom -Djava.endorsed.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/endorsed:/usr/lib/jvm/java-8-oracle/lib/endorsed:/opt/opendaylight/lib/endorsed -Djava.ext.dirs=/usr/lib/jvm/java-8-oracle/jre/lib/ext:/usr/lib/jvm/java-8-oracle/lib/ext:/opt/opendaylight/lib/ext -Dkaraf.instances=/opt/opendaylight/instances -Dkaraf.home=/opt/opendaylight -Dkaraf.base=/opt/opendaylight -Dkaraf.data=/opt/opendaylight/data -Dkaraf.etc=/opt/opendaylight/etc -Dkaraf.restart.jvm.supported=true -Djava.io.tmpdir=/opt/opendaylight/data/tmp -Djava.util.logging.config.file=/opt/opendaylight/etc/java.util.logging.properties -Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true -classpath /opt/opendaylight/lib/boot/org.apache.karaf.diagnostic.boot-4.0.10.jar:/opt/opendaylight/lib/boot/org.apache.karaf.jaas.boot-4.0.10.jar:/opt/opendaylight/lib/boot/org.apache.karaf.main-4.0.10.jar:/opt/opendaylight/lib/boot/org.osgi.core-6.0.0.jar org.apache.karaf.main.Main Restart=on-failure LimitNOFILE=65535 TimeoutStopSec=15 diff --git a/plugins/odl_cluster/roles/setup-odl/tasks/control-servers-2.yml b/plugins/odl_cluster/roles/setup-odl/tasks/control-servers-2.yml index 39d0312f..5faa4a22 100755 --- a/plugins/odl_cluster/roles/setup-odl/tasks/control-servers-2.yml +++ b/plugins/odl_cluster/roles/setup-odl/tasks/control-servers-2.yml @@ -3,7 +3,7 @@ - name: configure odl l3 driver shell: | crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins \ - odl-router,metering; + odl-router_v2,metering,trunk; when: odl_l3_agent == "Enable" - name: configure opendaylight -> ml2 diff --git a/plugins/odl_cluster/roles/setup-odl/templates/org.apache.karaf.features.cfg b/plugins/odl_cluster/roles/setup-odl/templates/org.apache.karaf.features.cfg index 86b2baec..fa0e87f7 100755 --- a/plugins/odl_cluster/roles/setup-odl/templates/org.apache.karaf.features.cfg +++ b/plugins/odl_cluster/roles/setup-odl/templates/org.apache.karaf.features.cfg @@ -37,9 +37,9 @@ # Comma separated list of features repositories to register by default # featuresRepositories = \ - mvn:org.opendaylight.integration/features-index/0.7.0/xml/features, \ - mvn:org.apache.karaf.features/framework/4.0.9/xml/features, \ - mvn:org.apache.karaf.features/standard/4.0.9/xml/features + mvn:org.opendaylight.integration/features-index/0.7.1/xml/features, \ + mvn:org.apache.karaf.features/framework/4.0.10/xml/features, \ + mvn:org.apache.karaf.features/standard/4.0.10/xml/features # # Comma separated list of features to install at startup diff --git a/plugins/odl_cluster/roles/setup-odl/vars/main.yml b/plugins/odl_cluster/roles/setup-odl/vars/main.yml index 45b6c5e1..05ad98f5 100755 --- a/plugins/odl_cluster/roles/setup-odl/vars/main.yml +++ b/plugins/odl_cluster/roles/setup-odl/vars/main.yml @@ -17,8 +17,8 @@ odl_username: admin odl_password: admin odl_api_port: 8181 -odl_pkg_url: karaf-0.7.0.tar.gz -odl_pkg_name: karaf-0.7.0.tar.gz +odl_pkg_url: karaf-0.7.1.tar.gz +odl_pkg_name: karaf-0.7.1.tar.gz odl_home: "/opt/opendaylight/" odl_base_features: - config @@ -48,7 +48,7 @@ odl_extra_features: odl_features: "{{ odl_base_features + odl_extra_features }}" -sdn_package: http://artifacts.opnfv.org/compass4nfv/packages/master/sdn_package.tar.gz +sdn_package: http://artifacts.opnfv.org/compass4nfv/package/master/sdn_package.tar.gz jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz jdk8_script_name: install_jdk8.tar diff --git a/util/docker-compose/roles/compass/tasks/main.yml 
b/util/docker-compose/roles/compass/tasks/main.yml index c7dba96b..d03a7bf2 100755 --- a/util/docker-compose/roles/compass/tasks/main.yml +++ b/util/docker-compose/roles/compass/tasks/main.yml @@ -19,7 +19,11 @@ {% for item in compass_images.results %} {% if "ansible_facts" in item %} {% set facts = item.ansible_facts %} - {% set _ = image_dict.update({facts.image_name: facts.image_repo}) %} + {% set image_name = facts.image_name %} + {% if "compass_tasks" in image_name %} + {% set image_name = "compass_tasks" %} + {% endif %} + {% set _ = image_dict.update({image_name: facts.image_repo}) %} {% endif %} {% endfor %} {% for key in image_dict %} @@ -95,6 +99,11 @@ "{{ docker_compose_dir }}"/ansible/$i done +- name: copy ansible plugins + copy: + src: "{{ compass_dir }}/deploy/ansible_plugins" + dest: "{{ docker_compose_dir }}" + - name: create run dir file: path: "{{ docker_compose_dir }}/ansible/run" @@ -123,16 +132,6 @@ tags: - redploy -- name: add ansible callback - shell: | - docker cp "{{ item }}" \ - compass-deck:/root/compass-deck/bin/ansible_callbacks - docker cp "{{ item }}" \ - compass-tasks:/opt/ansible_callbacks - with_items: - - "{{ compass_dir }}/deploy/status_callback.py" - - "{{ compass_dir }}/deploy/playbook_done.py" - - name: rm ansible run shell: | docker exec compass-tasks bash -c "rm -rf /var/ansible/run/*" diff --git a/util/docker-compose/roles/compass/templates/docker-compose.yml.j2 b/util/docker-compose/roles/compass/templates/docker-compose.yml.j2 index 7fc8ea3a..f7748634 100755 --- a/util/docker-compose/roles/compass/templates/docker-compose.yml.j2 +++ b/util/docker-compose/roles/compass/templates/docker-compose.yml.j2 @@ -43,6 +43,7 @@ services: volumes: - {{ docker_compose_dir }}/compass_conf:/etc/compass - {{ docker_compose_dir }}/ansible:/var/ansible + - {{ docker_compose_dir }}/ansible_plugins:/opt/ansible_plugins command: - /usr/local/bin/start.sh {% endif %}
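
[Editor's note, not part of the patch] The reworked collectd role earlier in this diff renders each plugin template into /root/collectd_sample_configs on the host, gates the hugepages/rdt/virt configs on host checks, and then starts the opnfv/barometer-collectd container with that directory mounted as its collectd.conf.d. A minimal verification sketch in bash, assuming docker is on the PATH and that only one barometer-collectd container is running (the run task sets no --name, so the image is used as the filter):

    # reproduce the host checks the role uses to decide which plugin configs to render
    stat /sys/devices/system/node /sys/kernel/mm/hugepages   # hugepages.conf
    grep -c cqm /proc/cpuinfo                                # rdt.conf
    pgrep -x libvirtd                                        # virt.conf

    # confirm the rendered configs exist and that the container picked them up
    ls /root/collectd_sample_configs
    docker ps --filter ancestor=opnfv/barometer-collectd
    docker logs --tail 50 "$(docker ps -q --filter ancestor=opnfv/barometer-collectd)"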
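
[Editor's note, not part of the patch] configure_grafana.sh above polls the Grafana HTTP API until it can register an InfluxDB datasource named "collectd", then posts every dashboard JSON shipped in the barometer repository. A small sketch for checking the outcome through the same API, reusing the admin:admin credentials and the 127.0.0.1:3000 endpoint the script itself assumes:

    # the datasource created by the while loop in configure_grafana.sh
    curl -s -u admin:admin http://127.0.0.1:3000/api/datasources

    # dashboards imported from /opt/barometer/docker/barometer-grafana/dashboards
    curl -s -u admin:admin 'http://127.0.0.1:3000/api/search?type=dash-db'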
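
[Editor's note, not part of the patch] install_influxdb.sh plus the influxdb.conf.j2 template above enable a [[collectd]] listener on UDP 25826 that writes into a database called collectd, while network.conf.j2 points each node's collectd network plugin at {{ internal_vip.ip }}:25826. A hedged end-to-end check, assuming the template's default :8086 HTTP bind address and that it is run on the InfluxDB host:

    # InfluxDB liveness; /ping answers 204 No Content when the daemon is up
    curl -sI http://127.0.0.1:8086/ping

    # confirm collectd samples are landing in the "collectd" database
    curl -sG 'http://127.0.0.1:8086/query' \
         --data-urlencode 'db=collectd' \
         --data-urlencode 'q=SHOW MEASUREMENTS LIMIT 10'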
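
[Editor's note, not part of the patch] The ODL changes above move the service unit and the Karaf feature repositories from 4.0.9 to 4.0.10, bump the integration features index and karaf tarball to 0.7.1, and switch the neutron service plugins to odl-router_v2,metering,trunk. A quick spot check on a controller, assuming the unit is installed as opendaylight.service and that crudini is available (the role already invokes it):

    # service_plugins as written by the "configure odl l3 driver" task
    crudini --get /etc/neutron/neutron.conf DEFAULT service_plugins

    # the installed unit's boot classpath should now reference the 4.0.10 Karaf jars
    systemctl cat opendaylight.service | grep -c '4\.0\.10'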
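
[Editor's note, not part of the patch] The compass role changes above copy {{ compass_dir }}/deploy/ansible_plugins next to the compose files and bind-mount it into the tasks container at /opt/ansible_plugins, in place of the removed docker cp of individual callback scripts. A one-line sanity check, assuming the container keeps the compass-tasks name used elsewhere in the role:

    # the plugins directory should be visible inside the container via the new volume
    docker exec compass-tasks ls -l /opt/ansible_plugins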