path: root/deploy/adapters/ansible/openstack_mitaka/roles/tacker
author     liyuenan <liyuenan@huawei.com>  2016-12-19 11:06:36 +0800
committer  liyuenan <liyuenan@huawei.com>  2016-12-20 15:05:03 +0800
commit     819912d0379f6cd2b2693c2968576f7514a117c5 (patch)
tree       e24d274484fa1ec8976c9f1bd44f5ee6e445724b /deploy/adapters/ansible/openstack_mitaka/roles/tacker
parent     eb5dbdac42b1b7b775fbc1dc513376425a6898ff (diff)
master only support newton
JIRA: COMPASS-513

Remove other roles and ppa, master only support newton.

Change-Id: I47ddb16baa25902c3e05cc7f9d0d6430f5dc7e00
Signed-off-by: liyuenan <liyuenan@huawei.com>
Diffstat (limited to 'deploy/adapters/ansible/openstack_mitaka/roles/tacker')
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf            |  36
-rwxr-xr-x  deploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml  | 215
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml        |   4
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2          | 426
-rw-r--r--  deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh         |  12
5 files changed, 0 insertions, 693 deletions
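The same stat summary should be reproducible from a local clone of the repository that contains both commits (having such a clone is an assumption; the commit ids and path come from the header above):

    git diff --stat eb5dbdac42b1b7b775fbc1dc513376425a6898ff 819912d0379f6cd2b2693c2968576f7514a117c5 -- deploy/adapters/ansible/openstack_mitaka/roles/tacker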
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf
deleted file mode 100644
index 0c90dcb9..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/files/tacker.conf
+++ /dev/null
@@ -1,36 +0,0 @@
-description "OpenStack Tacker Server"
-author "Yifei Xue <xueyifei@huawei.com>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-chdir /var/run
-
-respawn
-respawn limit 20 5
-limit nofile 65535 65535
-
-pre-start script
- for i in lock run log lib ; do
- mkdir -p /var/$i/tacker
- chown root /var/$i/tacker
- done
-end script
-
-script
- [ -x "/usr/local/bin/tacker-server" ] || exit 0
- DAEMON_ARGS=""
- CONFIG_FILE="/usr/local/etc/tacker/tacker.conf"
- USE_SYSLOG=""
- USE_LOGFILE=""
- NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG=""
- [ -r /etc/default/openstack ] && . /etc/default/openstack
- [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
- [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
- [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/tacker/tacker.log"
- [ -z "$NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG" ] && DAEMON_ARGS="$DAEMON_ARGS --config-file=$CONFIG_FILE"
-
- exec start-stop-daemon --start --chdir /var/lib/tacker \
- --chuid root:root --make-pidfile --pidfile /var/run/tacker/tacker.pid \
- --exec /usr/local/bin/tacker-server -- ${DAEMON_ARGS}
-end script
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml
deleted file mode 100755
index cd3b19e8..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/tasks/tacker_controller.yml
+++ /dev/null
@@ -1,215 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- name: get http server
- shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
- register: http_server
-
-- name: creat tacker_home, tacker_client_home, tacker_horizon_home
- shell: >
- mkdir -p /opt/tacker
- mkdir -p /opt/tacker_client
- mkdir -p /opt/tacker_horizon
-
-- name: download tacker package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_pkg_name }}" dest=/opt/{{ tacker_pkg_name }}
-
-- name: download tacker_client package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_client_pkg_name }}" dest=/opt/{{ tacker_client_pkg_name }}
-
-- name: download tacker_horizon package
- get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/tacker/{{ tacker_horizon_pkg_name }}" dest=/opt/{{ tacker_horizon_pkg_name }}
-
-- name: extract tacker package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_pkg_name }} -C {{ tacker_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
-
-- name: extract tacker_client package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_client_pkg_name }} -C {{ tacker_client_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
-
-- name: extract tacker_horizon package
- command: su -s /bin/sh -c "tar xzf /opt/{{ tacker_horizon_pkg_name }} -C {{ tacker_horizon_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files"
-
-- name: edit ml2_conf.ini
- shell: crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security;
-
-- name: Restart neutron-server
- service: name=neutron-server state=restarted
-
-- name: "create haproxy configuration for tacker"
- template:
- src: "haproxy-tacker-cfg.j2"
- dest: "/tmp/haproxy-tacker.cfg"
-
-- name: get the current haproxy configuration
- shell: cat /etc/haproxy/haproxy.cfg
- register: ha_cfg
-
-- name: "combination of the haproxy configuration"
- shell: "cat /tmp/haproxy-tacker.cfg >> /etc/haproxy/haproxy.cfg"
- when: ha_cfg.stdout.find('8888') == -1
-
-- name: "delete temporary configuration file"
- file:
- dest: "/tmp/haproxy-tacker.cfg"
- state: "absent"
-
-- name: "restart haproxy"
- service:
- name: "haproxy"
- state: "restarted"
-
-- name: drop and recreate tacker database
- shell: mysql -e "drop database if exists tacker;";
- mysql -e "create database tacker character set utf8;";
- mysql -e "grant all on tacker.* to 'tacker'@'%' identified by 'TACKER_DBPASS';";
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: get the openstack user info
- shell: . /opt/admin-openrc.sh; openstack user list
- register: user_info
-
-- name: get the openstack service info
- shell: . /opt/admin-openrc.sh; openstack service list
- register: service_info
-
-- name: get the openstack endpoint info
- shell: . /opt/admin-openrc.sh; openstack endpoint list
- register: endpoint_info
-
-- name: delete the existed tacker endpoint
- shell: . /opt/admin-openrc.sh; openstack endpoint delete $(openstack endpoint list | grep tacker | awk '{print $2}')
- when: endpoint_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: delete the existed tacker service
- shell: . /opt/admin-openrc.sh; openstack service delete tacker
- when: service_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: delete the existed tacker user
- shell: . /opt/admin-openrc.sh; openstack user delete tacker
- when: user_info.stdout.find('tacker') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create tacker user with admin privileges
- shell: . /opt/admin-openrc.sh; openstack user create --password console tacker; openstack role add --project service --user tacker admin;
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: creat tacker service
- shell: >
- . /opt/admin-openrc.sh; openstack service create --name tacker --description "Tacker Project" nfv-orchestration
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: provide an endpoint to tacker service
- shell: >
- . /opt/admin-openrc.sh; openstack endpoint create --region RegionOne \
- --publicurl 'http://{{ public_vip.ip }}:8888/' \
- --adminurl 'http://{{ internal_vip.ip }}:8888/' \
- --internalurl 'http://{{ internal_vip.ip }}:8888/' tacker
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: install pip package
- pip: name=Babel state=present version=2.3.4
-
-- name: install pip packages
- shell: >
- pip install tosca-parser heat-translator oslosphinx;
-
-- name: install tacker
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_home }}; python setup.py install
-
-- name: create 'tacker' directory in '/var/cache', set ownership and permissions
- shell: >
- mkdir -p /var/cache/tacker
-# sudo chown <LOGIN_USER>:root /var/cache/tacker
-# chmod 700 /var/cache/tacker
-
-- name: create 'tacker' directory in '/var/log'
- shell: mkdir -p /var/log/tacker
-
-- name: copy tacker configs
- template: src={{ item.src }} dest=/opt/os_templates
- with_items: "{{ tacker_configs_templates }}"
-
-- name: edit tacker configuration file
- shell: crudini --merge /usr/local/etc/tacker/tacker.conf < /opt/os_templates/tacker.j2
-
-- name: populate tacker database
- shell: >
- . /opt/admin-openrc.sh; /usr/local/bin/tacker-db-manage --config-file /usr/local/etc/tacker/tacker.conf upgrade head
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: install tacker client
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_client_home }}; python setup.py install
-
-- name: install tacker horizon
- shell: >
- . /opt/admin-openrc.sh; cd {{ tacker_horizon_home }}; python setup.py install
-
-- name: enable tacker horizon in dashboard
- shell: >
- cp {{ tacker_horizon_home }}/openstack_dashboard_extensions/* /usr/share/openstack-dashboard/openstack_dashboard/enabled/
-
-- name: restart apache server
- shell: service apache2 restart
-
-- name: create tacker service
- copy: src=tacker.conf dest=/etc/init
-
-- name: create tacker service work dir
- file: path=/var/lib/tacker state=directory
-
-- name: link the tacker service
- file:
- src: /etc/init/tacker.conf
- dest: /etc/init.d/tacker
- state: link
-
-- name: start tacker service
- shell: service tacker start
-
-- name: create tackerc file
- template: src=tackerc.sh dest=/opt/tackerc.sh mode=777
-
-- name: get the nfv_user info
- shell: . /opt/tackerc.sh; openstack user list
- register: nfvuser_info
-
-- name: delete the existed nfv user
- shell: . /opt/tackerc.sh; openstack user delete nfv_user
- when: nfvuser_info.stdout.find('nfv') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: get the openstack project info
- shell: . /opt/tackerc.sh; openstack project list
- register: nfvproject_info
-
-- name: delete the existed nfv project
- shell: . /opt/tackerc.sh; openstack project delete $(openstack project list | grep nfv | awk '{print $2}')
- when: nfvproject_info.stdout.find('nfv') != -1 and inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create an nfv project
- shell: . /opt/tackerc.sh; openstack project create --description "NFV Project" nfv
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create nfv user with admin privileges
- shell: . /opt/tackerc.sh; openstack user create --password console nfv_user; openstack role add --project nfv --user nfv_user admin;
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: create config.yml
- template: src=config.yaml dest=/opt/config.yaml
-
-- name: check if tacker running
- shell: . /opt/tackerc.sh; while (!(tacker ext-list)); do sleep 30; done
-
-- name: register VIM to tacker
- shell: . /opt/tackerc.sh; tacker vim-register --config-file /opt/config.yaml --description "OpenStack" --name VIM0
- when: inventory_hostname == haproxy_hosts.keys()[0]
-
-- name: restart tacker service
- shell: service tacker stop; service tacker start
-
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml
deleted file mode 100644
index 8f73e907..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/config.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-auth_url: 'http://{{ public_vip.ip }}:5000/v2.0'
-username: 'nfv_user'
-password: 'console'
-project_name: 'nfv'
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2 b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2
deleted file mode 100644
index 4f186b67..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tacker.j2
+++ /dev/null
@@ -1,426 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = True
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = True
-
-# Where to store Tacker state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/tacker
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-auth_strategy = keystone
-policy_file = /usr/local/etc/tacker/policy.json
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ internal_ip }}
-
-# Port the bind the API server to
-bind_port = 8888
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of tacker.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Tacker core plugin entrypoint to be loaded from the
-# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the tacker source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-# core_plugin =
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the tacker source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-
-service_plugins = vnfm,nfvo
-
-# Paste configuration file
-# api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Tacker is
-# being used in conjunction with nova security groups
-# allow_overlapping_ips = False
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = tacker.openstack.common.rpc.impl_kombu
-# Size of RPC thread pool
-# rpc_thread_pool_size = 64
-# Size of RPC connection pool
-# rpc_conn_pool_size = 30
-# Seconds to wait for a response from call or multicall
-# rpc_response_timeout = 60
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# rpc_cast_timeout = 30
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-# control_exchange = tacker
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)
-# kombu_ssl_ca_certs =
-# IP address of the RabbitMQ installation
-# rabbit_host = localhost
-# Password of the RabbitMQ server
-# rabbit_password = guest
-# Port where RabbitMQ server is running/listening
-# rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-# rabbit_userid = guest
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-
-# QPID
-# rpc_backend=tacker.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=tacker.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are created, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = tacker.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = tacker.openstack.common.notifier.log_notifier
-# RPC driver.
-notification_driver = tacker.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-# default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-# notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-# agent_down_time = 75
-# =========== end of items for agent management extension =====
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# tacker server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to tacker server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-# api_workers = 0
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-# rpc_workers = 0
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== tacker nova interactions ==========
-# Send notification to nova when port status is active.
-# notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
-# notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-# nova_region_name =
-
-# Username for connection to nova in admin context
-# nova_admin_username =
-
-# The uuid of the admin nova tenant
-# nova_admin_tenant_id =
-
-# Password for connection to nova in admin context.
-# nova_admin_password =
-
-# Authorization URL for connection to nova in admin context.
-# nova_admin_auth_url =
-
-# CA file for novaclient to verify server certificates
-# nova_ca_certificates_file =
-
-# Boolean to control ignoring SSL errors on the nova url
-# nova_api_insecure = False
-
-# Number of seconds between sending events to nova if there are any events to send
-# send_events_interval = 2
-
-# ======== end of tacker nova interactions ==========
-
-[agent]
-# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-# report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-signing_dir = /var/cache/tacker
-#cafile = /opt/stack/data/ca-bundle.pem
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = tacker
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-identity_uri = http://{{ internal_vip.ip }}:5000/v2.0
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
-
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/tacker
-connection = mysql://tacker:TACKER_DBPASS@{{ internal_vip.ip }}:3306/tacker?charset=utf8
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main tacker server. (Leave it as is if the database runs on this host.)
-# connection = sqlite://
-# NOTE: In deployment the [database] section and its connection attribute may
-# be set in the corresponding core plugin '.ini' file. However, it is suggested
-# to put the [database] section and its connection attribute in this
-# configuration file.
-
-# Database engine for which script will be generated when using offline
-# migration
-# engine =
-
-# The SQLAlchemy connection string used to connect to the slave database
-# slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-# max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-# retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size = 10
-
-# Timeout in seconds before idle sql connections are reaped
-# idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-# max_overflow = 20
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout = 10
-
-[tacker]
-# Specify drivers for hosting device
-# infra_driver = heat,nova,noop
-
-# Specify drivers for mgmt
-# mgmt_driver = noop,openwrt
-
-# Specify drivers for monitoring
-# monitor_driver = ping, http_ping
-
-[nfvo_vim]
-# Supported VIM drivers, resource orchestration controllers such as OpenStack, kvm
-#Default VIM driver is OpenStack
-#vim_drivers = openstack
-#Default VIM placement if vim id is not provided
-default_vim = VIM0
-
-[vim_keys]
-#openstack = /etc/tacker/vim/fernet_keys
-[tacker_nova]
-# parameters for novaclient to talk to nova
-region_name = RegionOne
-#project_domain_id = default
-project_name = service
-#user_domain_id = default
-password = console
-username = nova
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_plugin = password
-
-[tacker_heat]
-heat_uri = http://{{ internal_vip.ip }}:8004/v1
-stack_retries = 60
-stack_retry_wait = 5
diff --git a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh b/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh
deleted file mode 100644
index c673e7f1..00000000
--- a/deploy/adapters/ansible/openstack_mitaka/roles/tacker/templates/tackerc.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-export LC_ALL=C
-export OS_NO_CACHE=true
-export OS_TENANT_NAME=service
-export OS_PROJECT_NAME=service
-export OS_USERNAME=tacker
-export OS_PASSWORD=console
-export OS_AUTH_URL=http://{{ internal_vip.ip }}:5000/v2.0
-export OS_DEFAULT_DOMAIN=default
-export OS_AUTH_STRATEGY=keystone
-export OS_REGION_NAME=RegionOne
-export TACKER_ENDPOINT_TYPE=internalurl