author     chigang <chigang@huawei.com>    2017-06-30 20:24:25 +0800
committer  chigang <chigang@huawei.com>    2017-07-03 21:19:28 +0800
commit     95ecdb773c9fa90f9e4f1f792f5cc5dc8328fd6a (patch)
tree       1b012703eb52f78fe35119a4f9eba98b221f69d9 /deploy/adapters/ansible/roles/neutron-compute/templates
parent     d529e77a45c77c10ac6970ca9e733e92e89d138f (diff)
Remove obsolete code
JIRA:-
Use OpenStack-Ansible to deploy OpenStack, so remove the obsolete code.
Some of the enhanced features will be added in later versions.
Change-Id: Ie92b92b5de234a7d7d03b578b0bc15fd0218b3b3
Signed-off-by: chigang <chigang@huawei.com>
Diffstat (limited to 'deploy/adapters/ansible/roles/neutron-compute/templates')
5 files changed, 0 insertions, 349 deletions
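
For context, the deleted files below are Jinja2 templates that the neutron-compute role rendered onto compute nodes. A minimal sketch of the kind of task that consumed them is shown here, assuming Ansible's template module; the task name, destination paths, and handler are illustrative and not taken from this repository:

---
# Illustrative sketch only: render the role's Jinja2 templates into /etc/neutron.
- name: render neutron agent configuration
  template:
    src: "{{ item }}"
    dest: "/etc/neutron/{{ item }}"
    owner: neutron
    group: neutron
    mode: "0644"
  with_items:
    - neutron.conf
    - dhcp_agent.ini
    - l3_agent.ini
    - metadata_agent.ini
  notify: restart neutron agents
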
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/dhcp_agent.ini b/deploy/adapters/ansible/roles/neutron-compute/templates/dhcp_agent.ini
deleted file mode 100644
index 4340c39c..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/dhcp_agent.ini
+++ /dev/null
@@ -1,90 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# The DHCP agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-resync_interval = 5
-
-# The DHCP agent requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br-int
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
-# no additional setup of the DHCP server.
-dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
-
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
-
-# The DHCP server can assist with providing metadata support on isolated
-# networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request. The metadata service will only
-# be activated when the subnet does not contain any router port. The guest
-# instance must be configured to request host routes via DHCP (Option 121).
-enable_isolated_metadata = True
-
-# Allows for serving metadata requests coming from a dedicated metadata
-# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
-# is connected to a Neutron router from which the VMs send metadata
-# request. In this case DHCP Option 121 will not be injected in VMs, as
-# they will be able to reach 169.254.169.254 through a router.
-# This option requires enable_isolated_metadata = True
-enable_metadata_network = False
-
-# Number of threads to use during sync process. Should not exceed connection
-# pool size configured on server.
-# num_sync_threads = 4
-
-# Location to store DHCP server config files
-# dhcp_confs = $state_path/dhcp
-
-# Domain to use for building the hostnames
-dhcp_domain = openstacklocal
-
-# Override the default dnsmasq settings with this file
-# dnsmasq_config_file =
-dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
-
-# Comma-separated list of DNS servers which will be used by dnsmasq
-# as forwarders.
-# dnsmasq_dns_servers =
-
-# Limit number of leases to prevent a denial-of-service.
-dnsmasq_lease_max = 16777216
-
-# Location to DHCP lease relay UNIX domain socket
-# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/etc/xorp/config.boot b/deploy/adapters/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
deleted file mode 100644
index 32caf96d..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
+++ /dev/null
@@ -1,25 +0,0 @@
-interfaces {
-    restore-original-config-on-shutdown: false
-    interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
-        description: "Internal pNodes interface"
-        disable: false
-        default-system-config
-    }
-}
-
-protocols {
-    igmp {
-        disable: false
-        interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
-            vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
-                disable: false
-                version: 3
-            }
-        }
-        traceoptions {
-            flag all {
-                disable: false
-            }
-        }
-    }
-}
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/l3_agent.ini b/deploy/adapters/ansible/roles/neutron-compute/templates/l3_agent.ini
deleted file mode 100644
index 5f499348..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/l3_agent.ini
+++ /dev/null
@@ -1,81 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
-verbose = True
-
-# L3 requires that an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
-# that supports L3 agent
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces).
-use_namespaces = True
-
-# If use_namespaces is set as False then the agent can only configure one router.
-
-# This is done by setting the specific router_id.
-# router_id =
-
-# When external_network_bridge is set, each L3 agent can be associated
-# with no more than one external network. This value should be set to the UUID
-# of that external network. To allow L3 agent support multiple external
-# networks, both the external_network_bridge and gateway_external_network_id
-# must be left empty.
-# gateway_external_network_id =
-
-# Indicates that this L3 agent should also handle routers that do not have
-# an external network gateway configured. This option should be True only
-# for a single agent in a Neutron deployment, and may be False for all agents
-# if all routers must have an external network gateway
-handle_internal_only_routers = True
-
-# Name of bridge used for external network traffic. This should be set to
-# empty value for the linux bridge. when this parameter is set, each L3 agent
-# can be associated with no more than one external network.
-external_network_bridge =
-
-# TCP Port used by Neutron metadata server
-metadata_port = 9697
-
-# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
-# to disable this feature.
-send_arp_for_ha = 3
-
-# seconds between re-sync routers' data if needed
-periodic_interval = 40
-
-# seconds to start to sync routers' data after
-# starting agent
-periodic_fuzzy_delay = 5
-
-# enable_metadata_proxy, which is true by default, can be set to False
-# if the Nova metadata server is not available
-# enable_metadata_proxy = True
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
-
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/metadata_agent.ini b/deploy/adapters/ansible/roles/neutron-compute/templates/metadata_agent.ini
deleted file mode 100644
index 994f0a63..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/metadata_agent.ini
+++ /dev/null
@@ -1,46 +0,0 @@
-[DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-debug = True
-
-# The Neutron user information for accessing the Neutron API.
-auth_url = http://{{ internal_vip.ip }}:5000/v3
-auth_region = RegionOne
-# Turn off verification of the certificate for ssl
-# auth_insecure = False
-# Certificate Authority public key (CA cert) file for ssl
-# auth_ca_cert =
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = adminURL
-
-# IP address used by Nova metadata server
-nova_metadata_ip = {{ internal_vip.ip }}
-
-# TCP Port used by Nova metadata server
-nova_metadata_port = 8775
-
-# When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing. You may select any string for a secret,
-# but it must match here and in the configuration used by the Nova Metadata
-# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# Number of separate worker processes for metadata server
-# metadata_workers = 0
-
-# Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 128
-
-# URL to connect to the cache backend.
-# Example of URL using memory caching backend
-# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url =
diff --git a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf b/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
deleted file mode 100644
index d74435fe..00000000
--- a/deploy/adapters/ansible/roles/neutron-compute/templates/neutron.conf
+++ /dev/null
@@ -1,107 +0,0 @@
-[DEFAULT]
-verbose = {{ VERBOSE }}
-debug = {{ VERBOSE }}
-state_path = /var/lib/neutron
-notify_nova_on_port_status_changes = True
-notify_nova_on_port_data_changes = True
-log_dir = /var/log/neutron
-bind_host = {{ network_server_host }}
-bind_port = 9696
-core_plugin = ml2
-service_plugins = router
-api_paste_config = api-paste.ini
-auth_strategy = keystone
-dhcp_lease_duration = 86400
-allow_overlapping_ips = True
-rpc_backend = rabbit
-rpc_thread_pool_size = 240
-rpc_conn_pool_size = 100
-rpc_response_timeout = 300
-rpc_cast_timeout = 300
-notification_driver = neutron.openstack.common.notifier.rpc_notifier
-default_notification_level = INFO
-notification_topics = notifications
-agent_down_time = 75
-network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-api_workers = 8
-rpc_workers = 8
-notify_nova_on_port_status_changes = True
-notify_nova_on_port_data_changes = True
-nova_url = http://{{ internal_vip.ip }}:8774/v3
-nova_region_name = RegionOne
-nova_admin_username = nova
-nova_admin_password = {{ NOVA_PASS }}
-nova_admin_auth_url = http://{{ internal_vip.ip }}:35357/v3
-send_events_interval = 2
-
-[quotas]
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-quota_items = network,subnet,port
-default_quota = -1
-quota_network = 100
-quota_subnet = 100
-quota_port = 8000
-quota_security_group = 1000
-quota_security_group_rule = 1000
-
-[agent]
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-report_interval = 30
-
-[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = neutron
-password = {{ NEUTRON_PASS }}
-
-identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
-[database]
-connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-slave_connection =
-max_retries = 10
-retry_interval = 10
-min_pool_size = 1
-max_pool_size = 100
-idle_timeout = 30
-use_db_reconnect = True
-max_overflow = 100
-connection_debug = 0
-connection_trace = False
-pool_timeout = 10
-
-[service_providers]
-service_provider=FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewllDriver:default
-
-{% if enable_fwaas %}
-[fwaas]
-driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
-enabled = True
-{% endif %}
-
-[nova]
-auth_url = http://{{ internal_vip.ip }}:35357
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = service
-username = nova
-password = {{ NOVA_PASS }}
-
-[oslo_messaging_rabbit]
-rabbit_host = {{ rabbit_host }}
-rabbit_password = {{ RABBIT_PASS }}
-rabbit_port = 5672
-rabbit_userid = {{ RABBIT_USER }}
-
-[oslo_concurrency]
-lock_path = $state_path/lock
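
With the move to OpenStack-Ansible, settings like those hard-coded in the deleted templates are supplied through deployer overrides instead of per-role templates. A minimal sketch, assuming the os_neutron role's *_overrides variables (the variable names and file location are assumptions about that project, and the values are simply carried over from the deleted templates above):

---
# /etc/openstack_deploy/user_variables.yml (illustrative sketch)
neutron_neutron_conf_overrides:
  DEFAULT:
    dhcp_lease_duration: 86400
    allow_overlapping_ips: True
neutron_dhcp_agent_ini_overrides:
  DEFAULT:
    dnsmasq_lease_max: 16777216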