-rw-r--r--  docker/Dockerfile                                           |   2
-rwxr-xr-x  nfvbench/cfg.default.yaml                                   |  62
-rw-r--r--  nfvbench/chaining.py                                        | 112
-rw-r--r--  nfvbench/cleanup.py                                         |  20
-rw-r--r--  nfvbench/nfvbenchvm/nfvbenchvm.conf                         |   4
-rwxr-xr-x  nfvbenchvm/dib/build-image.sh                               |   2
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml    |   2
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local |  62
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf  |   1
9 files changed, 240 insertions(+), 27 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6d69465..b84bfa3 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -2,7 +2,7 @@
FROM ubuntu:16.04
ENV TREX_VER "v2.61"
-ENV VM_IMAGE_VER "0.10"
+ENV VM_IMAGE_VER "0.11"
# Note: do not clone with --depth 1 as it will cause pbr to fail extracting the nfvbench version
# from the git tag
diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index 9a4a815..fdd5f84 100755
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -83,6 +83,11 @@ flavor:
# test VM will only use the first 2 queues.
vif_multiqueue_size: 1
+# Increase the number of buffers allocated for the VPP VM forwarder. May be needed in scenarios with a
+# large number of interfaces and worker threads, or many physical interfaces with multiple RSS queues.
+# The value is per CPU socket. Default is 16384.
+num_mbufs: 16384
+
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# availability_zone: 'nova'
@@ -448,6 +453,63 @@ idle_networks:
# physnet name to use for all idle interfaces
physical_network:
+# MANAGEMENT INTERFACE
+# By default each test VM will have 2 virtual interfaces for looping traffic.
+# If use_management_port is true, an additional virtual interface will be
+# added at VM creation time; this interface will be used for VM management over SSH.
+# This is helpful for debugging (forwarder config, traffic capture...)
+# or to emulate a VNF with a management interface.
+use_management_port: false
+
+# If a network with the given name already exists it will be reused.
+# Otherwise a new network is created for the management interface.
+# If use_management_port is false, the options below will be ignored
+# and no management interface will be added.
+management_network:
+ name: 'nfvbench-management-net'
+ # Subnet name to use for management subnetwork
+ subnet: 'nfvbench-management-subnet'
+ # CIDR to use for management network
+ cidr: '192.168.0.0/24'
+ gateway: '192.168.0.254'
+ # Type of network associated with the management virtual interface (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the network attached to the management virtual interface
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the management network
+ physical_network:
+
+# Floating IP for the management interface
+# If use_floating_ip is true, a floating IP will be set on the management interface port.
+# One floating IP per loop VM will be used (floating IPs are often limited;
+# use them in a limited context, mainly for debugging). If there are 10 PVP chains, this will require 10
+# floating IPs; 10 PVVP chains will require 20 floating IPs.
+use_floating_ip: false
+
+# Floating network used to assign a floating IP to each management port.
+# If a network with the given name already exists it will be reused;
+# set the same name as management_network if you want to use a floating IP from that network.
+# Otherwise set the name, subnet and CIDR information of your floating IP pool network.
+# Only 1 floating network will be used for all VMs and chains (shared network).
+# If use_floating_ip is false, the options below will be ignored
+# and no floating IP will be added.
+floating_network:
+ name: 'nfvbench-floating-net'
+ # Subnet name to use for floating subnetwork
+ subnet: 'nfvbench-floating-subnet'
+ # CIDR to use for floating network
+ cidr: '192.168.0.0/24'
+ # Type of network to use for the floating network (vlan or vxlan)
+ network_type: 'vlan'
+ # segmentation ID to use for the floating network
+ # vlan: leave empty to let neutron pick the segmentation ID
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
+ segmentation_id:
+ # physnet name to use for the floating network
+ physical_network:
+
# In the scenario of PVVP + SRIOV, there is choice of how the traffic will be
# handled in the middle network. The default (false) will use vswitch, while
# SRIOV can be used by toggling below setting.
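
Note on sizing: the comments added to cfg.default.yaml above state one floating IP per loop VM, shared floating network across chains. A minimal sketch of that arithmetic, using hypothetical helper and constant names that are not part of the patch:

    # Rough floating IP budget per the rule in the config comments above.
    VMS_PER_CHAIN = {'PVP': 1, 'PVVP': 2}   # loop VMs per chain type

    def required_floating_ips(service_chain, service_chain_count):
        """Floating IPs consumed when use_floating_ip is true."""
        return VMS_PER_CHAIN[service_chain] * service_chain_count

    # 10 PVP chains -> 10 floating IPs; 10 PVVP chains -> 20 floating IPs
    assert required_floating_ips('PVP', 10) == 10
    assert required_floating_ips('PVVP', 10) == 20
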
diff --git a/nfvbench/chaining.py b/nfvbench/chaining.py
index 49d23b7..d035a40 100644
--- a/nfvbench/chaining.py
+++ b/nfvbench/chaining.py
@@ -135,6 +135,7 @@ class ChainVnfPort(object):
self.manager = vnf.manager
self.reuse = False
self.port = None
+ self.floating_ip = None
if vnf.instance:
# VNF instance is reused, we need to find an existing port that matches this instance
# and network
@@ -182,18 +183,35 @@ class ChainVnfPort(object):
"""Get the IP address for this port."""
return self.port['fixed_ips'][0]['ip_address']
+ def set_floating_ip(self, chain_network):
+ # create a floating IP and attach it to the port
+ try:
+ self.floating_ip = self.manager.neutron_client.create_floatingip({
+ 'floatingip': {
+ 'floating_network_id': chain_network.get_uuid(),
+ 'port_id': self.port['id'],
+ 'description': 'nfvbench floating ip for port:' + self.port['name'],
+ }})['floatingip']
+ LOG.info('Floating IP %s created and associated on port %s',
+ self.floating_ip['floating_ip_address'], self.name)
+ return self.floating_ip['floating_ip_address']
+ except Exception:
+ LOG.info('Failed to create and associate floating IP on port %s (ignored)', self.name)
+ return self.port['fixed_ips'][0]['ip_address']
+
def delete(self):
"""Delete this port instance."""
if self.reuse or not self.port:
return
- retry = 0
- while retry < self.manager.config.generic_retry_count:
+ for _ in range(0, self.manager.config.generic_retry_count):
try:
self.manager.neutron_client.delete_port(self.port['id'])
LOG.info("Deleted port %s", self.name)
+ if self.floating_ip:
+ self.manager.neutron_client.delete_floatingip(self.floating_ip['id'])
+ LOG.info("Deleted floating IP %s", self.floating_ip['description'])
return
except Exception:
- retry += 1
time.sleep(self.manager.config.generic_poll_sec)
LOG.error('Unable to delete port: %s', self.name)
@@ -358,17 +376,15 @@ class ChainNetwork(object):
def delete(self):
"""Delete this network."""
if not self.reuse and self.network:
- retry = 0
- while retry < self.manager.config.generic_retry_count:
+ for retry in range(0, self.manager.config.generic_retry_count):
try:
self.manager.neutron_client.delete_network(self.network['id'])
LOG.info("Deleted network: %s", self.name)
return
except Exception:
- retry += 1
LOG.info('Error deleting network %s (retry %d/%d)...',
self.name,
- retry,
+ retry + 1,
self.manager.config.generic_retry_count)
time.sleep(self.manager.config.generic_poll_sec)
LOG.error('Unable to delete network: %s', self.name)
@@ -397,6 +413,7 @@ class ChainVnf(object):
# For example if 7 idle interfaces are requested, the corresp. ports will be
# at index 2 to 8
self.ports = []
+ self.management_port = None
self.routers = []
self.status = None
self.instance = None
@@ -429,10 +446,12 @@ class ChainVnf(object):
tg_mac2 = self.routers[RIGHT].ports[1]['mac_address'] # router edge mac right
# edge cidr mask left
vnf_gateway1_cidr = \
- self.ports[LEFT].get_ip() + self.manager.config.edge_networks.left.cidr[-3:]
+ self.ports[LEFT].get_ip() + self.__get_network_mask(
+ self.manager.config.edge_networks.left.cidr)
# edge cidr mask right
vnf_gateway2_cidr = \
- self.ports[RIGHT].get_ip() + self.manager.config.edge_networks.right.cidr[-3:]
+ self.ports[RIGHT].get_ip() + self.__get_network_mask(
+ self.manager.config.edge_networks.right.cidr)
if config.vm_forwarder != 'vpp':
raise ChainException(
'L3 router mode imply to set VPP as VM forwarder.'
@@ -444,9 +463,11 @@ class ChainVnf(object):
tg_mac2 = remote_mac_pair[1]
g1cidr = devices[LEFT].get_gw_ip(
- self.chain.chain_id) + self.manager.config.internal_networks.left.cidr[-3:]
+ self.chain.chain_id) + self.__get_network_mask(
+ self.manager.config.internal_networks.left.cidr)
g2cidr = devices[RIGHT].get_gw_ip(
- self.chain.chain_id) + self.manager.config.internal_networks.right.cidr[-3:]
+ self.chain.chain_id) + self.__get_network_mask(
+ self.manager.config.internal_networks.right.cidr)
vnf_gateway1_cidr = g1cidr
vnf_gateway2_cidr = g2cidr
@@ -465,10 +486,27 @@ class ChainVnf(object):
'vnf_gateway2_cidr': vnf_gateway2_cidr,
'tg_mac1': tg_mac1,
'tg_mac2': tg_mac2,
- 'vif_mq_size': config.vif_multiqueue_size
+ 'vif_mq_size': config.vif_multiqueue_size,
+ 'num_mbufs': config.num_mbufs
}
+ if self.manager.config.use_management_port:
+ mgmt_ip = self.management_port.port['fixed_ips'][0]['ip_address']
+ mgmt_mask = self.__get_network_mask(self.manager.config.management_network.cidr)
+ vm_config['intf_mgmt_cidr'] = mgmt_ip + mgmt_mask
+ vm_config['intf_mgmt_ip_gw'] = self.manager.config.management_network.gateway
+ vm_config['intf_mac_mgmt'] = self.management_port.port['mac_address']
+ else:
+ # Management interface config is left empty to avoid errors at VM spawn
+ # when the nfvbench config has management network values but use_management_port=false
+ vm_config['intf_mgmt_cidr'] = ''
+ vm_config['intf_mgmt_ip_gw'] = ''
+ vm_config['intf_mac_mgmt'] = ''
return content.format(**vm_config)
+ @staticmethod
+ def __get_network_mask(network):
+ return '/' + network.split('/')[1]
+
def _get_vnic_type(self, port_index):
"""Get the right vnic type for given port indexself.
@@ -575,17 +613,28 @@ class ChainVnf(object):
self.instance = instance
LOG.info('Reusing existing instance %s on %s',
self.name, self.get_hypervisor_name())
+ # create management port if needed
+ if self.manager.config.use_management_port:
+ self.management_port = ChainVnfPort(self.name + '-mgmt', self,
+ self.manager.management_network, 'normal')
+ ip = self.management_port.port['fixed_ips'][0]['ip_address']
+ if self.manager.config.use_floating_ip:
+ ip = self.management_port.set_floating_ip(self.manager.floating_ip_network)
+ LOG.info("Management interface will be active using IP: %s, "
+ "and you can connect over SSH with login: nfvbench and password: nfvbench", ip)
# create or reuse/discover 2 ports per instance
if self.manager.config.l3_router:
- self.ports = [ChainVnfPort(self.name + '-' + str(index),
- self,
- networks[index + 2],
- self._get_vnic_type(index)) for index in [0, 1]]
+ for index in [0, 1]:
+ self.ports.append(ChainVnfPort(self.name + '-' + str(index),
+ self,
+ networks[index + 2],
+ self._get_vnic_type(index)))
else:
- self.ports = [ChainVnfPort(self.name + '-' + str(index),
- self,
- networks[index],
- self._get_vnic_type(index)) for index in [0, 1]]
+ for index in [0, 1]:
+ self.ports.append(ChainVnfPort(self.name + '-' + str(index),
+ self,
+ networks[index],
+ self._get_vnic_type(index)))
# create idle networks and ports only if instance is not reused
# if reused, we do not care about idle networks/ports
@@ -627,8 +676,10 @@ class ChainVnf(object):
def create_vnf(self, remote_mac_pair):
"""Create the VNF instance if it does not already exist."""
if self.instance is None:
- port_ids = [{'port-id': vnf_port.port['id']}
- for vnf_port in self.ports]
+ port_ids = []
+ if self.manager.config.use_management_port:
+ port_ids.append({'port-id': self.management_port.port['id']})
+ port_ids.extend([{'port-id': vnf_port.port['id']} for vnf_port in self.ports])
# add idle ports
for idle_port in self.idle_ports:
port_ids.append({'port-id': idle_port.port['id']})
@@ -736,6 +787,8 @@ class ChainVnf(object):
if self.instance:
self.manager.comp.delete_server(self.instance)
LOG.info("Deleted instance %s", self.name)
+ if self.manager.config.use_management_port:
+ self.management_port.delete()
for port in self.ports:
port.delete()
for port in self.idle_ports:
@@ -1052,6 +1105,16 @@ class ChainManager(object):
self.flavor = ChainFlavor(config.flavor_type, config.flavor, self.comp)
# Get list of all existing instances to check if some instances can be reused
self.existing_instances = self.comp.get_server_list()
+ # If management port is requested for VMs, create management network (shared)
+ if self.config.use_management_port:
+ self.management_network = ChainNetwork(self, self.config.management_network,
+ None, False)
+ # If floating IP is used for management, create and share
+ # across chains the floating network
+ if self.config.use_floating_ip:
+ self.floating_ip_network = ChainNetwork(self,
+ self.config.floating_network,
+ None, False)
else:
# For EXT chains, the external_networks left and right fields in the config
# must be either a prefix string or a list of at least chain-count strings
@@ -1419,7 +1482,6 @@ class ChainManager(object):
if hypervisor:
LOG.info('Found hypervisor for EXT chain: %s', hypervisor.hypervisor_hostname)
return[':' + hypervisor.hypervisor_hostname]
-
# no openstack = no chains
return []
@@ -1429,5 +1491,9 @@ class ChainManager(object):
chain.delete()
for network in self.networks:
network.delete()
+ if self.config.use_management_port and hasattr(self, 'management_network'):
+ self.management_network.delete()
+ if self.config.use_floating_ip and hasattr(self, 'floating_ip_network'):
+ self.floating_ip_network.delete()
if self.flavor:
self.flavor.delete()
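
The new __get_network_mask() helper replaces the cidr[-3:] slice used before; the slice only yields the correct suffix for two-digit prefix lengths. A standalone sketch of the same idea (illustrative, not the patch code itself):

    def get_network_mask(cidr):
        """Return the '/<prefix>' suffix of a CIDR string, e.g. '/24'."""
        return '/' + cidr.split('/')[1]

    assert get_network_mask('192.168.0.0/24') == '/24'
    # The old slice breaks as soon as the prefix length is a single digit:
    assert get_network_mask('10.0.0.0/8') == '/8'
    assert '10.0.0.0/8'[-3:] == '0/8'   # what the previous code would have appended
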
diff --git a/nfvbench/cleanup.py b/nfvbench/cleanup.py
index fc85b5d..6a79a63 100644
--- a/nfvbench/cleanup.py
+++ b/nfvbench/cleanup.py
@@ -104,16 +104,25 @@ class NetworkCleaner(object):
LOG.info('Discovering ports...')
all_ports = self.neutron_client.list_ports()['ports']
self.ports = [port for port in all_ports if port['network_id'] in net_ids]
+ LOG.info('Discovering floating ips...')
+ all_floating_ips = self.neutron_client.list_floatingips()['floatingips']
+ self.floating_ips = [floating_ip for floating_ip in all_floating_ips if
+ floating_ip['floating_network_id'] in net_ids and "nfvbench" in
+ floating_ip['description']]
else:
self.ports = []
+ self.floating_ips = []
def get_resource_list(self):
res_list = [["Network", net['name'], net['id']] for net in self.networks]
res_list.extend([["Port", port['name'], port['id']] for port in self.ports])
+ res_list.extend(
+ [["Floating IP", floating_ip['description'], floating_ip['id']] for floating_ip in
+ self.floating_ips])
return res_list
def get_cleaner_code(self):
- return "networks and ports"
+ return "networks, ports and floating ips"
def clean_needed(self, clean_options):
if clean_options is None:
@@ -129,7 +138,12 @@ class NetworkCleaner(object):
self.neutron_client.delete_port(port['id'])
except Exception:
LOG.exception("Port deletion failed")
-
+ for floating_ip in self.floating_ips:
+ LOG.info("Deleting floating ip %s...", floating_ip['id'])
+ try:
+ self.neutron_client.delete_floatingip(floating_ip['id'])
+ except Exception:
+ LOG.exception("Floating IP deletion failed")
# associated subnets are automatically deleted by neutron
for net in self.networks:
LOG.info("Deleting network %s...", net['name'])
@@ -255,6 +269,8 @@ class Cleaner(object):
self.nova_client = Client(2, session=session)
network_names = [inet['name'] for inet in config.internal_networks.values()]
network_names.extend([inet['name'] for inet in config.edge_networks.values()])
+ network_names.append(config.management_network['name'])
+ network_names.append(config.floating_network['name'])
router_names = [rtr['router_name'] for rtr in config.edge_networks.values()]
# add idle networks as well
if config.idle_networks.name:
diff --git a/nfvbench/nfvbenchvm/nfvbenchvm.conf b/nfvbench/nfvbenchvm/nfvbenchvm.conf
index a8e2551..8f5e7e9 100644
--- a/nfvbench/nfvbenchvm/nfvbenchvm.conf
+++ b/nfvbench/nfvbenchvm/nfvbenchvm.conf
@@ -10,3 +10,7 @@ TG_NET2={tg_net2}
TG_GATEWAY1_IP={tg_gateway1_ip}
TG_GATEWAY2_IP={tg_gateway2_ip}
VIF_MQ_SIZE={vif_mq_size}
+NUM_MBUFS={num_mbufs}
+INTF_MGMT_CIDR={intf_mgmt_cidr}
+INTF_MGMT_IP_GW={intf_mgmt_ip_gw}
+INTF_MAC_MGMT={intf_mac_mgmt}
\ No newline at end of file
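
The three new placeholders are filled by chaining.py with str.format(), which appears to be why the use_management_port=false branch sets them to empty strings rather than omitting the keys. A simplified sketch of that rendering (the real code reads this template file and builds vm_config from the chain state; the values below are illustrative only):

    # Minimal illustration of how the template keys above are substituted.
    template = ('INTF_MGMT_CIDR={intf_mgmt_cidr}\n'
                'INTF_MGMT_IP_GW={intf_mgmt_ip_gw}\n'
                'INTF_MAC_MGMT={intf_mac_mgmt}\n')
    vm_config = {'intf_mgmt_cidr': '192.168.0.10/24',
                 'intf_mgmt_ip_gw': '192.168.0.254',
                 'intf_mac_mgmt': 'fa:16:3e:00:00:01'}
    print(template.format(**vm_config))
    # With use_management_port=false the same keys are set to '' so that
    # format() does not raise KeyError on the new placeholders.
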
diff --git a/nfvbenchvm/dib/build-image.sh b/nfvbenchvm/dib/build-image.sh
index fce298c..09ccf6a 100755
--- a/nfvbenchvm/dib/build-image.sh
+++ b/nfvbenchvm/dib/build-image.sh
@@ -30,7 +30,7 @@ set -e
gs_url=artifacts.opnfv.org/nfvbench/images
# image version number
-__version__=0.10
+__version__=0.11
image_name=nfvbenchvm_centos-$__version__
# if image exists skip building
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml b/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml
index e3184c7..a5868fa 100644
--- a/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml
@@ -10,6 +10,8 @@ numactl-libs:
numactl-devel:
vpp:
vpp-plugins:
+vpp-config:
kernel-firmware:
kernel-headers:
kernel-devel:
+openssh-server:
\ No newline at end of file
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
index 59cb4a1..64557d4 100644
--- a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
@@ -98,6 +98,60 @@ get_pci_address() {
return 0
}
+get_eth_port() {
+ # device mapping for CentOS Linux 7:
+ # lspci:
+ # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device
+ # /sys/class/net:
+ # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0
+ # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1
+
+ mac=$1
+ for f in $(ls $NET_PATH/); do
+ if grep -q "$mac" $NET_PATH/$f/address; then
+ eth_port=$(readlink $NET_PATH/$f | cut -d "/" -f8)
+ # some virtual interfaces match on MAC and do not have a PCI address
+ if [ "$eth_port" -a "$eth_port" != "N/A" ]; then
+ # Found matching interface
+ logger "NFVBENCHVM: found interface $f ($eth_port) matching $mac"
+ break
+ else
+ eth_port=""
+ fi
+ fi;
+ done
+ if [ -z "$eth_port" ]; then
+ echo "ERROR: Cannot find eth port for MAC $mac" >&2
+ logger "NFVBENCHVM ERROR: Cannot find eth port for MAC $mac"
+ return 1
+ fi
+ echo $eth_port
+ return 0
+}
+
+# Bring the VM MANAGEMENT port up and configure it
+if [ $INTF_MGMT_CIDR ] && [ $INTF_MGMT_IP_GW ]; then
+ if [ $INTF_MAC_MGMT ]; then
+ ETH_PORT=$(get_eth_port $INTF_MAC_MGMT)
+ else
+ ETH_PORT="eth0"
+ fi
+ ip addr add $INTF_MGMT_CIDR dev $ETH_PORT
+ ip link set $ETH_PORT up
+ ip route add default via $INTF_MGMT_IP_GW dev $ETH_PORT
+else
+ echo "INFO: VM management IP Addresses missing in $NFVBENCH_CONF"
+fi
+
+# Set interface MAC values dynamically if the VM is spawned without using NFVBench
+# and the management interface is used on eth0
+if [ -z "$INTF_MAC1" ] && [ -z "$INTF_MAC2" ]; then
+ INTF_MAC1=$(ip l show eth1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+ INTF_MAC2=$(ip l show eth2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1)
+fi
+
+
# Sometimes the interfaces on the loopback VM will use different drivers, e.g.
# one from vswitch which is virtio based, one is from SRIOV VF. In this case,
# we have to make sure the forwarder uses them in the right order, which is
@@ -150,6 +204,7 @@ if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
sed -i "s/{{VIF_MQ_SIZE}}/${VIF_MQ_SIZE}/g" /etc/vpp/startup.conf
+ sed -i "s/{{NUM_MBUFS}}/${NUM_MBUFS}/g" /etc/vpp/startup.conf
service vpp start
sleep 10
@@ -175,3 +230,10 @@ else
echo "$INTF_MAC2: $PCI_ADDRESS_2"
logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC"
fi
+
+# Set SSH config
+logger $(cat /etc/ssh/sshd_config | grep "PasswordAuthentication")
+sed -i 's/PasswordAuthentication no/#PasswordAuthentication no/g' /etc/ssh/sshd_config
+sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+logger $(cat /etc/ssh/sshd_config | grep "PasswordAuthentication")
+service sshd restart
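
The get_eth_port() shell function above maps a MAC address to a kernel interface name through /sys/class/net. An equivalent sketch in Python, simplified (it omits the readlink/PCI check the shell version uses to skip pseudo-interfaces); the VM itself only runs the shell version:

    import os

    NET_PATH = '/sys/class/net'

    def get_eth_port(mac):
        """Return the interface name whose MAC address matches 'mac'."""
        for ifname in os.listdir(NET_PATH):
            with open(os.path.join(NET_PATH, ifname, 'address')) as f:
                if f.read().strip().lower() == mac.lower():
                    return ifname
        raise ValueError('Cannot find eth port for MAC %s' % mac)
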
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
index ce5ab45..d174299 100644
--- a/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
@@ -20,6 +20,7 @@ dpdk {
dev {{PCI_ADDRESS_1}}
dev {{PCI_ADDRESS_2}}
uio-driver igb_uio
+ num-mbufs {{NUM_MBUFS}}
}
api-segment {