Diffstat (limited to 'VNFs/DPPD-PROX/helper-scripts/openstackrapid')
18 files changed, 1225 insertions, 367 deletions
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/3VMrapid.yaml b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/3VMrapid.yaml new file mode 100644 index 00000000..98bc95a4 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/3VMrapid.yaml @@ -0,0 +1,183 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +heat_template_version: 2016-04-08 +description: RAPID stack (Rapid Automated Performance Indication for Dataplane) +parameters: + image: + type: string + label: Image name or ID + description: Image to be used for compute instance + default: RapidVM + flavor: + type: string + label: Flavor + description: Type of instance (flavor) to be used + default: prox_flavor + key: + type: string + label: Key name + description: Name of key-pair to be used for compute instance + default: prox + dataplane_network: + type: string + label: Private network name or ID + description: Network to attach instance to. + default: dataplane-network + internal_network: + type: string + label: Private network name or ID + description: Network to attach instance to. + default: admin_internal_net + floating_network: + type: string + label: Floating network name or ID + description: Public Network to attach instance to. + default: admin_floating_net + availability_zone: + type: string + description: The Availability Zone to launch the instance. 
+ default: nova + +resources: + vm1_admin_port: + type: OS::Neutron::Port + properties: + network: {get_param: internal_network} + security_groups: + - default + vm1_dataplane_port: + type: OS::Neutron::Port + properties: + network: {get_param: dataplane_network} + security_groups: + - default + vm1_floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: floating_network} + port_id: {get_resource: vm1_admin_port} + vm1: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: {get_resource: vm1_admin_port} + - port: {get_resource: vm1_dataplane_port} + vm2_admin_port: + type: OS::Neutron::Port + properties: + network: {get_param: internal_network} + security_groups: + - default + vm2_dataplane_port: + type: OS::Neutron::Port + properties: + network: {get_param: dataplane_network} + security_groups: + - default + vm2_floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: floating_network} + port_id: {get_resource: vm2_admin_port} + vm2: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: {get_resource: vm2_admin_port} + - port: {get_resource: vm2_dataplane_port} + vm3_admin_port: + type: OS::Neutron::Port + properties: + network: {get_param: internal_network} + security_groups: + - default + vm3_dataplane_port: + type: OS::Neutron::Port + properties: + network: {get_param: dataplane_network} + security_groups: + - default + vm3_floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: floating_network} + port_id: {get_resource: vm3_admin_port} + vm3: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: {get_resource: vm3_admin_port} + - port: {get_resource: vm3_dataplane_port} + +outputs: + total_number_of_VMs: + description: Number of VMs created by this stack + value: 3 + vm1_private_ip: + description: IP address ofVM1 admin port + value: { get_attr: [vm1_admin_port, fixed_ips, 0, ip_address] } + vm1_public_ip: + description: Floating IP address of VM1 in public network + value: { get_attr: [ vm1_floating_ip, floating_ip_address ] } + vm1_dataplane_ip: + description: IP address of VM1 dataplane port + value: { get_attr: [vm1_dataplane_port, fixed_ips, 0, ip_address] } + vm1_dataplane_mac: + description: The MAC address of VM1 dataplane port + value: { get_attr: [vm1_dataplane_port, mac_address] } + vm2_private_ip: + description: IP address of the VM2 admin port + value: { get_attr: [vm2_admin_port, fixed_ips, 0, ip_address] } + vm2_public_ip: + description: Floating IP address of VM2 in public network + value: { get_attr: [ vm2_floating_ip, floating_ip_address ] } + vm2_dataplane_ip: + description: IP address of VM2 dataplane port + value: { get_attr: [vm2_dataplane_port, fixed_ips, 0, ip_address] } + vm2_dataplane_mac: + description: The MAC address of VM2 dataplane port + value: { get_attr: [vm2_dataplane_port, mac_address] } + vm3_private_ip: + description: IP address of the VM3 admin port + value: { 
get_attr: [vm3_admin_port, fixed_ips, 0, ip_address] } + vm3_public_ip: + description: Floating IP address of VM3 in public network + value: { get_attr: [ vm3_floating_ip, floating_ip_address ] } + vm3_dataplane_ip: + description: IP address of VM3 dataplane port + value: { get_attr: [vm3_dataplane_port, fixed_ips, 0, ip_address] } + vm3_dataplane_mac: + description: The MAC address of VM3 dataplane port + value: { get_attr: [vm3_dataplane_port, mac_address] } diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/3VMsriovrapid.yaml b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/3VMsriovrapid.yaml new file mode 100644 index 00000000..fe910def --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/3VMsriovrapid.yaml @@ -0,0 +1,177 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +heat_template_version: 2016-04-08 +description: RAPID stack (Rapid Automated Performance Indication for Dataplane) +parameters: + image: + type: string + label: Image name or ID + description: Image to be used for compute instance + default: RapidVM + flavor: + type: string + label: Flavor + description: Type of instance (flavor) to be used + default: prox_flavor + key: + type: string + label: Key name + description: Name of key-pair to be used for compute instance + default: prox + dataplane_network: + type: string + label: Private network name or ID + description: Network to attach instance to. + default: dataplane-network + internal_network: + type: string + label: Private network name or ID + description: Network to attach instance to. + default: admin_internal_net + floating_network: + type: string + label: Floating network name or ID + description: Public Network to attach instance to. + default: admin_floating_net + availability_zone: + type: string + description: The Availability Zone to launch the instance. 
+ default: nova + vm1_sriov_port: + type: string + description: The sriov port to be used by VM1 + default: Port1 + vm2_sriov_port: + type: string + description: The sriov port to be used by VM2 + default: Port2 + vm3_sriov_port: + type: string + description: The sriov port to be used by VM3 + default: Port3 + +resources: + vm1_admin_port: + type: OS::Neutron::Port + properties: + network: {get_param: internal_network} + security_groups: + - default + vm1_floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: floating_network} + port_id: {get_resource: vm1_admin_port} + vm1: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: {get_resource: vm1_admin_port} + - port: {get_param: vm1_sriov_port} + vm2_admin_port: + type: OS::Neutron::Port + properties: + network: {get_param: internal_network} + security_groups: + - default + vm2_floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: floating_network} + port_id: {get_resource: vm2_admin_port} + vm2: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: {get_resource: vm2_admin_port} + - port: {get_param: vm2_sriov_port} + vm3_admin_port: + type: OS::Neutron::Port + properties: + network: {get_param: internal_network} + security_groups: + - default + vm3_floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: floating_network} + port_id: {get_resource: vm3_admin_port} + vm3: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - port: {get_resource: vm3_admin_port} + - port: {get_param: vm3_sriov_port} + +outputs: + total_number_of_VMs: + description: Number of VMs created by this stack + value: 3 + vm1_private_ip: + description: IP address of VM1 admin port + value: { get_attr: [vm1_admin_port, fixed_ips, 0, ip_address] } + vm1_public_ip: + description: Floating IP address of VM1 in public network + value: { get_attr: [ vm1_floating_ip, floating_ip_address ] } + vm1_dataplane_ip: + description: IP address of VM1 dataplane port + value: { get_attr: [vm1, networks,{get_param: dataplane_network},0] } + vm1_dataplane_mac: + description: The MAC address of VM1 dataplane port + value: { get_attr: [vm1, addresses, {get_param: dataplane_network}] } + vm2_private_ip: + description: IP address of VM2 admin port + value: { get_attr: [vm2_admin_port, fixed_ips, 0, ip_address] } + vm2_public_ip: + description: Floating IP address of VM2 in public network + value: { get_attr: [ vm2_floating_ip, floating_ip_address ] } + vm2_dataplane_ip: + description: IP address of VM2 dataplane port + value: { get_attr: [vm2, networks,{get_param: dataplane_network},0] } + vm2_dataplane_mac: + description: The MAC address of VM2 dataplane port + value: { get_attr: [vm2, addresses, {get_param: dataplane_network}]} + vm3_private_ip: + description: IP address of VM3 admin port + value: { get_attr: [vm3_admin_port, fixed_ips, 0, ip_address] } + vm3_public_ip: + description: Floating IP address of 
VM32 in public network + value: { get_attr: [ vm3_floating_ip, floating_ip_address ] } + vm3_dataplane_ip: + description: IP address of VM3 dataplane port + value: { get_attr: [vm3, networks,{get_param: dataplane_network},0] } + vm3_dataplane_mac: + description: The MAC address of VM3 dataplane port + value: { get_attr: [vm3, addresses, {get_param: dataplane_network}]} diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README index 7c229904..c29532ac 100644 --- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README @@ -24,9 +24,12 @@ Copy the files in a directory on a machine that can run the OpenStack CLI commands and that can reach the OpenStack public network. Also create a qcow2 image in the same directory with the following characteristics: * Name of the qcow2 file should be: rapidVM.qcow2 - This default name can be changed on the rapid command line + This default name can be overruled on the rapid command line (--image_file) * Should have DPDK and PROX installed. PROX should be in /root/prox/ directory * Image should have cloud-init installed +* /mnt/huge should exist to support a command that is executed at startup of the VM: 'mount -t hugetlbfs nodev /mnt/huge' +* Compile prox with 'make crc=soft'. This is a workaround for some cases where the crc calculation offload is not working as expected. +* Compile dpdk to support AESN-NI Multi Buffer Crypto Poll Mode Driver: http://dpdk.org/doc/guides/cryptodevs/aesni_mb.html Source the openrc file of the OpenStack environment so that the OpenStack CLI commands can be run: @@ -34,52 +37,68 @@ commands can be run: Now you can run the createrapid.py file. Use help for more info on the usage: # ./createrapid.py --help -runrapid.py will use the OpenStack CLI to create the flavor, key-pair, network, image, +createrapid.py will use the OpenStack CLI to create the flavor, key-pair, network, image, stack, ... -It will create a <STACK>.cfg file containing all info that will be used by runrapid.py +It will create a <STACK>.env file containing all info that will be used by runrapid.py to actually run the tests. Logging can be found in the CREATE<STACK>.log file Now you can run the runrapid.py file. Use help for more info on the usage: # ./runrapid.py --help -The script will connect to the 2 VMs that have been instantiated and it will launch -PROX in both VMs. +The script will connect to all VMs that have been instantiated and it will launch +PROX in all VMs. +This will be done through the floating IP assigned to the VMs. You need to make sure +that floating IPs are working on your OpenStack deployment. Once that is done it will connect to the PROX tcp socket and start sending commands to run the actual test. It will print test results on the screen while running. -The PROX instance in the Generator VM will generate packets which will arrive in -the PROX instance running on the SUT (System Under Test) VM. The SUT will then -send the packets back to the generator by swapping source and destination. +The actual test that is running is decribed in <TEST>.test. Notes about prox_user_data.sh script: - The script contains commands that will be executed using cloud-init at startup of the VMs. - The script also assumes some specific DPDK directory and tools which might change over different DPDK release. This release has been tested with DPDK-17.02. +- huge pages are allocated for DPDK on node 0 (hard-coded) in the VM. 
-An example of the cfg file generated by createrapid.py can be found below. +Note on using SRIOV ports: +Before running createrapid, it is best to already create the network, subnet and ports +manually by entering the following commands (change the parameters to your needs): +openstack network create --share --external --provider-network-type flat --provider-physical-network physnet2 fast-network +openstack subnet create --network fast-network --subnet-range 20.20.20.0/24 --gateway none fast-subnet +openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port1 +openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port2 +openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port3 +Make sure to use the network and subnet in the createrapid parameters list. Port1, Port2 and Port3 +are being used in the yaml files. + + +An example of the env file generated by createrapid.py can be found below. Note that this file can be created manually in case the stack is created in a different way (not using the createrapid.py). This can be useful in case you are not using OpenStack as a VIM or when using special configurations that cannot be -achieved using createrapid.py. Only the [Generator] and [SUT] section are used as -input for runrapid.py. In the [SUT] section, you can also set admin_ip = 'none'. -In that case, only the generator will be controlled by runrapid.py and you must -start the SUT manually. - -[Generator] -admin_ip = 192.168.3.139 -dp_ip = 10.10.10.3 -hex_dp_ip = 0a 0a 0a 03 -dp_mac = fa:16:3e:96:6c:e7 - -[SUT] -admin_ip = 192.168.3.133 +achieved using createrapid.py. Only the [VMx] sections are used as +input for runrapid.py. +[DEFAULT] +admin_ip = none + +[VM1] +admin_ip = 192.168.4.130 dp_ip = 10.10.10.6 -hex_dp_ip = 0a 0a 0a 06 -dp_mac = fa:16:3e:0b:9c:57 +dp_mac = fa:16:3e:3c:1e:12 + +[VM2] +admin_ip = 192.168.4.140 +dp_ip = 10.10.10.9 +dp_mac = fa:16:3e:2a:00:5d + +[VM3] +admin_ip = 192.168.4.138 +dp_ip = 10.10.10.11 +dp_mac = fa:16:3e:ae:fa:86 [OpenStack] -stack = rapidTestEnv -yaml = rapid.yaml +stack = rapid +yaml = 3VMrapid.yaml key = prox flavor = prox_flavor image = rapidVM @@ -87,9 +106,11 @@ image_file = rapidVM.qcow2 dataplane_network = dataplane-network subnet = dpdk-subnet subnet_cidr = 10.10.10.0/24 -admin_network = admin_internal_net +internal_network = admin_internal_net +floating_network = admin_floating_net [rapid] loglevel = DEBUG -version = 17.09.03 +version = 17.10.25 +total_number_of_vms = 3 diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test new file mode 100644 index 00000000..e1180039 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test @@ -0,0 +1,26 @@ +[DEFAULT] +name = BasicSwapTesting +number_of_tests = 3 +total_number_of_vms = 2 +init_code=init_test() +dest_vm = not_used +gw_vm = not_used +script_control = false + + +[VM1] +name = Generator +config_file = gen.cfg +dest_vm = 2 + +[VM2] +name = Swap +config_file = swap.cfg + +[test1] +cmd=run_speedtest(sock[0],sock[1],sutstatcores,genstatcores,gencontrolcores) +[test2] +cmd=run_sizetest(sock[0],sock[1],sutstatcores,genstatcores,gencontrolcores) +[test3] +cmd=run_flowtest(sock[0],sock[1],sutstatcores,genstatcores,gencontrolcores) + diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py index 
eb4910ff..0fb67237 100755 --- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py @@ -31,8 +31,8 @@ from logging import handlers from prox_ctrl import prox_ctrl import ConfigParser -version="17.09.03" -stack = "rapidTestEnv" #Default string for stack +version="17.12.15" +stack = "rapid" #Default string for stack yaml = "rapid.yaml" #Default string for yaml file key = "prox" # This is also the default in the yaml file.... flavor = "prox_flavor" # This is also the default in the yaml file.... @@ -41,12 +41,13 @@ image_file = "rapidVM.qcow2" dataplane_network = "dataplane-network" # This is also the default in the yaml file.... subnet = "dpdk-subnet" #Hardcoded at this moment subnet_cidr="10.10.10.0/24" # cidr for dataplane -admin_network="admin_internal_net" +internal_network="admin_internal_net" +floating_network="admin_floating_net" loglevel="DEBUG" # sets log level for writing to file runtime=10 # time in seconds for 1 test run def usage(): - print("usage: rapid [--version] [-v]") + print("usage: createrapid [--version] [-v]") print(" [--stack STACK_NAME]") print(" [--yaml YAML_FILE]") print(" [--key KEY_NAME]") @@ -54,31 +55,37 @@ def usage(): print(" [--image IMAGE_NAME]") print(" [--image_file IMAGE_FILE]") print(" [--dataplane_network DP_NETWORK]") - print(" [--admin_network ADMIN_NETWORK]") + print(" [--subnet DP_SUBNET]") + print(" [--subnet_cidr SUBNET_CIDR]") + print(" [--internal_network ADMIN_NETWORK]") + print(" [--floating_network ADMIN_NETWORK]") print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL") print(" [-h] [--help]") print("") - print("Command-line interface to RAPID") + print("Command-line interface to createrapid") print("") print("optional arguments:") print(" -v, --version Show program's version number and exit") - print(" --stack STACK_NAME Specify a name for the heat stack. Default is rapidTestEnv.") - print(" --yaml YAML_FILE Specify the yaml file to be used. Default is rapid.yaml.") - print(" --key KEY_NAME Specify the key to be used. Default is prox.") - print(" --flavor FLAVOR_NAME Specify the flavor to be used. Default is prox_flavor.") - print(" --image IMAGE_NAME Specify the image to be used. Default is rapidVM.") - print(" --image_file IMAGE_FILE Specify the image qcow2 file to be used. Default is rapidVM.qcow2.") - print(" --dataplane_network NETWORK Specify the network name to be used for the dataplane. Default is dataplane-network.") - print(" --admin_network NETWORK Specify the network name to be used for the control plane. Default is admin-network.") + print(" --stack STACK_NAME Specify a name for the heat stack. Default is %s."%stack) + print(" --yaml YAML_FILE Specify the yaml file to be used. Default is %s."%yaml) + print(" --key KEY_NAME Specify the key to be used. Default is %s."%key) + print(" --flavor FLAVOR_NAME Specify the flavor to be used. Default is %s."%flavor) + print(" --image IMAGE_NAME Specify the image to be used. Default is %s."%image) + print(" --image_file IMAGE_FILE Specify the image qcow2 file to be used. Default is %s."%image_file) + print(" --dataplane_network NETWORK Specify the network name to be used for the dataplane. Default is %s."%dataplane_network) + print(" --subnet DP_SUBNET Specify the subnet name to be used for the dataplane. Default is %s."%subnet) + print(" --subnet_cidr SUBNET_CIDR Specify the subnet CIDR to be used for the dataplane. 
Default is %s."%subnet_cidr) + print(" --internal_network NETWORK Specify the network name to be used for the control plane. Default is %s."%internal_network) + print(" --floating_network NETWORK Specify the external floating ip network name. Default is %s."%floating_network) print(" --log Specify logging level for log file output, screen output level is hard coded") print(" -h, --help Show help message and exit.") print("") print("To delete the rapid stack, type the following command") - print(" openstack stack delete --yes --wait rapidTestEnv") - print("Note that rapidTestEnv is the default stack name. Replace with STACK_NAME if needed") + print(" openstack stack delete --yes --wait %s"%stack) + print("Note that %s is the default stack name. Replace with STACK_NAME if needed"%stack) try: - opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "yaml=","stack=","key=","flavor=","image=","dataplane_network=","admin_network=","log="]) + opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "yaml=","stack=","key=","flavor=","image=","image_file=","dataplane_network=","subnet=","subnet_cidr=","internal_network=","floating_network=","log="]) except getopt.GetoptError as err: print("===========================================") print(str(err)) @@ -116,9 +123,18 @@ for opt, arg in opts: elif opt in ("--dataplane_network"): dataplane_network = arg print ("Using dataplane network: "+ dataplane_network) - elif opt in ("--admin_network"): - admin_network = arg - print ("Using controle plane network: "+ admin_network) + elif opt in ("--subnet"): + subnet = arg + print ("Using dataplane subnet: "+ subnet) + elif opt in ("--subnet_cidr"): + subnet_cidr = arg + print ("Using dataplane subnet: "+ subnet_cidr) + elif opt in ("--internal_network"): + internal_network = arg + print ("Using controle plane network: "+ internal_network) + elif opt in ("--floating_network"): + floating_network = arg + print ("Using floating ip network: "+ floating_network) elif opt in ("--log"): loglevel = arg print ("Log level: "+ loglevel) @@ -140,8 +156,8 @@ log.setLevel(numeric_level) log.propagate = 0 # create a console handler -# and set its log level to the command-line option -# +# and set its log level to the command-line option +# console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) console_handler.setFormatter(screen_formatter) @@ -165,7 +181,7 @@ needRoll = os.path.isfile(log_file) # This is a stale log, so roll it -if needRoll: +if needRoll: # Add timestamp log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime()) @@ -177,16 +193,28 @@ log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime()) log.debug("createrapid.py version: "+version) # Checking if the control network already exists, if not, stop the script -log.debug("Checking control plane network: "+admin_network) -cmd = 'openstack network show '+admin_network +log.debug("Checking control plane network: "+internal_network) +cmd = 'openstack network show '+internal_network +log.debug (cmd) +cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4' +NetworkExist = subprocess.check_output(cmd , shell=True).strip() +if NetworkExist == 'ACTIVE': + log.info("Control plane network ("+internal_network+") already active") +else: + log.exception("Control plane network " + internal_network + " not existing") + raise Exception("Control plane network " + internal_network + " not existing") + +# Checking if the floating ip network already exists, if not, stop the script 
+log.debug("Checking floating ip network: "+floating_network) +cmd = 'openstack network show '+floating_network log.debug (cmd) cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4' NetworkExist = subprocess.check_output(cmd , shell=True).strip() if NetworkExist == 'ACTIVE': - log.info("Control plane network ("+admin_network+") already active") + log.info("Floating ip network ("+floating_network+") already active") else: - log.exception("Control plane network " + admin_network + " not existing") - raise Exception("Control plane network " + admin_network + " not existing") + log.exception("Floating ip network " + floating_network + " not existing") + raise Exception("Floating ip network " + floating_network + " not existing") # Checking if the image already exists, if not create it log.debug("Checking image: "+image) @@ -306,7 +334,7 @@ cmd = cmd+' |grep "stack_status " | tr -s " " | cut -d"|" -f 3' StackRunning = subprocess.check_output(cmd , shell=True).strip() if StackRunning == '': log.info('Creating Stack ...') - cmd = 'openstack stack create -t '+ yaml + ' --parameter flavor="'+flavor +'" --parameter key="'+ key + '" --parameter image="'+image + '" --parameter dataplane_network="'+dataplane_network+ '" --parameter admin_network="'+admin_network+'" --wait '+stack + cmd = 'openstack stack create -t '+ yaml + ' --parameter flavor="'+flavor +'" --parameter key="'+ key + '" --parameter image="'+image + '" --parameter dataplane_network="'+dataplane_network+ '" --parameter internal_network="'+internal_network+'" --parameter floating_network="'+floating_network+'" --wait '+stack log.debug(cmd) cmd = cmd + ' |grep "stack_status " | tr -s " " | cut -d"|" -f 3' StackRunning = subprocess.check_output(cmd , shell=True).strip() @@ -319,29 +347,27 @@ log.info("Stack ("+stack+") running") cmd='openstack stack show -f yaml -c outputs ' + stack log.debug(cmd) output = subprocess.check_output(cmd , shell=True).strip() -matchObj = re.search('.*gen_dataplane_ip.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)', output, re.DOTALL) -genDPIP = matchObj.group(1) -matchObj = re.search('.*gen_public_ip.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)', output, re.DOTALL) -genAdminIP = matchObj.group(1) -matchObj = re.search('.*gen_dataplane_mac.*?([a-fA-F0-9:]{17})', output, re.DOTALL) -genDPmac = matchObj.group(1) -matchObj = re.search('.*sut_dataplane_ip.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)', output, re.DOTALL) -sutDPIP = matchObj.group(1) -matchObj = re.search('.*sut_public_ip.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)', output, re.DOTALL) -sutAdminIP = matchObj.group(1) -matchObj = re.search('.*sut_dataplane_mac.*?([a-fA-F0-9:]{17})', output, re.DOTALL) -sutDPmac = matchObj.group(1) -log.info('Generator: (admin IP: '+ genAdminIP + '), (dataplane IP: ' + genDPIP+'), (dataplane MAC: ' +genDPmac+')') -log.info('SUT: (admin IP: '+ sutAdminIP + '), (dataplane IP: ' + sutDPIP+'), (dataplane MAC: ' +sutDPmac+')') +matchObj = re.search('.*total_number_of_VMs.*?([0-9])', output, re.DOTALL) +total_number_of_VMs = matchObj.group(1) +vmDPIP =[] +vmAdminIP =[] +vmDPmac =[] config = ConfigParser.RawConfigParser() -config.add_section('Generator') -config.set('Generator', 'admin_ip', genAdminIP) -config.set('Generator', 'dp_ip', genDPIP) -config.set('Generator', 'dp_mac', genDPmac) -config.add_section('SUT') -config.set('SUT', 'admin_ip', sutAdminIP) -config.set('SUT', 'dp_ip', sutDPIP) -config.set('SUT', 'dp_mac', sutDPmac) +for vm in range(1, int(total_number_of_VMs)+1): + searchString = '.*vm%d_dataplane_ip.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)' % vm + 
matchObj = re.search(searchString, output, re.DOTALL) + vmDPIP.append(matchObj.group(1)) + searchString = '.*vm%d_public_ip.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)' % vm + matchObj = re.search(searchString, output, re.DOTALL) + vmAdminIP.append(matchObj.group(1)) + searchString = '.*vm%d_dataplane_mac.*?([a-fA-F0-9:]{17})' % vm + matchObj = re.search(searchString, output, re.DOTALL) + vmDPmac.append(matchObj.group(1)) + log.info('VM%d: (admin IP: %s), (dataplane IP: %s), (dataplane MAC: %s)' % (vm,vmAdminIP[-1],vmDPIP[-1],vmDPmac[-1])) + config.add_section('VM%d'%vm) + config.set('VM%d'%vm, 'admin_ip', vmAdminIP[-1]) + config.set('VM%d'%vm, 'dp_ip', vmDPIP[-1]) + config.set('VM%d'%vm, 'dp_mac', vmDPmac[-1]) config.add_section('OpenStack') config.set('OpenStack', 'stack', stack) config.set('OpenStack', 'yaml', yaml) @@ -352,12 +378,14 @@ config.set('OpenStack', 'image_file', image_file) config.set('OpenStack', 'dataplane_network', dataplane_network) config.set('OpenStack', 'subnet', subnet) config.set('OpenStack', 'subnet_cidr', subnet_cidr) -config.set('OpenStack', 'admin_network', admin_network) +config.set('OpenStack', 'internal_network', internal_network) +config.set('OpenStack', 'floating_network', floating_network) config.add_section('rapid') config.set('rapid', 'loglevel', loglevel) config.set('rapid', 'version', version) +config.set('rapid', 'total_number_of_VMs', total_number_of_VMs) config.set('DEFAULT', 'admin_ip', 'none') -# Writing our configuration file -with open(stack+'.cfg', 'wb') as configfile: - config.write(configfile) +# Writing the environment file +with open(stack+'.env', 'wb') as envfile: + config.write(envfile) diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg index f06079f9..3222bde5 100644 --- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg @@ -31,7 +31,7 @@ $mbs=8 mempool size=4K [global] -name=Basic Gen +name=${name} [core 0] mode=master @@ -41,12 +41,25 @@ name=p0 task=0 mode=gen sub mode=l3 -rx port=p0 tx port=p0 bps=1250000000 -pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d ${gen_hex_ip} ${sut_hex_ip} 0b b8 0b b9 00 08 55 7b -gateway ipv4=${sut_ip} -;local ipv4=${gen_ip} +pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 24 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 10 55 7b +pkt size=50 +;gateway ipv4=${gw_ip} +local ipv4=${local_ip} min bulk size=$mbs max bulk size=16 +drop=no +lat pos=42 +accuracy pos=46 + +[core 2] +name=lat +task=0 +mode=lat +sub mode=l3 +rx port=p0 +lat pos=42 +accuracy pos=46 + diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg new file mode 100644 index 00000000..03f8b354 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg @@ -0,0 +1,65 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[lua] +dofile("parameters.lua") + +[port 0] +name=p0 + +[variables] +$mbs=8 + +[defaults] +mempool size=4K + +[global] +name=${name} + +[core 0] +mode=master + +[core 1] +name=p0 +task=0 +mode=gen +sub mode=l3 +tx port=p0 +bps=1250000000 +pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 24 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 10 55 7b +pkt size=50 +gateway ipv4=${gw_ip} +local ipv4=${local_ip} +min bulk size=$mbs +max bulk size=16 +drop=no +lat pos=42 +accuracy pos=46 + +[core 2] +name=lat +task=0 +mode=lat +sub mode=l3 +rx port=p0 +lat pos=42 +accuracy pos=46 + + diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg new file mode 100644 index 00000000..b508c9b4 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg @@ -0,0 +1,47 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[lua] +dofile("parameters.lua") + +[port 0] +name=if0 +mac=hardware + +[defaults] +mempool size=2K + +[global] +name=${name} + +[core 0] +mode=master + +[core 1] +name=impair +task=0 +mode=impair +sub mode=l3 +rx port=if0 +tx port=if0 +delay us=20 +probability=60 +local ipv4=${local_ip} + diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test new file mode 100644 index 00000000..da856876 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test @@ -0,0 +1,26 @@ +[DEFAULT] +name = impairTesting +number_of_tests = 1 +total_number_of_vms = 3 +init_code=init_test() +dest_vm = not_used +gw_vm = not_used +script_control = false + +[VM1] +name = Generator +config_file = gen_gw.cfg +gw_vm = 2 +dest_vm = 3 +script_control = true + +[VM2] +name = ImpairGW +config_file = impair.cfg + +[VM3] +name = Swap +config_file = swap.cfg + +[test1] +cmd=run_speedtest(sock[0],'none',sutstatcores,genstatcores,gencontrolcores) diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py index 7438d4d6..daa96a50 100644 --- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py @@ -183,6 +183,16 @@ class prox_sock(object): def reset_stats(self): self._send('reset stats') + def lat_stats(self, cores, task=0): + min_lat = max_lat = avg_lat = 0 + self._send('lat stats %s %s' % (','.join(map(str, cores)), task)) + for core in cores: + stats = self._recv().split(',') + min_lat += int(stats[0]) + max_lat += int(stats[1]) + avg_lat += int(stats[2]) + return min_lat, max_lat, avg_lat + def core_stats(self, cores, 
task=0): rx = tx = drop = tsc = hz = 0 self._send('core stats %s %s' % (','.join(map(str, cores)), task)) diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh index da2ab925..f211934a 100755 --- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh @@ -16,7 +16,7 @@ ## limitations under the License. ## -echo 128 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages +echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages mount -t hugetlbfs nodev /mnt/huge modprobe uio insmod /root/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml index 6f3c1cd4..63778c1c 100644 --- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml @@ -37,7 +37,7 @@ parameters: label: Private network name or ID description: Network to attach instance to. default: dataplane-network - admin_network: + internal_network: type: string label: Private network name or ID description: Network to attach instance to. @@ -53,24 +53,24 @@ parameters: default: nova resources: - sut_admin_port: + vm1_admin_port: type: OS::Neutron::Port properties: - network: {get_param: admin_network} + network: {get_param: internal_network} security_groups: - default - sut_dataplane_port: + vm1_dataplane_port: type: OS::Neutron::Port properties: network: {get_param: dataplane_network} security_groups: - default - sut_floating_ip: + vm1_floating_ip: type: OS::Neutron::FloatingIP properties: floating_network: {get_param: floating_network} - port_id: {get_resource: sut_admin_port} - sut: + port_id: {get_resource: vm1_admin_port} + vm1: type: OS::Nova::Server properties: availability_zone: { get_param: availability_zone } @@ -80,26 +80,26 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: - - port: {get_resource: sut_admin_port} - - port: {get_resource: sut_dataplane_port} - gen_admin_port: + - port: {get_resource: vm1_admin_port} + - port: {get_resource: vm1_dataplane_port} + vm2_admin_port: type: OS::Neutron::Port properties: - network: {get_param: admin_network} + network: {get_param: internal_network} security_groups: - default - gen_dataplane_port: + vm2_dataplane_port: type: OS::Neutron::Port properties: network: {get_param: dataplane_network} security_groups: - default - gen_floating_ip: + vm2_floating_ip: type: OS::Neutron::FloatingIP properties: floating_network: {get_param: floating_network} - port_id: {get_resource: gen_admin_port} - gen: + port_id: {get_resource: vm2_admin_port} + vm2: type: OS::Nova::Server properties: availability_zone: { get_param: availability_zone } @@ -109,31 +109,75 @@ resources: image: { get_param: image } flavor: { get_param: flavor } networks: - - port: {get_resource: gen_admin_port} - - port: {get_resource: gen_dataplane_port} + - port: {get_resource: vm2_admin_port} + - port: {get_resource: vm2_dataplane_port} +# vm3_admin_port: +# type: OS::Neutron::Port +# properties: +# network: {get_param: internal_network} +# security_groups: +# - default +# vm3_dataplane_port: +# type: OS::Neutron::Port +# properties: +# network: {get_param: dataplane_network} +# security_groups: +# - default +# vm3_floating_ip: +# type: OS::Neutron::FloatingIP +# properties: +# floating_network: {get_param: 
floating_network} +# port_id: {get_resource: vm3_admin_port} +# vm3: +# type: OS::Nova::Server +# properties: +# availability_zone: { get_param: availability_zone } +# user_data: +# get_file: prox_user_data.sh +# key_name: { get_param: key } +# image: { get_param: image } +# flavor: { get_param: flavor } +# networks: +# - port: {get_resource: vm3_admin_port} +# - port: {get_resource: vm3_dataplane_port} outputs: - sut_private_ip: - description: IP address of the sut admin port - value: { get_attr: [sut_admin_port, fixed_ips, 0, ip_address] } - sut_public_ip: - description: Floating IP address of sut in public network - value: { get_attr: [ sut_floating_ip, floating_ip_address ] } - sut_dataplane_ip: - description: IP address of sut dataplane port - value: { get_attr: [sut_dataplane_port, fixed_ips, 0, ip_address] } - sut_dataplane_mac: - description: The MAC address of the sut dataplane port - value: { get_attr: [sut_dataplane_port, mac_address] } - gen_private_ip: - description: IP address of the gen admin port - value: { get_attr: [gen_admin_port, fixed_ips, 0, ip_address] } - gen_public_ip: - description: Floating IP address of gen in public network - value: { get_attr: [ gen_floating_ip, floating_ip_address ] } - gen_dataplane_ip: - description: IP address of gen dataplane port - value: { get_attr: [gen_dataplane_port, fixed_ips, 0, ip_address] } - gen_dataplane_mac: - description: The MAC address of the gen dataplane port - value: { get_attr: [gen_dataplane_port, mac_address] } + total_number_of_VMs: + description: Number of VMs created by this stack + value: 2 + vm1_private_ip: + description: IP address ofVM1 admin port + value: { get_attr: [vm1_admin_port, fixed_ips, 0, ip_address] } + vm1_public_ip: + description: Floating IP address of VM1 in public network + value: { get_attr: [ vm1_floating_ip, floating_ip_address ] } + vm1_dataplane_ip: + description: IP address of VM1 dataplane port + value: { get_attr: [vm1_dataplane_port, fixed_ips, 0, ip_address] } + vm1_dataplane_mac: + description: The MAC address of VM1 dataplane port + value: { get_attr: [vm1_dataplane_port, mac_address] } + vm2_private_ip: + description: IP address of the VM2 admin port + value: { get_attr: [vm2_admin_port, fixed_ips, 0, ip_address] } + vm2_public_ip: + description: Floating IP address of VM2 in public network + value: { get_attr: [ vm2_floating_ip, floating_ip_address ] } + vm2_dataplane_ip: + description: IP address of VM2 dataplane port + value: { get_attr: [vm2_dataplane_port, fixed_ips, 0, ip_address] } + vm2_dataplane_mac: + description: The MAC address of VM2 dataplane port + value: { get_attr: [vm2_dataplane_port, mac_address] } +# vm3_private_ip: +# description: IP address of the VM3 admin port +# value: { get_attr: [vm3_admin_port, fixed_ips, 0, ip_address] } +# vm3_public_ip: +# description: Floating IP address of VM3 in public network +# value: { get_attr: [ vm3_floating_ip, floating_ip_address ] } +# vm3_dataplane_ip: +# description: IP address of VM3 dataplane port +# value: { get_attr: [vm3_dataplane_port, fixed_ips, 0, ip_address] } +# vm3_dataplane_mac: +# description: The MAC address of VM3 dataplane port +# value: { get_attr: [vm3_dataplane_port, mac_address] } diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py index b421c709..7447e160 100755 --- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py @@ -31,33 +31,33 @@ from logging import 
handlers from prox_ctrl import prox_ctrl import ConfigParser -version="17.09.03" -stack = "rapidTestEnv" #Default string for stack +version="17.12.15" +stack = "rapid" #Default string for stack +test = "basicrapid" #Default string for stack loglevel="DEBUG" # sets log level for writing to file runtime=10 # time in seconds for 1 test run def usage(): - print("usage: rapid [--version] [-v]") + print("usage: runrapid [--version] [-v]") print(" [--stack STACK_NAME]") + print(" [--test TEST_NAME]") print(" [--runtime TIME_FOR_TEST]") - print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL") + print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]") print(" [-h] [--help]") print("") - print("Command-line interface to RAPID") + print("Command-line interface to runrapid") print("") print("optional arguments:") print(" -v, --version Show program's version number and exit") - print(" --stack STACK_NAME Parameters will be read from STACK_NAME.cfg Default is rapidTestEnv.") + print(" --stack STACK_NAME Parameters will be read from STACK_NAME.env Default is %s."%stack) + print(" --test TEST_NAME Test cases will be read from TEST_NAME.test Default is %s."%test) print(" --runtime Specify time in seconds for 1 test run") print(" --log Specify logging level for log file output, screen output level is hard coded") print(" -h, --help Show help message and exit.") print("") - print("To delete the rapid stack, type the following command") - print(" openstack stack delete --yes --wait rapidTestEnv") - print("Note that rapidTestEnv is the default stack name. Replace with STACK_NAME if needed") try: - opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "stack=","runtime=","log="]) + opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "stack=", "test=","runtime=","log="]) except getopt.GetoptError as err: print("===========================================") print(str(err)) @@ -77,6 +77,9 @@ for opt, arg in opts: if opt in ("--stack"): stack = arg print ("Using '"+stack+"' as name for the stack") + if opt in ("--test"): + test = arg + print ("Using '"+test+".test' for test case definition") elif opt in ("--runtime"): runtime = arg print ("Runtime: "+ runtime) @@ -101,8 +104,8 @@ log.setLevel(numeric_level) log.propagate = 0 # create a console handler -# and set its log level to the command-line option -# +# and set its log level to the command-line option +# console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) console_handler.setFormatter(screen_formatter) @@ -110,7 +113,7 @@ console_handler.setFormatter(screen_formatter) # create a file handler # and set its log level to DEBUG # -log_file = 'RUN' +stack +'.log' +log_file = 'RUN' +stack+'.'+test+'.log' file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10) #file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5) file_handler.setLevel(numeric_level) @@ -126,7 +129,7 @@ needRoll = os.path.isfile(log_file) # This is a stale log, so roll it -if needRoll: +if needRoll: # Add timestamp log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime()) @@ -150,7 +153,7 @@ def connect_socket(client): if attempts > 20: log.exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts)) raise Exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts)) - time.sleep(8) + time.sleep(2) log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts)) log.info("Connected to PROX on %s" % 
client.ip()) return sock @@ -167,22 +170,25 @@ def connect_client(client): if attempts > 20: log.exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex)) raise Exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex)) - time.sleep(8) + time.sleep(2) log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts)) log.info("Connected to VM on %s" % client.ip()) -def run_iteration(gensock,sutsock,cores,gencores): - if sutAdminIP!='none': - old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats([1]) - old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(cores) +def run_iteration(gensock,sutsock,sutstatcores,genstatcores,gencontrolcores): + gensock.start(gencontrolcores) + time.sleep(1) + if sutsock!='none': + old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores) + old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(genstatcores) time.sleep(float(runtime)) + lat_min, lat_max, lat_avg = gensock.lat_stats([2]) # Get statistics after some execution time - new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(cores) - if sutAdminIP!='none': - new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats([1]) + new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(genstatcores) + if sutsock!='none': + new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores) time.sleep(1) #Stop generating - gensock.stop(gencores) + gensock.stop(gencontrolcores) drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface @@ -190,7 +196,7 @@ def run_iteration(gensock,sutsock,cores,gencores): pps_req_tx = round((tx+drop-rx)*tsc_hz*1.0/(tsc*1000000),3) pps_tx = round(tx*tsc_hz*1.0/(tsc*1000000),3) pps_rx = round(rx*tsc_hz*1.0/(tsc*1000000),3) - if sutAdminIP!='none': + if sutsock!='none': sut_rx = new_sut_rx - old_sut_rx sut_tx = new_sut_tx - old_sut_tx sut_tsc = new_sut_tsc - old_sut_tsc @@ -200,9 +206,9 @@ def run_iteration(gensock,sutsock,cores,gencores): pps_sut_tx = 0 pps_sut_tx_str = 'NO MEAS.' if (tx == 0): + log.critical("TX = 0. 
Test interrupted since no packet has been sent.") raise Exception("TX = 0") - drop_rate = round(((pps_req_tx-pps_rx) * 100.0)/pps_req_tx,1) - return(drop_rate,pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx) + return(pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg) def new_speed(speed,drop_rate): # Following calculates the ratio for the new speed to be applied @@ -213,96 +219,68 @@ def new_speed(speed,drop_rate): # The second line goes through (p,q) and (100,y100) y0=0.99 y100=0.1 - p=15 - q=.9 + p=1 + q=.99 ratio = min((q-y0)/p*drop_rate+y0,(q-y100)/(p-100)*drop_rate+q-p*(q-y100)/(p-100)) return (int(speed*ratio*100)+0.5)/100 -def run_speedtest(): - global genclient - global sutclient - log.info("Starting PROX") - speed = 100 - attempts = 0 - cores = [1] - gencores = [1] - cmd = '/root/prox/build/prox -e -t -o cli -f /root/gen.cfg' - genclient.fork_cmd(cmd, 'PROX GEN speed Test') - gensock = connect_socket(genclient) - gensock.reset_stats() - if sutAdminIP!='none': - cmd = '/root/prox/build/prox -t -o cli -f /root/sut.cfg' - sutclient.fork_cmd(cmd, 'PROX SUT speed Test') - sutsock = connect_socket(sutclient) - sutsock.reset_stats() - else: - sutsock = 'none' - log.info("+-----------------------------------------------------------------------------------------------------------+") - log.info("| Generator is sending UDP (1 flow) packets (64 bytes) to SUT. SUT sends packets back |") - log.info("+--------+-----------------+----------------+----------------+----------------+----------------+------------+") - log.info("| Test | Speed requested | Req to Generate| Sent by Gen | Forward by SUT | Rec. by Gen | Result |") - log.info("+--------+-----------------+----------------+----------------+----------------+----------------+------------+") +def get_drop_rate(speed,pps_rx,size): + # pps_rx are all the packets that are received by the generator. That is substracted + # from the pps that we wanted to send. This is calculated by taking the variable speed + # which is the requested percentage of a 10Gb/s link. So we take 10000bps (10Gbps, note + # that the speed variable is already expressed in % so we only take 100 and not 10000) + # divided by the number of bits in 1 packet. That is 8 bits in a byte times the size of + # a frame (=our size + 24 bytes overhead). + return (100*(speed * 100 / (8*(size+24)) - pps_rx)/(speed*100.0/(8*(size+24)))) + +def run_speedtest(gensock,sutsock,sutstatcores,genstatcores,gencores): + log.info("+----------------------------------------------------------------------------------------------------------------------------+") + log.info("| Generator is sending UDP (1 flow) packets (64 bytes) to SUT. SUT sends packets back |") + log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+------------+") + log.info("| Test | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. 
Latency | Result |") + log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+------------+") + speed = 100 + size=64 + attempts = 0 while (speed > 0.1): attempts += 1 print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r') sys.stdout.flush() # Start generating packets at requested speed (in % of a 10Gb/s link) gensock.speed(speed, gencores) - gensock.start(gencores) time.sleep(1) # Get statistics now that the generation is stable and NO ARP messages any more - drop_rate,pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx = run_iteration(gensock,sutsock,cores,gencores) + pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg = run_iteration(gensock,sutsock,sutstatcores,genstatcores,gencores) + drop_rate = get_drop_rate(speed,pps_rx,size) if ((drop_rate) < 1): # This will stop the test when number of dropped packets is below a certain percentage - log.info("+--------+-----------------+----------------+----------------+----------------+----------------+------------+") - log.info('|{:>7}'.format(str(attempts))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps | SUCCESS |") - log.info("+--------+-----------------+----------------+----------------+----------------+----------------+------------+") + log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+------------+") + log.info('|{:>7}'.format(str(attempts))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+' Mpps | '+ '{:>9}'.format(str(lat_avg))+" us | SUCCESS |") + log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+------------+") break else: - log.info('|{:>7}'.format(str(attempts))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps | FAILED |") + log.info('|{:>7}'.format(str(attempts))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+' Mpps | '+ '{:>9}'.format(str(lat_avg))+" us | FAILED |") speed = new_speed(speed,drop_rate) - gensock.quit() - if sutAdminIP!='none': - sutsock.quit() time.sleep(2) # print("") -def run_flowtest(): - global genclient - global sutclient - log.info("Starting PROX") - speed = 100 - attempts = 0 - cores = [1] - gencores = [1] - cmd = '/root/prox/build/prox -e -t -o cli -f /root/gen.cfg' - genclient.fork_cmd(cmd, 'PROX GEN flow Test') - gensock = connect_socket(genclient) - gensock.reset_stats() - if sutAdminIP!='none': - cmd = '/root/prox/build/prox -t -o cli -f /root/sut.cfg' - sutclient.fork_cmd(cmd, 'PROX SUT flow Test') - sutsock = connect_socket(sutclient) - sutsock.reset_stats() - else: - sutsock = 'none' - log.info("+----------------------------------------------------------------------------------------------+") - log.info("| UDP, 64 bytes, different number of flows by randomizing SRC & DST UDP port |") - 
log.info("+--------+-----------------+----------------+----------------+----------------+----------------+") - log.info("| Flows | Speed requested | Req to Generate| Sent by Gen | Forward by SUT | Rec. by Gen |") - log.info("+--------+-----------------+----------------+----------------+----------------+----------------+") - cores = [1] - gencores = [1] +def run_flowtest(gensock,sutsock,sutstatcores,genstatcores,gencores): + log.info("+---------------------------------------------------------------------------------------------------------------+") + log.info("| UDP, 64 bytes, different number of flows by randomizing SRC & DST UDP port |") + log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+") + log.info("| Flows | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency |") + log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+") speed = 100 + size=64 # To generate a desired number of flows, PROX will randomize the bits in source and destination ports, as specified by the bit masks in the flows variable. flows={128:['1000000000000XXX','100000000000XXXX'],1024:['10000000000XXXXX','10000000000XXXXX'],8192:['1000000000XXXXXX','100000000XXXXXXX'],65535:['10000000XXXXXXXX','10000000XXXXXXXX'],524280:['1000000XXXXXXXXX','100000XXXXXXXXXX']} +# flows={524280:['1000000XXXXXXXXX','100000XXXXXXXXXX']} for flow_number in sorted(flows.iterkeys()): #speed = 100 Commented out: Not starting from 100% since we are trying more flows, so speed will not be higher than the speed achieved in previous loop - attempts = 0 gensock.reset_stats() - if sutAdminIP!='none': + if sutsock!='none': sutsock.reset_stats() source_port,destination_port = flows[flow_number] gensock.set_random(gencores,0,34,source_port,2) @@ -310,59 +288,34 @@ def run_flowtest(): while (speed > 0.1): print(str(flow_number)+' flows: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r') sys.stdout.flush() - attempts += 1 # Start generating packets at requested speed (in % of a 10Gb/s link) gensock.speed(speed, gencores) - gensock.start(gencores) time.sleep(1) # Get statistics now that the generation is stable and NO ARP messages any more - drop_rate,pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx = run_iteration(gensock,sutsock,cores,gencores) + pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg = run_iteration(gensock,sutsock,sutstatcores,genstatcores,gencores) + drop_rate = get_drop_rate(speed,pps_rx,size) if ((drop_rate) < 1): # This will stop the test when number of dropped packets is below a certain percentage - log.info('|{:>7}'.format(str(flow_number))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps |") - log.info("+--------+-----------------+----------------+----------------+----------------+----------------+") + log.info('|{:>7}'.format(str(flow_number))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps |"+ '{:>9}'.format(str(lat_avg))+" us |") + log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+") break speed = new_speed(speed,drop_rate) - 
-    gensock.quit()
-    if sutAdminIP!='none':
-        sutsock.quit()
     time.sleep(2)
 #    print("")
-def run_sizetest():
-    global genclient
-    global sutclient
-    log.info("Starting PROX")
-    speed = 100
-    attempts = 0
-    cores = [1]
-    gencores = [1]
-    cmd = '/root/prox/build/prox -e -t -o cli -f /root/gen.cfg'
-    genclient.fork_cmd(cmd, 'PROX GEN size Test')
-    gensock = connect_socket(genclient)
-    gensock.reset_stats()
-    if sutAdminIP!='none':
-        cmd = '/root/prox/build/prox -t -o cli -f /root/sut.cfg'
-        sutclient.fork_cmd(cmd, 'PROX SUT size Test')
-        sutsock = connect_socket(sutclient)
-        sutsock.reset_stats()
-    else:
-        sutsock = 'none'
-    log.info("+----------------------------------------------------------------------------------------------+")
-    log.info("| UDP, 1 flow, different packet sizes |")
-    log.info("+--------+-----------------+----------------+----------------+----------------+----------------+")
-    log.info("| Pktsize| Speed requested | Req to Generate| Sent by Gen | Forward by SUT | Rec. by Gen |")
-    log.info("+--------+-----------------+----------------+----------------+----------------+----------------+")
-    cores = [1]
-    gencores = [1]
+def run_sizetest(gensock,sutsock,sutstatcores,genstatcores,gencores):
+    log.info("+---------------------------------------------------------------------------------------------------------------+")
+    log.info("| UDP, 1 flow, different packet sizes |")
+    log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+")
+    log.info("| Pktsize| Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency |")
+    log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+")
     speed = 100
     # To generate a desired number of flows, PROX will randomize the bits in source and destination ports, as specified by the bit masks in the flows variable.
-    sizes=[1500,1024,512,256,128,64]
+    sizes=[1400,1024,512,256,128,64]
     for size in sizes:
         #speed = 100 Commented out: Not starting from 100% since we are trying smaller packets, so speed will not be higher than the speed achieved in previous loop
-        attempts = 0
         gensock.reset_stats()
-        if sutAdminIP!='none':
+        if sutsock!='none':
             sutsock.reset_stats()
         gensock.set_size(gencores,0,size) # This is setting the frame size
         gensock.set_value(gencores,0,16,(size-18),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
@@ -371,83 +324,121 @@ def run_sizetest():
         while (speed > 0.1):
             print(str(size)+' bytes: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
             sys.stdout.flush()
-            attempts += 1
             # Start generating packets at requested speed (in % of a 10Gb/s link)
             gensock.speed(speed, gencores)
-            gensock.start(gencores)
             time.sleep(1)
             # Get statistics now that the generation is stable and NO ARP messages any more
-            drop_rate,pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx = run_iteration(gensock,sutsock,cores,gencores)
+            pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg = run_iteration(gensock,sutsock,sutstatcores,genstatcores,gencores)
+            drop_rate = get_drop_rate(speed,pps_rx,size)
             if ((drop_rate) < 1):
                 # This will stop the test when number of dropped packets is below a certain percentage
-                log.info('|{:>7}'.format(str(size))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps |")
-                log.info("+--------+-----------------+----------------+----------------+----------------+----------------+")
+                log.info('|{:>7}'.format(str(size))+" | "+ '{:>14}'.format(str(round(speed,2))) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps |"+ '{:>10}'.format(str(lat_avg))+" us |")
+                log.info("+--------+-----------------+----------------+----------------+----------------+----------------+----------------+")
                 break
             speed = new_speed(speed,drop_rate)
-    gensock.quit()
-    if sutAdminIP!='none':
-        sutsock.quit()
     time.sleep(2)
 #========================================================================
+
+def init_test():
+    global sutstatcores
+    global genstatcores
+    global genrxcores
+    global gencontrolcores
+    sutstatcores = [1]
+    genstatcores = [1,2]
+    genrxcores = [2]
+    gencontrolcores = [1]
+# Running at low speed to make sure the ARP messages can get through.
+# If we do not do this, the ARP messages could be dropped by an overloaded switch and the test would not give proper results.
+# Note however that if we were to run the test steps for a very long time, the ARP entries would expire in the switch.
+# PROX will send a new ARP request every second, so chances are very low that they will all fail to get through.
+    sock[0].speed(0.01, gencontrolcores)
+    sock[0].start(genstatcores)
+    time.sleep(2)
+    sock[0].stop(gencontrolcores)
+    sock[1].start([1])
+
+vmDPIP =[]
+vmAdminIP =[]
+vmDPmac =[]
+hexDPIP =[]
+config_file =[]
+script_control =[]
+
+testconfig = ConfigParser.RawConfigParser()
+testconfig.read(test+'.test')
+required_number_of_VMs = testconfig.get('DEFAULT', 'total_number_of_vms')
 config = ConfigParser.RawConfigParser()
-config.read(stack+'.cfg')
-
-genAdminIP = config.get('Generator', 'admin_ip')
-genDPmac = config.get('Generator', 'dp_mac')
-genDPIP = config.get('Generator', 'dp_ip')
-sutAdminIP = config.get('SUT', 'admin_ip')
-sutDPmac = config.get('SUT', 'dp_mac')
-sutDPIP = config.get('SUT', 'dp_ip')
+config.read(stack+'.env')
 key = config.get('OpenStack', 'key')
-ip = genDPIP.split('.')
-hexgenDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2)
-ip = sutDPIP.split('.')
-hexsutDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2)
-with open("parameters.lua", "w") as f:
-    f.write('gen_hex_ip="'+hexgenDPIP+'"\n')
-    f.write('sut_hex_ip="'+hexsutDPIP+'"\n')
-    f.write('gen_ip="'+genDPIP+'"\n')
-    f.write('sut_ip="'+sutDPIP+'"\n')
-    f.close
-
+total_number_of_VMs = config.get('rapid', 'total_number_of_VMs')
+if int(required_number_of_VMs) > int(total_number_of_VMs):
+    log.exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_VMs,total_number_of_VMs))
+    raise Exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_VMs,total_number_of_VMs))
+for vm in range(1, int(total_number_of_VMs)+1):
+    vmAdminIP.append(config.get('VM%d'%vm, 'admin_ip'))
+    vmDPmac.append(config.get('VM%d'%vm, 'dp_mac'))
+    vmDPIP.append(config.get('VM%d'%vm, 'dp_ip'))
+    ip = vmDPIP[-1].split('.')
+    hexDPIP.append(hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2))
+for vm in range(1, int(required_number_of_VMs)+1):
+    config_file.append(testconfig.get('VM%d'%vm, 'config_file'))
+    script_control.append(testconfig.get('VM%d'%vm, 'script_control'))
+    with open("parameters%d.lua"%vm, "w") as f:
+        f.write('name="%s"\n'% testconfig.get('VM%d'%vm, 'name'))
+        f.write('local_ip="%s"\n'% vmDPIP[vm-1])
+        f.write('local_hex_ip="%s"\n'% hexDPIP[vm-1])
+        gwVM = testconfig.get('VM%d'%vm, 'gw_vm')
+        if gwVM <> 'not_used':
+            gwVMindex = int(gwVM)-1
+            f.write('gw_ip="%s"\n'% vmDPIP[gwVMindex])
+            f.write('gw_hex_ip="%s"\n'% hexDPIP[gwVMindex])
+        destVM = testconfig.get('VM%d'%vm, 'dest_vm')
+        if destVM <> 'not_used':
+            destVMindex = int(destVM)-1
+            f.write('dest_ip="%s"\n'% vmDPIP[destVMindex])
+            f.write('dest_hex_ip="%s"\n'% hexDPIP[destVMindex])
+        f.close
 #####################################################################################
-genclient = prox_ctrl(genAdminIP, key+'.pem','root')
-connect_client(genclient)
-genclient.scp_put('./gen.cfg', '/root/gen.cfg')
-genclient.scp_put('./parameters.lua', '/root/parameters.lua')
-# Creating script to bind the right network interface to the poll mode driver
-with open("devbind.sh") as f:
-    newText=f.read().replace('MACADDRESS', genDPmac)
-with open("gendevbind.sh", "w") as f:
-    f.write(newText)
-st = os.stat('gendevbind.sh')
-os.chmod('gendevbind.sh', st.st_mode | stat.S_IEXEC)
-genclient.scp_put('./gendevbind.sh', '/root/gendevbind.sh')
-cmd = '/root/gendevbind.sh'
-genclient.run_cmd(cmd)
-log.info("Generator Config files copied & running devbind.sh")
+client =[]
+sock =[]
-#####################################################################################
-if sutAdminIP!='none':
-    sutclient = prox_ctrl(sutAdminIP, key+'.pem','root')
-    connect_client(sutclient)
-    sutclient.scp_put('./sut.cfg', '/root/sut.cfg')
-    sutclient.scp_put('./parameters.lua', '/root/parameters.lua')
+for vm in range(0, int(required_number_of_VMs)):
+    client.append(prox_ctrl(vmAdminIP[vm], key+'.pem','root'))
+    connect_client(client[-1])
     # Creating script to bind the right network interface to the poll mode driver
+    devbindfile = "devbindvm%d.sh"%(vm+1)
     with open("devbind.sh") as f:
-        newText=f.read().replace('MACADDRESS', sutDPmac)
-    with open("sutdevbind.sh", "w") as f:
-        f.write(newText)
-    st = os.stat('sutdevbind.sh')
-    os.chmod('sutdevbind.sh', st.st_mode | stat.S_IEXEC)
-    sutclient.scp_put('./sutdevbind.sh', '/root/sutdevbind.sh')
-    cmd = '/root/sutdevbind.sh'
-    sutclient.run_cmd(cmd)
-    log.info("SUT Config files copied & running devbind.sh")
-run_speedtest()
-run_flowtest()
-run_sizetest()
-#####################################################################################
-genclient.close()
-if sutAdminIP!='none':
-    sutclient.close()
+        newText=f.read().replace('MACADDRESS', vmDPmac[vm])
+    with open(devbindfile, "w") as f:
+        f.write(newText)
+    st = os.stat(devbindfile)
+    os.chmod(devbindfile, st.st_mode | stat.S_IEXEC)
+    client[-1].scp_put('./%s'%devbindfile, '/root/devbind.sh')
+    cmd = '/root/devbind.sh'
+    client[-1].run_cmd(cmd)
+    log.info("devbind.sh running on VM%d"%(vm+1))
+    client[-1].scp_put('./%s'%config_file[vm], '/root/%s'%config_file[vm])
+    client[-1].scp_put('./parameters%d.lua'%(vm+1), '/root/parameters.lua')
+    log.info("Starting PROX on VM%d"%(vm+1))
+    if script_control[vm] == 'true':
+        cmd = '/root/prox/build/prox -e -t -o cli -f /root/%s'%config_file[vm]
+    else:
+        cmd = '/root/prox/build/prox -t -o cli -f /root/%s'%config_file[vm]
+    client[-1].fork_cmd(cmd, 'PROX Testing on VM%d'%(vm+1))
+    sock.append(connect_socket(client[-1]))
+
+init_code = testconfig.get('DEFAULT', 'init_code')
+eval(init_code)
+####################################################
+# Run test cases
+# Best to run the flow test at the end since otherwise the tests coming after that might be influenced by the large number of entries in the switch flow tables
+####################################################
+number_of_tests = testconfig.get('DEFAULT', 'number_of_tests')
+for vm in range(1, int(number_of_tests)+1):
+    cmd=testconfig.get('test%d'%vm,'cmd')
+    eval(cmd)
+####################################################
+for vm in range(0, int(required_number_of_VMs)):
+    sock[vm].quit()
+    client[vm].close()
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test
new file mode 100644
index 00000000..3f537e33
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test
@@ -0,0 +1,31 @@
+[DEFAULT]
+name = GWTesting
+number_of_tests = 2
+total_number_of_vms = 3
+init_code=init_test()
+dest_vm = not_used
+gw_vm = not_used
+script_control = false
+
+
+[VM1]
+name = Generator
+config_file = gen_gw.cfg
+dest_vm = 3
+gw_vm = 2
+script_control = true
+
+[VM2]
+name = GW1
+config_file = secgw1.cfg
+dest_vm = 3
+
+[VM3]
+name = GW2
+config_file = secgw2.cfg
+
+[test1]
+cmd=run_speedtest(sock[0],'none',sutstatcores,genstatcores,gencontrolcores)
+
+[test2]
+cmd=run_sizetest(sock[0],'none',sutstatcores,genstatcores,gencontrolcores)
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg
new file mode 100644
index 00000000..c4aa82fa
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg
@@ -0,0 +1,54 @@
+;;
+;; Copyright (c) 2010-2017 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+;;
+; This is a sample ESP config.
+;;
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[lua]
+dofile("parameters.lua")
+
+[port 0]
+name=if
+mac=hardware
+
+[variables]
+$tun_hop_limit=5
+$lookup_port_mask=0xffc0
+
+[defaults]
+mempool size=16K
+
+[global]
+start time=20
+name=${name}
+
+[core 0]
+mode=master
+
+[core 1]
+name=esp_enc
+task=0
+mode=esp_enc
+sub mode=l3
+local ipv4=${local_ip}
+remote ipv4=${dest_ip}
+rx port=if
+tx port=if
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg
new file mode 100644
index 00000000..6b9c7500
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg
@@ -0,0 +1,55 @@
+;;
+;; Copyright (c) 2010-2017 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+;;
+; This is a sample ESP config.
+;;
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[lua]
+dofile("parameters.lua")
+
+[port 0]
+name=if
+mac=hardware
+
+[variables]
+$tun_hop_limit=5
+$lookup_port_mask=0xffc0
+
+[defaults]
+mempool size=16K
+
+[global]
+start time=20
+name=${name}
+
+[core 0]
+mode=master
+
+[core 1]
+name=esp_dec
+task=0
+mode=esp_dec
+sub mode=l3
+local ipv4=${local_ip}
+rx port=if
+tx port=if
+
+
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sriovrapid.yaml b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sriovrapid.yaml
index e36eff44..d76e1ee3 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sriovrapid.yaml
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sriovrapid.yaml
@@ -37,7 +37,7 @@ parameters:
     label: Private network name or ID
     description: Network to attach instance to.
     default: dataplane-network
-  admin_network:
+  internal_network:
     type: string
     label: Private network name or ID
     description: Network to attach instance to.
@@ -51,28 +51,32 @@ parameters:
     type: string
     description: The Availability Zone to launch the instance.
     default: nova
-  gen_sriov_port:
+  vm1_sriov_port:
     type: string
-    description: The sriov port to be used by the generator
-    default: Port3
-  sut_sriov_port:
+    description: The sriov port to be used by VM1
+    default: Port1
+  vm2_sriov_port:
     type: string
-    description: The sriov port to be used by the sut
-    default: Port4
+    description: The sriov port to be used by VM2
+    default: Port2
+#  vm3_sriov_port:
+#    type: string
+#    description: The sriov port to be used by VM3
+#    default: Port3
 resources:
-  sut_admin_port:
+  vm1_admin_port:
     type: OS::Neutron::Port
     properties:
-      network: {get_param: admin_network}
+      network: {get_param: internal_network}
       security_groups:
         - default
-  sut_floating_ip:
+  vm1_floating_ip:
     type: OS::Neutron::FloatingIP
     properties:
       floating_network: {get_param: floating_network}
-      port_id: {get_resource: sut_admin_port}
-  sut:
+      port_id: {get_resource: vm1_admin_port}
+  vm1:
     type: OS::Nova::Server
     properties:
       availability_zone: { get_param: availability_zone }
@@ -82,20 +86,20 @@ resources:
       image: { get_param: image }
       flavor: { get_param: flavor }
       networks:
-        - port: {get_resource: sut_admin_port}
-        - port: {get_param: sut_sriov_port}
-  gen_admin_port:
+        - port: {get_resource: vm1_admin_port}
+        - port: {get_param: vm1_sriov_port}
+  vm2_admin_port:
     type: OS::Neutron::Port
     properties:
-      network: {get_param: admin_network}
+      network: {get_param: internal_network}
       security_groups:
         - default
-  gen_floating_ip:
+  vm2_floating_ip:
     type: OS::Neutron::FloatingIP
     properties:
       floating_network: {get_param: floating_network}
-      port_id: {get_resource: gen_admin_port}
-  gen:
+      port_id: {get_resource: vm2_admin_port}
+  vm2:
     type: OS::Nova::Server
     properties:
       availability_zone: { get_param: availability_zone }
@@ -105,32 +109,69 @@ resources:
       image: { get_param: image }
       flavor: { get_param: flavor }
      networks:
-        - port: {get_resource: gen_admin_port}
-        - port: {get_param: gen_sriov_port}
+        - port: {get_resource: vm2_admin_port}
+        - port: {get_param: vm2_sriov_port}
+#  vm3_admin_port:
+#    type: OS::Neutron::Port
+#    properties:
+#      network: {get_param: internal_network}
+#      security_groups:
+#        - default
+#  vm3_floating_ip:
+#    type: OS::Neutron::FloatingIP
+#    properties:
+#      floating_network: {get_param: floating_network}
+#      port_id: {get_resource: vm3_admin_port}
+#  vm3:
+#    type: OS::Nova::Server
+#    properties:
+#      availability_zone: { get_param: availability_zone }
+#      user_data:
+#        get_file: prox_user_data.sh
+#      key_name: { get_param: key }
+#      image: { get_param: image }
+#      flavor: { get_param: flavor }
+#      networks:
+#        - port: {get_resource: vm3_admin_port}
+#        - port: {get_param: vm3_sriov_port}
 outputs:
-  sut_private_ip:
-    description: IP address of the sut admin port
-    value: { get_attr: [sut_admin_port, fixed_ips, 0, ip_address] }
-  sut_public_ip:
-    description: Floating IP address of sut in public network
-    value: { get_attr: [ sut_floating_ip, floating_ip_address ] }
-  sut_dataplane_ip:
-    description: IP address of sut dataplane port
-    value: { get_attr: [sut, networks,{get_param: dataplane_network},0] }
-  sut_dataplane_mac:
-    description: The MAC address of the sut dataplane port
-    value: { get_attr: [sut, addresses, {get_param: dataplane_network}] }
-  gen_private_ip:
-    description: IP address of the gen admin port
-    value: { get_attr: [gen_admin_port, fixed_ips, 0, ip_address] }
-  gen_public_ip:
-    description: Floating IP address of gen in public network
-    value: { get_attr: [ gen_floating_ip, floating_ip_address ] }
-  gen_dataplane_ip:
-    description: IP address of gen dataplane port
-    value: { get_attr: [gen, networks,{get_param: dataplane_network},0] }
-  gen_dataplane_mac:
-    description: The MAC address of the gen dataplane port
-    value: { get_attr: [gen, addresses, {get_param: dataplane_network}] }
-
+  total_number_of_VMs:
+    description: Number of VMs created by this stack
+    value: 2
+  vm1_private_ip:
+    description: IP address of VM1 admin port
+    value: { get_attr: [vm1_admin_port, fixed_ips, 0, ip_address] }
+  vm1_public_ip:
+    description: Floating IP address of VM1 in public network
+    value: { get_attr: [ vm1_floating_ip, floating_ip_address ] }
+  vm1_dataplane_ip:
+    description: IP address of VM1 dataplane port
+    value: { get_attr: [vm1, networks,{get_param: dataplane_network},0] }
+  vm1_dataplane_mac:
+    description: The MAC address of VM1 dataplane port
+    value: { get_attr: [vm1, addresses, {get_param: dataplane_network}] }
+  vm2_private_ip:
+    description: IP address of VM2 admin port
+    value: { get_attr: [vm2_admin_port, fixed_ips, 0, ip_address] }
+  vm2_public_ip:
+    description: Floating IP address of VM2 in public network
+    value: { get_attr: [ vm2_floating_ip, floating_ip_address ] }
+  vm2_dataplane_ip:
+    description: IP address of VM2 dataplane port
+    value: { get_attr: [vm2, networks,{get_param: dataplane_network},0] }
+  vm2_dataplane_mac:
+    description: The MAC address of VM2 dataplane port
+    value: { get_attr: [vm2, addresses, {get_param: dataplane_network}]}
+#  vm3_private_ip:
+#    description: IP address of VM3 admin port
+#    value: { get_attr: [vm3_admin_port, fixed_ips, 0, ip_address] }
+#  vm3_public_ip:
+#    description: Floating IP address of VM3 in public network
+#    value: { get_attr: [ vm3_floating_ip, floating_ip_address ] }
+#  vm3_dataplane_ip:
#    description: IP address of VM3 dataplane port
+#    value: { get_attr: [vm3, networks,{get_param: dataplane_network},0] }
+#  vm3_dataplane_mac:
+#    description: The MAC address of VM3 dataplane port
+#    value: { get_attr: [vm3, addresses, {get_param: dataplane_network}]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg
new file mode 100644
index 00000000..7b8dd492
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg
@@ -0,0 +1,46 @@
+;;
+;; Copyright (c) 2010-2017 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[lua]
+dofile("parameters.lua")
+
+[port 0]
+name=if0
+mac=hardware
+
+[defaults]
+mempool size=2K
+
+[global]
+name=${name}
+
+[core 0]
+mode=master
+
+[core 1]
+name=swap
+task=0
+mode=swap
+sub mode=l3
+rx port=if0
+tx port=if0
+local ipv4=${local_ip}
+drop=no
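A note on the speed search used by run_speedtest(), run_flowtest() and run_sizetest() in the runrapid.py changes above: each loop keeps lowering the requested generator speed with new_speed() until the value returned by get_drop_rate() falls below 1. Neither helper is defined in this diff, so the following is only a hypothetical Python sketch of such a drop-rate calculation, assuming that speed is a percentage of a 10Gb/s link, size is the L2 frame size in bytes, pps_rx is the measured receive rate in Mpps, and that each frame occupies an extra 20 bytes of preamble and inter-frame gap on the wire.

# Hypothetical sketch only; not the helper shipped with runrapid.py.
def get_drop_rate(speed, pps_rx, size):
    # Line rate in Mpps for a 10Gb/s link at this frame size:
    # each frame occupies (size + 20) bytes, i.e. (size + 20) * 8 bits, on the wire.
    line_rate_mpps = 10000.0 / ((size + 20) * 8)
    expected_mpps = line_rate_mpps * speed / 100.0
    # Percentage of the expected packets that did not come back.
    return 100.0 * (expected_mpps - pps_rx) / expected_mpps

# Example: 64-byte frames at 100% of 10Gb/s is ~14.88 Mpps, so receiving
# only 10 Mpps back would report a drop rate of roughly 32.8%.
print(round(get_drop_rate(100, 10.0, 64), 1))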
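On the flows dictionary in run_flowtest() above: each entry maps a flow count to a pair of 16-bit masks for the source and destination UDP ports, and every X in a mask is a bit that gensock.set_random() randomizes, so the number of distinct flows is 2 raised to the total number of X bits (the 65535 and 524280 keys are simply labels close to 2^16 and 2^19). A small check of that relationship, using a hypothetical helper name:

# Hypothetical helper: derive the flow count from a pair of port bit masks
# in the same format as the flows dictionary in run_flowtest().
def flow_count(src_mask, dst_mask):
    return 2 ** (src_mask.count('X') + dst_mask.count('X'))

# 3 + 4 randomized bits -> 128 flows, 5 + 5 -> 1024, 6 + 7 -> 8192
print(flow_count('1000000000000XXX', '100000000000XXXX'))
print(flow_count('10000000000XXXXX', '10000000000XXXXX'))
print(flow_count('1000000000XXXXXX', '100000000XXXXXXX'))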
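On the size loop in run_sizetest() above (where the largest size also changes from 1500 to 1400 bytes in this patch): gensock.set_size() sets the Ethernet frame size, while gensock.set_value(gencores,0,16,(size-18),2) writes a two-byte value at offset 16, which per the inline comment is the frame size minus the 18 bytes of Ethernet header and FCS. A small illustrative calculation of that adjustment:

# Illustration of the size-18 adjustment: the value written into the packet
# excludes the 14-byte Ethernet header (two MACs + ethertype) and the 4-byte FCS.
ETHERNET_OVERHEAD = 12 + 2 + 4   # MAC addresses, ethertype, FCS = 18 bytes

for frame_size in [1400, 1024, 512, 256, 128, 64]:
    value = frame_size - ETHERNET_OVERHEAD
    print('frame %4d bytes -> value written at offset 16: %4d' % (frame_size, value))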
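On the parameters%d.lua files generated by the runrapid.py changes above: every dataplane IP is written both in dotted notation and as a space-separated string of two-digit hex bytes (local_hex_ip, gw_hex_ip, dest_hex_ip) for use by the PROX configuration files. A standalone sketch of that same conversion, using a hypothetical helper name:

# Hypothetical helper mirroring the inline conversion in runrapid.py:
# '10.10.10.4' -> '0a 0a 0a 04'
def dotted_to_hex(ip):
    return ' '.join(hex(int(octet))[2:].zfill(2) for octet in ip.split('.'))

print(dotted_to_hex('10.10.10.4'))    # 0a 0a 0a 04
print(dotted_to_hex('192.168.1.25'))  # c0 a8 01 19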