From 3717375b27980ec55fe54f23077d01e51f130b6f Mon Sep 17 00:00:00 2001
From: kubi
Date: Tue, 19 Jan 2016 16:02:05 +0800
Subject: support os+odl for ipv6

As we know, the network in the os+odl scenario is still not available,
so this patch still needs further debugging. However, Jan 19th is the
code freeze day, so I think the best approach is to merge it first;
once os+odl becomes available, I will do the remaining debugging and
fix it.

Change-Id: I518f759069e74d2e57e40b52dfecaef4b4d26a02
Signed-off-by: kubi
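
Once the scenario works, the new sample should be runnable like any
other Yardstick task (a sketch; the pod file referenced in
samples/ping6_odl.yaml is deployment-specific and must describe your
own nodes):

    yardstick task start samples/ping6_odl.yaml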
---
 samples/ping6_odl.yaml                             |  34 ++++++
 yardstick/benchmark/scenarios/networking/ping6.py  |  40 +++++--
 .../scenarios/networking/ping6_radvd.conf          |  11 ++
 .../scenarios/networking/ping6_setup_with_odl.bash | 119 +++++++++++++++++++++
 4 files changed, 196 insertions(+), 8 deletions(-)
 create mode 100644 samples/ping6_odl.yaml
 create mode 100644 yardstick/benchmark/scenarios/networking/ping6_radvd.conf
 create mode 100644 yardstick/benchmark/scenarios/networking/ping6_setup_with_odl.bash

diff --git a/samples/ping6_odl.yaml b/samples/ping6_odl.yaml
new file mode 100644
index 000000000..cfb556e51
--- /dev/null
+++ b/samples/ping6_odl.yaml
@@ -0,0 +1,34 @@
+---
+# Sample test case for ipv6
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Ping6
+  options:
+    packetsize: 200
+    sdn: odl
+    host: host1,host2,host3,host4,host5
+  nodes:
+    host1: node1.IPV6
+    host2: node2.IPV6
+    host3: node3.IPV6
+    host4: node4.IPV6
+    host5: node5.IPV6
+  runner:
+    type: Iteration
+    iterations: 1
+    interval: 1
+  run_step: 'setup,run,teardown'
+  sla:
+    max_rtt: 10
+    action: monitor
+
+
+context:
+  type: Node
+  name: IPV6
+  file: /root/yardstick/etc/yardstick/nodes/compass_sclab_virtual/pod.yaml
+
+
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
index 5364cdf26..e68756462 100644
--- a/yardstick/benchmark/scenarios/networking/ping6.py
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -27,9 +27,11 @@ class Ping6(base.Scenario):  # pragma: no cover
     TARGET_SCRIPT = 'ping6_benchmark.bash'
     PRE_SETUP_SCRIPT = 'ping6_pre_setup.bash'
     SETUP_SCRIPT = 'ping6_setup.bash'
+    SETUP_ODL_SCRIPT = 'ping6_setup_with_odl.bash'
     FIND_HOST_SCRIPT = 'ping6_find_host.bash'
     TEARDOWN_SCRIPT = 'ping6_teardown.bash'
     METADATA_SCRIPT = 'ping6_metadata.txt'
+    RADVD_SCRIPT = 'ping6_radvd.conf'
     POST_TEARDOWN_SCRIPT = 'ping6_post_teardown.bash'
 
     def __init__(self, scenario_cfg, context_cfg):
@@ -64,6 +66,10 @@ class Ping6(base.Scenario):  # pragma: no cover
             'yardstick.benchmark.scenarios.networking',
             Ping6.SETUP_SCRIPT)
 
+        self.setup_odl_script = pkg_resources.resource_filename(
+            'yardstick.benchmark.scenarios.networking',
+            Ping6.SETUP_ODL_SCRIPT)
+
         self.pre_setup_script = pkg_resources.resource_filename(
             'yardstick.benchmark.scenarios.networking',
             Ping6.PRE_SETUP_SCRIPT)
@@ -72,6 +78,10 @@ class Ping6(base.Scenario):  # pragma: no cover
             'yardstick.benchmark.scenarios.networking',
             Ping6.METADATA_SCRIPT)
 
+        self.ping6_radvd_script = pkg_resources.resource_filename(
+            'yardstick.benchmark.scenarios.networking',
+            Ping6.RADVD_SCRIPT)
+
         options = self.scenario_cfg['options']
         host_str = options.get("host", 'host1')
         self.host_list = host_str.split(',')
@@ -82,12 +92,23 @@ class Ping6(base.Scenario):  # pragma: no cover
         # ssh host1
         self._ssh_host(self.host_list[0])
 
-        # run script to setup ipv6
-        self.client.run("cat > ~/setup.sh",
-                        stdin=open(self.setup_script, "rb"))
+
         self.client.run("cat > ~/metadata.txt",
                         stdin=open(self.ping6_metadata_script, "rb"))
-        cmd = "sudo bash setup.sh"
+
+        # run script to setup ipv6 with nosdn or odl
+        sdn = options.get("sdn", 'nosdn')
+        if 'odl' in sdn:
+            self.client.run("cat > ~/br-ex.radvd.conf",
+                            stdin=open(self.ping6_radvd_script, "rb"))
+            self.client.run("cat > ~/setup_odl.sh",
+                            stdin=open(self.setup_odl_script, "rb"))
+            cmd = "sudo bash setup_odl.sh"
+        else:
+            self.client.run("cat > ~/setup.sh",
+                            stdin=open(self.setup_script, "rb"))
+            cmd = "sudo bash setup.sh"
+
         status, stdout, stderr = self.client.execute(cmd)
         self.setup_done = True
@@ -109,32 +130,35 @@ class Ping6(base.Scenario):  # pragma: no cover
         self.host_list = host_str.split(',')
         self.host_list.sort()
         self._ssh_host(self.host_list[0])
+
+        # find ipv4-int-network1 to ssh VM
         self.client.run("cat > ~/find_host.sh",
                         stdin=open(self.ping6_find_host_script, "rb"))
         cmd = "sudo bash find_host.sh"
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         host_name = stdout.strip()
-        print host_name
+
+        # copy vRouterKey to target host
         self.client.run("cat ~/vRouterKey",
                         stdout=open("/tmp/vRouterKey", "w"))
         self._ssh_host(host_name)
-
         self.client.run("cat > ~/vRouterKey",
                         stdin=open("/tmp/vRouterKey", "rb"))
+
+        # run ping6 benchmark
         self.client.run("cat > ~/ping6.sh",
                         stdin=open(self.ping6_script, "rb"))
         cmd = "sudo bash ping6.sh"
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
-        print stdout
+
         if status:
             raise RuntimeError(stderr)
 
+        # sla
         if stdout:
             result["rtt"] = float(stdout)
-
         if "sla" in self.scenario_cfg:
             sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
             assert result["rtt"] <= sla_max_rtt, \
diff --git a/yardstick/benchmark/scenarios/networking/ping6_radvd.conf b/yardstick/benchmark/scenarios/networking/ping6_radvd.conf
new file mode 100644
index 000000000..7833bf282
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_radvd.conf
@@ -0,0 +1,11 @@
+interface $router_interface
+{
+    AdvSendAdvert on;
+    MinRtrAdvInterval 3;
+    MaxRtrAdvInterval 10;
+    prefix 2001:db8:0:1::/64
+    {
+        AdvOnLink on;
+        AdvAutonomous on;
+    };
+};
\ No newline at end of file
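
Note: $router_interface in ping6_radvd.conf is a placeholder that the
setup script below substitutes with sed before starting radvd. A quick
syntax check of a rendered copy is possible with radvd's config-test
mode (a sketch; eth0 is only a stand-in interface name, and -c assumes
a radvd build that supports config-test):

    sed 's/$router_interface/eth0/g' ping6_radvd.conf > /tmp/radvd.test.conf
    radvd -c -C /tmp/radvd.test.conf && echo "config OK"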
diff --git a/yardstick/benchmark/scenarios/networking/ping6_setup_with_odl.bash b/yardstick/benchmark/scenarios/networking/ping6_setup_with_odl.bash
new file mode 100644
index 000000000..f9a2c4094
--- /dev/null
+++ b/yardstick/benchmark/scenarios/networking/ping6_setup_with_odl.bash
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# need to debug
+
+# download and create image
+source /opt/admin-openrc.sh
+wget https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-22-20150521.x86_64.qcow2
+glance image-create --name 'Fedora22' --disk-format qcow2 \
+--container-format bare --file ./Fedora-Cloud-Base-22-20150521.x86_64.qcow2
+
+# create routers
+neutron router-create ipv4-router
+neutron router-create ipv6-router
+
+# associate ext-net with the Neutron routers
+neutron router-gateway-set ipv6-router ext-net
+neutron router-gateway-set ipv4-router ext-net
+
+# create two ipv4 networks with associated subnets
+neutron net-create ipv4-int-network1
+neutron net-create ipv4-int-network2
+
+# create an IPv4 subnet and associate it with ipv4-router
+neutron subnet-create --name ipv4-int-subnet1 \
+--dns-nameserver 8.8.8.8 ipv4-int-network1 20.0.0.0/24
+
+# associate ipv4-int-subnet1 with ipv4-router
+neutron router-interface-add ipv4-router ipv4-int-subnet1
+
+# BIN-HU: here, for Scenario 2, ipv6-int-subnet2 cannot be created automatically
+# BIN-HU: because of a bug in ODL, so we need to manually spawn a RADVD daemon
+# BIN-HU: in the ipv6-router namespace
+
+# create an IPv4 subnet ipv4-int-subnet2 and associate it with ipv6-router
+neutron subnet-create --name ipv4-int-subnet2 --dns-nameserver 8.8.8.8 ipv4-int-network2 10.0.0.0/24
+
+neutron router-interface-add ipv6-router ipv4-int-subnet2
+
+# BIN-HU: for the reason above in Scenario 2, we need to remove the following command
+
+# create key
+nova keypair-add vRouterKey > ~/vRouterKey
+
+# create ports for vRouter
+neutron port-create --name eth0-vRouter --mac-address fa:16:3e:11:11:11 ipv4-int-network2
+neutron port-create --name eth1-vRouter --mac-address fa:16:3e:22:22:22 ipv4-int-network1
+
+# create ports for VM1 and VM2
+neutron port-create --name eth0-VM1 --mac-address fa:16:3e:33:33:33 ipv4-int-network1
+neutron port-create --name eth0-VM2 --mac-address fa:16:3e:44:44:44 ipv4-int-network1
+
+# the radvd.conf and metadata.txt files used here come from the following repo;
+# metadata.txt is available at:
+# https://github.com/sridhargaddam/opnfv_os_ipv6_poc/blob/master/metadata.txt
+# boot vRouter
+nova boot --image Fedora22 --flavor m1.small \
+--user-data ./metadata.txt \
+--nic port-id=$(neutron port-list | grep -w eth0-vRouter | awk '{print $2}') \
+--nic port-id=$(neutron port-list | grep -w eth1-vRouter | awk '{print $2}') \
+--key-name vRouterKey vRouter
+
+# BIN-HU: note that some other parameters might be needed in Scenario 2 if it does not work;
+# BIN-HU: refer to http://artifacts.opnfv.org/ipv6/docs/setupservicevm/4-ipv6-configguide-servicevm.html#boot-two-other-vms-in-ipv4-int-network1
+# BIN-HU: Section 3.5.7
+# boot VM1 and VM2
+nova boot --image Fedora22 --flavor m1.small \
+--nic port-id=$(neutron port-list | grep -w eth0-VM1 | awk '{print $2}') \
+--key-name vRouterKey VM1
+
+nova boot --image Fedora22 --flavor m1.small \
+--nic port-id=$(neutron port-list | grep -w eth0-VM2 | awk '{print $2}') \
+--key-name vRouterKey VM2
+
+nova list
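+
+# optional, untested: nova boot returns before the guests are ACTIVE, so a
+# short poll here avoids racing the RADVD steps below; the count of 3
+# assumes only vRouter, VM1 and VM2 exist in this tenant
+until [ "$(nova list | grep -c ACTIVE)" -ge 3 ]; do sleep 5; done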
+
+# BIN-HU: now we need to spawn a RADVD daemon inside the ipv6-router namespace.
+# BIN-HU: The following is specific to Scenario 2, to spawn a RADVD daemon in
+# BIN-HU: the ipv6-router namespace; refer to
+# BIN-HU: http://artifacts.opnfv.org/ipv6/docs/setupservicevm/4-ipv6-configguide-servicevm.html#spawn-radvd-in-ipv6-router
+# BIN-HU: Section 3.5.8, steps SETUP-SVM-24 through SETUP-SVM-30.
+# BIN-HU: Also note that in an HA deployment, the ipv6-router created in the
+# BIN-HU: previous step could be on any of the controller nodes, so you need to
+# BIN-HU: identify the controller node hosting ipv6-router in order to manually
+# BIN-HU: spawn the RADVD daemon inside its namespace in the following steps.
+# BIN-HU: JFYI, the following Neutron command displays the controller on which
+# BIN-HU: ipv6-router is hosted.
+neutron l3-agent-list-hosting-router ipv6-router
+
+# find the host on which ipv6-router is located (still needs debugging)
+host_num=$(neutron l3-agent-list-hosting-router ipv6-router | grep True | awk -F [=\ ] '{printf $4}')
+ssh $host_num
+
+# BIN-HU: identify the ipv6-router namespace and move into it
+sudo ip netns exec qrouter-$(neutron router-list | grep -w ipv6-router | awk '{print $2}') bash
+
+# BIN-HU: inside the ipv6-router namespace, configure the IPv6 address on the interface
+export router_interface=$(ip a s | grep -w "global qr-*" | awk '{print $7}')
+ip -6 addr add 2001:db8:0:1::1 dev $router_interface
+
+# BIN-HU: update the sample file radvd.conf with $router_interface
+sed -i 's/$router_interface/'$router_interface'/g' ~/br-ex.radvd.conf
+
+# BIN-HU: spawn a RADVD daemon to simulate an external router
+radvd -C ~/br-ex.radvd.conf -p ~/br-ex.pid.radvd
+
+# BIN-HU: add an IPv6 downstream route pointing to the eth0 interface of vRouter
+ip -6 route add 2001:db8:0:2::/64 via 2001:db8:0:1:f816:3eff:fe11:1111
+
+# BIN-HU: you can double-check the routing table
+ip -6 route show
+
+exit
+# BIN-HU: end of Scenario 2; you can continue with SSH etc., the same as Scenario 1
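+
+# optional, untested: radvd was started with -p above, so its pid file can be
+# used to confirm the daemon is still running after leaving the namespace
+test -r ~/br-ex.pid.radvd && ps -p "$(cat ~/br-ex.pid.radvd)"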