From 9bb0cf584c67ddd1be93a39e2ef4481a4a6e2d13 Mon Sep 17 00:00:00 2001 From: MofassirArif Date: Tue, 19 Jan 2016 10:07:50 -0800 Subject: docs: add docs for usage, introduction and iperf testcase Change-Id: Ida3460ddd5d2b377351681e5f1d2457ec76ae95f Signed-off-by: MofassirArif (cherry picked from commit 67373633d382f3152d970a22192b4fc7c11248b7) --- data/hosts | 4 +- data/my_key.pem | 50 ++-- docs/how-to-use-docs/03-usage-guide.rst | 274 +++++++++++++++++++++ docs/how-to-use-docs/index.rst | 3 + docs/iperf_testcase.rst | 42 ++++ opnfv-creds.sh | 2 +- test_cases/default/network/iperf_vm.yaml | 4 +- .../dell-us-deploying-bm3/network/iperf_bm.yaml | 2 +- 8 files changed, 350 insertions(+), 31 deletions(-) create mode 100644 docs/how-to-use-docs/03-usage-guide.rst create mode 100644 docs/iperf_testcase.rst diff --git a/data/hosts b/data/hosts index 30ea3988..ab071623 100644 --- a/data/hosts +++ b/data/hosts @@ -1,4 +1,4 @@ [2-host] -172.18.0.119 +172.18.1.92 [1-server] -172.18.0.118 +172.18.1.93 diff --git a/data/my_key.pem b/data/my_key.pem index 58e5a389..41d9e669 100644 --- a/data/my_key.pem +++ b/data/my_key.pem @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAxYmqpbNeM7MT0o0D2wpFSuGHQThXzSods3QxL5x7r6A1Gs2t -VNoZEkBGtymyeD85NPA5SBYErUFs+5+h09j64a5n+s7zJ17v6WPSvz3gXsQmcsKc -VVsn+0KI7TJUsv/HPTT9yICXN8mbePp0SoNWwr9dZbFlBGcELDFghT91JuQekN5T -wsqaRMmRZ/rwj3dpsnFYJNMc9r+wPUbDletZowgdvddsoWUmof4TMxKLCLiiCzzS -TgVhuf0eb1VdCoV3miTg/sE5pSE/jLIGHIzZzXp1ShckUCZXfJzfoBuTDde30Wdk -+XtX4xhwifnIocfkeI2L3VIDX7RVtaS2+GSvEwIDAQABAoIBAQCIn/73PGgbOfCr -3/yasy/Z6sKxyVZxAIAqbmLWm1Sw1A3my/rmhTJx/SLr7FsT8CaRBtWXliMF8gp+ -vpoe/CQJk6c3QYvL303wDqrkutdEtEYjeZbHMpUko5Aw/m62n1Iec1hUJRxx6W8u -7YshPlXzvIfMnjVQJjAsoLoxbwKIMmgYmRytO2W5hPnM/o5VO6lLHur1jEsHt6df -BgMy2mrKeec4AlIr88+ni4ayDSjdE2C2l+x3lLRz75Wrv5iWAOj99hnpY5ChOU9g -y0aJTmQ5iZLSSbpQ7UPYbkrv2jERd+HXTntVaEetf6cwnGyyjrOGy0BxxzPHjPW3 -ux85RCUBAoGBAOPEwbDnseune7+Dkz1DhdicBkV/9bB32B77Yt7XpjsMJZGko+44 
-4mQZvRddWfHx9hr3p+MpYFD71pugO0qfxRcZIC3UhvltlD9Ez7ZC8HO45OKxhSIm -xHoHSXRLgF3YBTgjoUXLZns/+bu8J63ZbfbYg1jTTkQcEccLCXyKu+3JAoGBAN4F -qqhsLLnh1Fo9XbJIAGUKzirWEnbGRtDV66b5j90MVMHanZ0NHpJN5/xM44ymuvMY -Un/oi+BZdm2wtb5wB3+vj4tRR2MpIHiWh9ELSHcGI3bJkR+9oEeDUfRVB3VNcmxv -mhmONTp+B4v1SghDivYWmo86C3ysXLi/1klnZ7P7AoGBAM2T+X7CoUQhlv/0siDJ -oTUxHjf8lrUAdoEAROz9l3wUKpSaFZwem7fdw14jU9ucmJUektnlrplptPoiVWG1 -cx61/uVevbTDwtqYMSJAqObKK0yxDYkVlKDPkuz0eJg7MfrJrfZg786un6li2i1/ -4lC6e1Lg5fNzolgVDirqzVSBAoGAR1ekzffsq1JQzSp46CfQ0KcXNpaRWk8+RC7p -ST9aJhqnRZ99FBE6KKMWD3GZkQGmgyTmpalRASdeMcMds3MGRdZhFtBoUwnNIFKm -k9q/T1fOn4YHtx5U2YXuGMgV3HClewiliN60ZfZHcIbCYkNp7Me4pJtvQ4GTTd5+ -+hlbLm8CgYEAsQA3/qFgS9/QF+IZ/7fgv5wMuGJOF607sM43O8Fxz1KFAYLQaL8T -jIrFCNvqmNHf8bYPBWt0/jo6+rov8avWUvffeqrEuKIgqW0068HUyeU9rKdYNgw7 -XjU0R0K16I9GQjYQph2IZlJaWBXuRZKrO7Nhqw/ck6ANzhKcJ114suA= +MIIEogIBAAKCAQEAzVkXQaXre1h1FmIOAPwgBst5y5crMVI6dSPIBqo0sLU4tCG7 +WdpZCYlyymj64gjOrHNT0rwX5t0YRywzh9mzOej5PDPBpS4uWxnb+jRpoy/m7fKp +HDJgas8RWhRJ6CSMoCfD7ZxvEPnhHVNpgscKipE8ivoY173T9dGothiY48Z6430g +NoLLIHiunL4lUyeBwZ9Sn6rLdHkL558wlxpiNbnaxCSIKywmuH3KiAVTQhAWGNQp +a4BU4hP844tFiTVT7+oySHhEZokvBlE1RTc199s752tyEN1eBQGycsWw3csJ6Eth +bwVWlKaGzJkTc+jcoOX420easK2Jrkha8Jk05QIDAQABAoIBAHuhlNfobiMf+baV +KHs9UIbmwJhrlgymxh06grZIiVqOcOo6mNKbHBoaz6q/k7S8urmm4aOxrO5I1NIc +8ZVr43UNJ+kv+/lYGX6tzfwQzDz8nRtLirc4OUZ1DqxeJLUINEZESrjnAxOEbh06 +1/5tmZIdqQa/Vm+lkVSheuLPYlVXWDPhJNhYZlAfbR6TiRnEmLnuTiuV4KHbuT9u +OpeB2PBGFF7qmlAFKMHTNQ/tMQvKZqZCD+u49spN33XJ7vp18WjCjlfYf7l5ysTX +BqOtIaqp/mCi/o45u3OWVG2R4JUEAVEYYjohS7BSs8pqpeyGDGOOykYP3Tl1zakt +CXReZWECgYEA+F/oOfH3bBu/tR4vjl7faFJaiJxqgwzg1NlmE9b9S4NSb2DQuv98 +o74kf61MiMg8XcuuJzkpbEze8BJUsacMtlHWI5G1KMgUfLYhjqE/YRZj9eUO/Cb/ +pSWzHOSC1Tbz9FRP6+8DcHB/TWRId8iX6RRsFWBA/fGflISY2l01E+kCgYEA06cE +axalbEonNJZ2F8Dnj2DBMoKmkODQw7JSqYYgePiSiPlBBFkCGzUu0nrxduWIXVPp +MdcIlm+B2C6CWNA9Na4x6I+5cy6ku4+lt8Oz59pqY0x909iyYJ6SA8GeaQcPnoVu +c5h/G2eA3oc6SxsMK/22GwNR55CPkAva9+A3p50CgYAEXLLYaa59wJMCXFBbgMEN 
+tPyQD6czPAOq2VKYoJr8O4c0G5Au6JPI0GsVrvZ8JIAi6ZPabn+Svlrf/oJsSFHJ +1fAb2dBDshfiBNTcC2rwipMg23AC77BntxzJMh42Hmv0a5Knwx/dVqx1sIAxUl2Q +o2Iuke0ySI8T7aw9kYuAGQKBgGLR3U8+sJfh+3IjOhoXGEaqTyoNNEX6oZ5teQjr +teelb42CiyfDgyc+6pCdlHYF72hb0EpT8w+CGqbb+EINYDbbETRbPqQXyBRGmoI1 +Xp9HLFsWkL1DtO1FvDkCwrqY8GL8O7i/H8GkzteXXdFJXKKBf/AW2bv7k/wWfPM0 +/edFAoGAR2QJQpvz/qAa64SndY32jYB9KMIb8aZJ7xe11+MgljFJ93MK1rsOtBXP +hbxoRaQUMw8MyuadOrUiK8vUNuFzRa/JNKB23bVb9fhMgbNkRYauJjj7Pl/KlxEs +azwNk2nziRpd3qWhvU1rAjFGUs6vjzMFQsJPXFGs+hnZTObH+6A= -----END RSA PRIVATE KEY----- diff --git a/docs/how-to-use-docs/03-usage-guide.rst b/docs/how-to-use-docs/03-usage-guide.rst new file mode 100644 index 00000000..2bd2f034 --- /dev/null +++ b/docs/how-to-use-docs/03-usage-guide.rst @@ -0,0 +1,274 @@ +.. + TODO As things will change, then this document has to be revised before the + next release. Steps: + 1. Verify that the instructions below are correct and have not been changed. + 2. Add everything that is currently missing and should be included in this document. + 3. Make sure each title has a paragraph or an introductory sentence under it. + 4. Make sure each sentence is grammatically correct and easily understandable. + 5. Remove this comment section. + +Guide to run QTIP: +================== + +This guide will serve as a first step to familiarize the user with how to +run QTIP the first time when the user clones QTIP on to their host machine. +In order to clone QTIP please follow the instructions in the +installation.rst located in docs/userguide/installation.rst. + +QTIP Directory structure: +------------------------- + +The QTIP directory has been sectioned off into multiple folders to facilitate + segmenting information into relevant categories. The folders that concern + the end user are `test_cases/` and `test_list/`. + +test_cases/: +------------ + +This folder is used to store all the config files which are used to setup the + environment prior to a test. 
This folder is further divided into opnfv pods + which run QTIP. Inside each pod there are folders which contain the config + files segmented based on test cases. Namely, these include, `Compute`, + `Network` and `Storage`. The default folder is there for the end user who + is interested in testing their infrastructure but arent part of a opnfv pod. + +The structure of the directory for the user appears as follows +:: + + test_cases/default/compute + test_cases/default/network + test_cases/default/storage + +The benchmarks that are part of the QTIP framework are listed under these +folders. An example of the compute folder is shown below. +Their naming convention is _.yaml +:: + + dhrystone_bm.yaml + dhrystone_vm.yaml + whetstone_vm.yaml + whetstone_bm.yaml + ssl_vm.yaml + ssl_bm.yaml + ramspeed_vm.yaml + ramspeed_bm.yaml + dpi_vm.yaml + dpi_bm.yaml + +The above listed files are used to configure the environment. The VM/BM tag +distinguishes between a test to be run on the Virtual Machine or the compute +node itself, respectively. + + +test_list/: +----------- + +This folder contains three files, namely `compute`, `network` and `storage`. +These files list the benchmarks are to be run by the QTIP framework. Sample +compute test file is shown below +:: + + dhrystone_vm.yaml + dhrystone_bm.yaml + whetstone_vm.yaml + ssl_bm.yaml + +The compute file will now run all the benchmarks listed above one after +another on the environment. `NOTE: Please ensure there are no blank lines +in this file as that has been known to throw an exception`. + +Preparing a config file for test: +--------------------------------- + +We will be using dhrystone as a example to list out the changes that the +user will need to do in order to run the benchmark. +Dhrystone on Compute Nodes: +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +QTIP framework can run benchmarks on the actual compute nodes as well. In +order to run dhrystone on the compute nodes we will be editing the +dhrystone_bm.yaml file. 
+ +:: + + Scenario: + benchmark: dhrystone + host: machine_1, machine_2 + server: + +The `Scenario` field is used by to specify the name of the benchmark to +run as done by `benchmark: dhrystone`. The `host` and `server` tag are +not used for the compute benchmarks but are included here to help the +user `IF` they wish to control the execution. By default both machine_1 +and machine_2 will have dhrystone run on them in parallel but the user +can change this so that machine_1 run dhrystone before machine_2. This +will be elaborated in the `Context` tag. + +:: + + Context: + Host_Machines: + machine_1: + ip: 10.20.0.6 + pw: + role: host + machine_2: + ip: 10.20.0.5 + pw: + role: host + + Virtual_Machines: + +The `Context` tag helps the user list the number of compute nodes they want + to run dhrystone on. The user can list all the compute nodes under the + `Host_Machines` tag. All the machines under test must be listed under the + `Host_Machines` and naming it incrementally higher. The `ip:` tag is used + to specify the IP of the particular compute node. The `pw:` tag can be left + blank because QTIP uses its own key for ssh. In order to run dhrystone on + one compute node at a time the user needs to edit the `role:` tag. `role: + host` for machine_1 and `role: server` for machine_2 will allow for + dhrystone to be run on machine_1 and then run on machine_2. + +:: + + + Test_Description: + Test_category: "Compute" + Benchmark: "dhrystone" + Overview: > + ''' This test will run the dhrystone benchmark in parallel on + machine_1 and machine_2. + +The above field is purely for a description purpose to explain to the user +the working of the test and is not fed to the framework. 
+ +Sample dhrystone_bm.yaml file: +------------------------------ +:: + + Scenario: + benchmark: dhrystone + host: machine_1, machine_2 + server: + + Context: + Host_Machines: + machine_1: + ip: 10.20.0.6 + pw: + role: host + machine_2: + ip: 10.20.0.5 + pw: + role: host + + Virtual_Machines: + + + Test_Description: + Test_category: "Compute" + Benchmark: "dhrystone" + Overview: > + ''' This test will run the dhrystone benchmark in parallel on + machine_1 and machine_2.\n + +Dhrystone on Virtual Machine: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +To run dhrystone on the VMs we will be editing dhrystone_vm.yaml file. +Snippets on the file are given below. + +:: + + Scenario: + benchmark: dhrystone + host: virtualmachine_1, virtualmachine_2 + server: + + +The `Scenario` field is used by to specify the name of the benchmark to +run as done by `benchmark: dhrystone`. The `host` and `server` tag are +not used for the compute benchmarks but are included here to help the +user `IF` they wish to control the execution. By default both +virtualmachine_1 and virtualmachine_2 will have dhrystone run on them +in parallel but the user can change this so that virtualmachine_1 run +dhrystone before virtualmachine_2. This will be elaborated in the +`Context` tag. +:: + + Context: + Host_Machines: + + Virtual_Machines: + virtualmachine_1: + availability_zone: compute1 + public_network: 'net04_ext' + OS_image: QTIP_CentOS + flavor: m1.large + role: host + virtualmachine_2: + availability_zone: compute2 + public_network: 'net04_ext' + OS_image: QTIP_CentOS + flavor: m1.large + role: host + +The `Context` tag helps the user list the number of VMs and their +characteristic. The user can list all the VMs they want to bring up +under the `Virtual_Machines:` tag. In the above example we will be +bringing up two VMs. One on Compute1 and the other on Compute2. 
The +user can change this as desired `NOTE: Please ensure you have the +necessary compute nodes before listing under the 'availability_zone:' +tag`. The rest of the options do not need to be modified by the user. + +Running dhrystone sequentially (Optional): +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In order to run dhrystone on one VM at a time the user needs to edit +the `role:` tag. `role: host` for virtualmachine_1 and `role: server` +for virtualmachine_2 will allow for dhrystone to be run on +virtualmachine_1 and then run on virtualmachine_2. + +:: + + Test_Description: + Test_category: "Compute" + Benchmark: "dhrystone" + Overview: + This test will run the dhrystone benchmark in parallel on + virtualmachine_1 and virtualmachine_2 + +The above field is purely for a decription purpose to explain to +the user the working of the test and is not fed to the framework. + +Sample dhrystone_vm.yaml file: +------------------------------ +:: + + Scenario: + benchmark: dhrystone + host: virtualmachine_1, virtualmachine_2 + server: + + Context: + Host_Machines: + + Virtual_Machines: + virtualmachine_1: + availability_zone: compute1 + public_network: 'net04_ext' + OS_image: QTIP_CentOS + flavor: m1.large + role: host + virtualmachine_2: + availability_zone: compute2 + public_network: 'net04_ext' + OS_image: QTIP_CentOS + flavor: m1.large + role: host + + Test_Description: + Test_category: "Compute" + Benchmark: "dhrystone" + Overview: > + This test will run the dhrystone benchmark in parallel on + machine_1 and machine_2.\n diff --git a/docs/how-to-use-docs/index.rst b/docs/how-to-use-docs/index.rst index bbe991b0..713599c0 100644 --- a/docs/how-to-use-docs/index.rst +++ b/docs/how-to-use-docs/index.rst @@ -20,6 +20,9 @@ Contents: documentation-example.rst 01-introduction.rst + 02-methodology.rst + 03-usage-guide.rst + Indices and tables ================== diff --git a/docs/iperf_testcase.rst b/docs/iperf_testcase.rst new file mode 100644 index 00000000..fa2b44a4 --- 
/dev/null +++ b/docs/iperf_testcase.rst @@ -0,0 +1,42 @@ +NETWORK THROUGHPUT TESTCASE

+QTIP uses IPerf3 as the main tool for testing the network throughput. +There are two tests that are run through the QTIP framework.

+Network Throughput for VMs +Network Throughput for Compute Nodes

+For the throughput of the compute nodes we simply go into the systems-under-test +and install iperf3 on the nodes. One of the SUTs is used as a server and the other as a +client. The client pushes traffic to the server for a duration specified by the user +configuration file for iperf. These files can be found in the test_cases/{POD}/network/ +directory. The bandwidth is limited only by the physical link layer speed available to the server. +The result file includes the b/s bandwidth and the CPU usage for both the client and server.

+For the VMs we are running two topologies through the framework.

+1: VMs on the same compute nodes +2: VMs on different compute nodes

+QTIP framework sets up a stack with a private network, security groups, routers and attaches the VMs to this network. Iperf3 is installed +on the VMs and one is assigned the role of client while the other serves as a server. Traffic is pushed +over the QTIP private network between the VMs. A closer look is needed to see how the traffic actually +flows between the VMs in this configuration to understand what is happening to the packet as it traverses +the openstack network.

+The packet originates from VM1 and it is sent to the linux bridge via a tap interface where the security groups +are written. Afterwards the packet is forwarded to the Integration bridge via a patch port. Since VM2 is also connected +to the Integration bridge in a similar manner as VM1, the packet gets forwarded to the linux bridge connecting +VM2. After the linux bridge the packet is sent to VM2 and is received by the Iperf3 server. 
Since no physical link is +involved in this topology, only the OVS (Integration bridge) is being benchmarked and we are seeing bandwidth in the range +of 14-15 Gbps.

+For the topology where the VMs are spawned on different compute nodes, the path the packet takes becomes more cumbersome. +The packet leaves a VM and makes its way to the Integration Bridge as in the first topology however the integration bridge +forwards the packet to the physical link through the ethernet bridge. The packet then gets a VLAN/Tunnel depending on the network +and is forwarded to the particular Compute node where the second VM is spawned. The packets enter the compute node through the physical +ethernet port and make their way to the VM through the integration bridge and linux bridge. As seen here the path is much more involved +even when discussed without the mention of overheads faced at all the interfaces so we are seeing the results in the range of 2 Gbps.



 \ No newline at end of file diff --git a/opnfv-creds.sh b/opnfv-creds.sh index 9266c19a..54d5aa3a 100644 --- a/opnfv-creds.sh +++ b/opnfv-creds.sh @@ -4,7 +4,7 @@ export OS_NO_CACHE='true' export OS_TENANT_NAME='admin' export OS_USERNAME='admin' export OS_PASSWORD='admin' -export OS_AUTH_URL='http://172.18.0.69:5000/v2.0' +export OS_AUTH_URL='http://172.18.1.5:5000/v2.0' export OS_AUTH_STRATEGY='keystone' export OS_REGION_NAME='RegionOne' export CINDER_ENDPOINT_TYPE='internalURL' diff --git a/test_cases/default/network/iperf_vm.yaml b/test_cases/default/network/iperf_vm.yaml index d1cda0b4..49bf13ad 100644 --- a/test_cases/default/network/iperf_vm.yaml +++ b/test_cases/default/network/iperf_vm.yaml @@ -14,14 +14,14 @@ Context: Virtual_Machines: virtualmachine_1: - availability_zone: compute4 + availability_zone: compute1 OS_image: QTIP_CentOS public_network: 'net04_ext' role: 1-server flavor: m1.large virtualmachine_2: - availability_zone: compute4 + availability_zone: compute1 OS_image: QTIP_CentOS public_network: 
'net04_ext' role: 2-host diff --git a/test_cases/dell-us-deploying-bm3/network/iperf_bm.yaml b/test_cases/dell-us-deploying-bm3/network/iperf_bm.yaml index 4fd3d7f5..3d2862bf 100644 --- a/test_cases/dell-us-deploying-bm3/network/iperf_bm.yaml +++ b/test_cases/dell-us-deploying-bm3/network/iperf_bm.yaml @@ -15,7 +15,7 @@ Context: pw: role: 1-server machine_2: - ip: 10.20.0.6 + ip: 10.20.0.4 pw: role: 2-host -- cgit 1.2.3-korg