author    agardner <agardner@linuxfoundation.org>    2017-04-27 11:46:29 +0200
committer agardner <agardner@linuxfoundation.org>    2017-04-27 11:46:29 +0200
commit    b127369752dd4f6a1b038c14c45f90df54a505fd (patch)
tree      40de2aa0e25e5fac3682420271f0952308426fb7
parent    1297f44c1bb0e9b0e2b9620b73190bec00604c24 (diff)
Adding all releng octopus and pharos docs.
Please argue and remove as needed in the review.

Change-Id: Ia376d8be14c56f6a2fae3cd753ea53b869e5f784
Signed-off-by: agardner <agardner@linuxfoundation.org>
-rw-r--r--docs/configguide/configguide.rst40
-rw-r--r--docs/configguide/index.rst30
-rw-r--r--docs/configguide/jumpserverinstall.rst102
-rw-r--r--docs/configguide/lab_update_guide.rst84
-rw-r--r--docs/how-to-use-docs/documentation-example.rst86
-rw-r--r--docs/how-to-use-docs/index.rst26
-rw-r--r--docs/images/ZTE-POD.jpgbin0 -> 369826 bytes
-rw-r--r--docs/images/bridge1.pngbin0 -> 78415 bytes
-rw-r--r--docs/images/bridge2.pngbin0 -> 77276 bytes
-rw-r--r--docs/images/opnfv-example-lab-diagram.pngbin0 -> 70804 bytes
-rw-r--r--docs/images/opnfv-pharos-diagram-v01.jpgbin0 -> 63461 bytes
-rw-r--r--docs/images/opnfv-test.jpgbin0 -> 175535 bytes
-rw-r--r--docs/images/pharos-archi1.jpgbin0 -> 104922 bytes
-rwxr-xr-xdocs/index.rst28
-rw-r--r--docs/information/index.rst14
-rw-r--r--docs/information/pharos.rst120
-rw-r--r--docs/jenkins-job-builder/index.rst9
-rw-r--r--docs/jenkins-job-builder/opnfv-jjb-usage.rst93
-rw-r--r--docs/lab-description/images/lab_topology_example.jpgbin0 -> 54609 bytes
-rw-r--r--docs/lab-description/images/pod_topology_example.pngbin0 -> 45640 bytes
-rw-r--r--docs/lab-description/index.rst27
-rw-r--r--docs/lab-description/inventory.rst11
-rw-r--r--docs/lab-description/lab_description.rst63
-rw-r--r--docs/lab-description/pod_description.rst99
-rw-r--r--docs/labs/Dell.rst859
-rw-r--r--docs/labs/ericsson/images/ericsson-opnfv-topology.pngbin0 -> 1011133 bytes
-rw-r--r--docs/labs/ericsson/index.rst15
-rw-r--r--docs/labs/ericsson/lab_description.rst148
-rw-r--r--docs/labs/huawei-us-lab/huawei-lab-pod.pngbin0 -> 264448 bytes
-rw-r--r--docs/labs/huawei-us-lab/huawei-lab-topology.pngbin0 -> 37079 bytes
-rw-r--r--docs/labs/huawei-us-lab/huawei-lab-virtual.pngbin0 -> 646320 bytes
-rw-r--r--docs/labs/huawei-us-lab/index.rst16
-rw-r--r--docs/labs/huawei-us-lab/lab_specification.rst100
-rw-r--r--docs/labs/huawei-us-lab/net.pngbin0 -> 95297 bytes
-rw-r--r--docs/labs/huawei-us-lab/pod_specification.rst124
-rw-r--r--docs/labs/images/Dell_Overview.jpgbin0 -> 45771 bytes
-rw-r--r--docs/labs/images/Dell_POD1.jpgbin0 -> 61229 bytes
-rw-r--r--docs/labs/images/Dell_POD2.jpgbin0 -> 61182 bytes
-rw-r--r--docs/labs/images/Dell_POD3.jpgbin0 -> 61950 bytes
-rw-r--r--docs/labs/images/orange_paris_pod1.jpgbin0 -> 353262 bytes
-rw-r--r--docs/labs/images/orange_pod2.pngbin0 -> 83662 bytes
-rw-r--r--docs/labs/images/spirent_vptc-public-drawing.pngbin0 -> 80627 bytes
-rwxr-xr-xdocs/labs/index.rst21
-rw-r--r--docs/labs/ool/images/ool-testlab.pngbin0 -> 763367 bytes
-rw-r--r--docs/labs/ool/index.rst16
-rw-r--r--docs/labs/ool/inventory.rst11
-rw-r--r--docs/labs/ool/lab_description.rst72
-rw-r--r--docs/labs/ool/pod1_description.rst185
-rw-r--r--docs/labs/ool/pod1_inventory.yaml37
-rw-r--r--docs/labs/ool/virtual1_description.rst102
-rw-r--r--docs/labs/orange-lannion-lab/index.rst216
-rw-r--r--docs/labs/orange-paris-lab/index.rst15
-rw-r--r--docs/labs/orange-paris-lab/orange_paris_lab_description.rst75
-rw-r--r--docs/labs/orange-paris-lab/orange_paris_pod1_description.rst129
-rw-r--r--docs/labs/spirent.rst44
-rw-r--r--docs/labs/zte-sh-lab/images/zte_sh_lab_topology.pngbin0 -> 223339 bytes
-rw-r--r--docs/labs/zte-sh-lab/images/zte_sh_pod_topology.pngbin0 -> 125873 bytes
-rw-r--r--docs/labs/zte-sh-lab/index.rst18
-rw-r--r--docs/labs/zte-sh-lab/lab_description.rst127
-rw-r--r--docs/labs/zte-sh-lab/pod-1.rst160
-rw-r--r--docs/labs/zte-sh-lab/pod-1.yaml55
-rw-r--r--docs/labs/zte-sh-lab/pod-2.rst162
-rw-r--r--docs/labs/zte-sh-lab/pod-2.yaml55
-rw-r--r--docs/labs/zte-sh-lab/pod-3.rst163
-rw-r--r--docs/labs/zte-sh-lab/pod-3.yaml56
-rw-r--r--docs/octopus_docs/images/ci_infra.pngbin0 -> 123943 bytes
-rw-r--r--docs/octopus_docs/images/daily_job.pngbin0 -> 11968 bytes
-rw-r--r--docs/octopus_docs/images/merge_job.pngbin0 -> 28138 bytes
-rw-r--r--docs/octopus_docs/images/pipeline_overview.pngbin0 -> 26210 bytes
-rw-r--r--docs/octopus_docs/images/stability_screenshot10.pngbin0 -> 58979 bytes
-rw-r--r--docs/octopus_docs/images/stability_screenshot11.pngbin0 -> 60369 bytes
-rw-r--r--docs/octopus_docs/images/stability_screenshot9.pngbin0 -> 49489 bytes
-rw-r--r--docs/octopus_docs/images/verify_job.pngbin0 -> 26956 bytes
-rw-r--r--docs/octopus_docs/index.rst22
-rw-r--r--docs/octopus_docs/octopus_info.rst38
-rw-r--r--docs/octopus_docs/opnfv-artifact-repository.rst193
-rw-r--r--docs/octopus_docs/opnfv-ci-infrastructure.rst85
-rw-r--r--docs/octopus_docs/opnfv-ci-pipelines.rst109
-rw-r--r--docs/octopus_docs/opnfv-jenkins-slave-connection.rst181
-rw-r--r--docs/octopus_docs/opnfv-jjb-usage.rst179
-rw-r--r--docs/octopus_docs/opnfv-stablebranch.rst187
-rw-r--r--docs/platformoverview/labinfrastructure.rst21
-rw-r--r--docs/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.pngbin0 -> 51232 bytes
-rw-r--r--docs/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.pngbin0 -> 50055 bytes
-rw-r--r--docs/scenario-lifecycle/ODL Generic Scenarios Evolution.pngbin0 -> 95014 bytes
-rw-r--r--docs/scenario-lifecycle/creating-scenarios.rst96
-rw-r--r--docs/scenario-lifecycle/current-status.rst50
-rw-r--r--docs/scenario-lifecycle/deployment-options.rst128
-rw-r--r--docs/scenario-lifecycle/feature-compatibility-nosdn.pngbin0 -> 24694 bytes
-rw-r--r--docs/scenario-lifecycle/feature-compatibility-odl.pngbin0 -> 32124 bytes
-rw-r--r--docs/scenario-lifecycle/generic-scenarios.rst53
-rw-r--r--docs/scenario-lifecycle/index.rst24
-rw-r--r--docs/scenario-lifecycle/mano-scenarios.rst31
-rw-r--r--docs/scenario-lifecycle/parent-child-relations.rst62
-rw-r--r--docs/scenario-lifecycle/parent-child.pngbin0 -> 26454 bytes
-rw-r--r--docs/scenario-lifecycle/scenario-descriptor-files.rst228
-rw-r--r--docs/scenario-lifecycle/scenario-overview.rst97
-rw-r--r--docs/scenario-lifecycle/scenario-tree+idea.pngbin0 -> 128763 bytes
-rw-r--r--docs/scenario-lifecycle/scenario-tree.pngbin0 -> 81067 bytes
-rw-r--r--docs/scenario-lifecycle/sibling.pngbin0 -> 32538 bytes
-rw-r--r--docs/scenario-lifecycle/specific-scenarios.rst34
-rw-r--r--docs/specification/hardwarespec.rst52
-rw-r--r--docs/specification/index.rst18
-rw-r--r--docs/specification/networkconfig.rst62
-rw-r--r--docs/specification/objectives.rst29
-rw-r--r--docs/specification/remoteaccess.rst63
106 files changed, 5855 insertions, 0 deletions
diff --git a/docs/configguide/configguide.rst b/docs/configguide/configguide.rst
new file mode 100644
index 0000000..2946677
--- /dev/null
+++ b/docs/configguide/configguide.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+***************
+Lab Setup Guide
+***************
+
+Provides an overview of how to set up a Pharos lab. A full set of
+:ref:`pharos_master` documents is maintained in the *pharos* repo.
+
+When setting up an OPNFV community lab ...
+
+* Provide the Pharos community with details of the intended setup, including ...
+
+ * Overview of the resources being offered to the community, their intended purpose and known limitations
+ * Lab owner name with contacts
+ * Timelines for availability for development, test, release production, ...
+
+* Update the Pharos Wiki with lab details
+
+ * Lab map, organization, contacts, status, location, resources, role, etc.
+ * `Community labs <https://wiki.opnfv.org/display/pharos#PharosHome-Overview>`_
+ * :ref:`pharos_wiki`
+
+* Update the Pharos project information file "Current Labs"
+
+ * :ref:`pharos_information`
+
+* Create new Wiki pages for lab and POD specific information
+
+ * Access procedures
+ * Usage guidelines for developers
+ * Update information as PODs are re-assigned or usage/availability changes
+
+* Fill Lab and POD templates ... :ref:`pharos_lab` ... :ref:`pharos_pod`
+
+ * Note that security sensitive lab information should be stored in the secure Pharos repo
+
+* Connect PODs to Jenkins/CI
diff --git a/docs/configguide/index.rst b/docs/configguide/index.rst
new file mode 100644
index 0000000..c51f029
--- /dev/null
+++ b/docs/configguide/index.rst
@@ -0,0 +1,30 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos configuration documents.
+
+********************
+Pharos Configuration
+********************
+
+OPNFV development, test and production activities rely on Pharos resources and support from the
+Pharos community. Lab owners and Pharos project committers/contributors will evolve the vision for
+Pharos as well as expand lab capabilities that are needed to help OPNFV be highly successful.
+
+Pharos configuration documents provide information on how to set up hardware and networks in a
+Pharos compliant lab. Jira is used to track Pharos activities including lab operations. Lab
+resources can be used for and declared as either *Development (bare-metal or virtual)* or
+*Production/CI (bare-metal or virtual)*. If a resource is used for and declared as a *Development*
+resource, it cannot be used for and declared as a *Production/CI* resource at the same time, and vice
+versa. Changing the resource declaration must be brought to the Infra WG. Production/CI PODs are
+required to be connected to OPNFV Jenkins and available on a 24/7 basis other than for scheduled
+maintenance and troubleshooting. Jenkins slave status can be seen on the `Jenkins dashboard
+<https://build.opnfv.org/ci/computer/>`_.
+
+.. toctree::
+ :maxdepth: 2
+
+ ./configguide.rst
+ ./lab_update_guide.rst
+ ./jumpserverinstall.rst
diff --git a/docs/configguide/jumpserverinstall.rst b/docs/configguide/jumpserverinstall.rst
new file mode 100644
index 0000000..e51e946
--- /dev/null
+++ b/docs/configguide/jumpserverinstall.rst
@@ -0,0 +1,102 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+Jump Server Configuration
+-------------------------
+
+Jump server install procedures are maintained by each installer project. Additional Jump server
+configuration BKMs will be maintained here. The install information below was used for Fuel but
+may be outdated (please refer to the Fuel Installer documents).
+
+**Procedure**
+
+1. Obtain CentOS 7 Minimal ISO and install
+
+ ``wget http://mirrors.kernel.org/centos/7/isos/x86_64/CentOS-7-x86_64-Minimal-1503-01.iso``
+
+2. Set parameters appropriate for your environment during installation
+
+3. Disable NetworkManager
+
+ ``systemctl disable NetworkManager``
+
+4. Configure your /etc/sysconfig/network-scripts/ifcfg-* files for your network
+
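+   As a minimal sketch (the interface name ``em1`` and the addresses shown are
+   assumptions; adjust them to match your PXE/Admin and public networks), an
+   ifcfg file could look like::
+
+      # /etc/sysconfig/network-scripts/ifcfg-em1
+      DEVICE=em1
+      TYPE=Ethernet
+      ONBOOT=yes
+      BOOTPROTO=none
+      IPADDR=10.2.117.20
+      NETMASK=255.255.255.0
+      GATEWAY=10.2.117.1
+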
+5. Restart networking
+
+ ``service network restart``
+
+6. Edit /etc/resolv.conf and add a nameserver, for example 8.8.8.8
+
+ ``echo nameserver 8.8.8.8 >> /etc/resolv.conf``
+
+7. Install libvirt & kvm
+
+ ``yum -y update``
+ ``yum -y install kvm qemu-kvm libvirt``
+ ``systemctl enable libvirtd``
+
+8. Reboot:
+
+ ``shutdown -r now``
+
+9. Configure SSHD
+
+   If you wish to avoid an annoying delay when using ssh to log in, disable DNS lookups:
+
+   If **UseDNS** already exists in the config file, update it:
+
+ ``sed -i -e 's/^#*UseDNS\ \+yes/UseDNS no/' /etc/ssh/sshd_config``
+
+   or append the setting if it does not exist:
+
+   ``echo UseDNS no >> /etc/ssh/sshd_config``
+
+   Disable Password Authentication for security:
+
+ ``sed -i -e 's/^#PasswordAuthentication\ \+yes/PasswordAuthentication no/' /etc/ssh/sshd_config``
+
+   If you want to disable IPv6 connections, comment out the IPv6 ListenAddress and change AddressFamily to inet:
+
+ ``sed -i -e 's/^ListenAddress\ \+::/#ListenAddress ::/' /etc/ssh/sshd_config``
+ ``sed -i -e 's/^AddressFamily\ \+any/AddressFamily inet/' /etc/ssh/sshd_config``
+
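+   Before restarting, the edited configuration can be sanity-checked (a small
+   addition to the procedure above)::
+
+      /usr/sbin/sshd -t
+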
+10. Restart sshd
+
+ ``systemctl restart sshd``
+
+11. Install virt-install
+
+ ``yum -y install virt-install``
+
+12. Visit artifacts.opnfv.org and download the OPNFV Fuel ISO
+
+13. Create a bridge using the interface on the PXE network, for example: br0
+
+ ``brctl addbr br0``
+
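+   To attach the PXE-facing interface to the bridge and bring both up (``em1`` is
+   an assumption; use whichever interface is cabled to your PXE/Admin network)::
+
+      brctl addif br0 em1
+      ip link set dev em1 up
+      ip link set dev br0 up
+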
+14. Make a directory owned by qemu:
+
+ ``mkdir /home/qemu; mkdir -p /home/qemu/VMs/fuel-6.0/disk``
+
+ ``chown -R qemu:qemu /home/qemu``
+
+15. Copy the ISO to /home/qemu and create the Fuel master VM:
+
+ ``cd /home/qemu``
+
+ ``virt-install -n opnfv-2015-05-22_18-34-07-fuel -r 4096 --vcpus=4
+ --cpuset=0-3 -c opnfv-2015-05-22_18-34-07.iso --os-type=linux
+ --os-variant=rhel6 --boot hd,cdrom --disk
+ path=/home/qemu/VMs/mirantis-fuel-6.0/disk/fuel-vhd0.qcow2,bus=virtio,size=50,format=qcow2
+ -w bridge=br0,model=virtio --graphics vnc,listen=0.0.0.0``
+
+16. Temporarily flush the firewall rules to make things easier:
+
+ ``iptables -F``
+
+17. Connect to the console of the installing VM with your favorite VNC client.
+
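+   For example, with TigerVNC (the jump server address is a placeholder and the
+   first VNC display is an assumption)::
+
+      vncviewer <jump-server-ip>:0
+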
+18. Change the IP settings to match the POD; use an IP in the PXE/Admin network for the Fuel Master.
diff --git a/docs/configguide/lab_update_guide.rst b/docs/configguide/lab_update_guide.rst
new file mode 100644
index 0000000..c939164
--- /dev/null
+++ b/docs/configguide/lab_update_guide.rst
@@ -0,0 +1,84 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+.. _pharos_wiki:
+
+*************************
+Updating Pharos Documents
+*************************
+
+Details about each Community Lab are found in 3 places:
+
+ - Summary of lab including location, contacts, status, etc.
+ on the `Pharos Project Wiki page <https://wiki.opnfv.org/display/pharos>`_
+ - Lab specific details are provided with dedicated Wiki pages,
+ see this `Example Lab <https://wiki.opnfv.org/display/pharos/Intel+Hosting>`_
+ - Pharos repo docs ...
+
+ - docs/information/pharos.rst ... project information file
+ - docs/labs/ ... Lab documents (includes lab specific capabilities, usages and policies;
+ POD information)
+  - docs/labs/images/ ... Lab and POD topologies
+
+Update Pharos repo
+------------------
+
+Clone the Pharos Git repository
+
+ * Make the changes to Pharos project information file (docs/information/pharos.rst)
+ * After the code gets merged, http://artifacts.opnfv.org/pharos/docs/information/pharos.html will
+ contain your change
+
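+A minimal sketch of the clone-and-edit flow (the repository URL follows the usual
+OPNFV Gerrit pattern and assumes ``git-review`` is already set up)::
+
+    git clone ssh://YOU@gerrit.opnfv.org:29418/pharos
+    cd pharos
+    # edit docs/information/pharos.rst, then
+    git commit -sv
+    git review
+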
+
+Update Pharos Wiki
+------------------
+
+Edit Wiki page
+
+ * https://wiki.opnfv.org/pharos
+ * Look for {{scrape>http://artifacts.opnfv.org/pharos/docs/information/pharos.html}}
+ * Click "Preview" and see if your change is shown; if shown add a short "Edit summary" and click
+ "Save" (Wiki does not auto update content)
+
+You will see a section of code as shown below. Add your page to the bullet list with a wiki link, nice
+name, and location summary.
+
+Update the map info on the Pharos Project Page https://wiki.opnfv.org/pharos?&#community_labs
+
+ * You will see a section of code as shown below. Add your lab information to the list with a comma
+ separated list as follows:
+
+ * longitude
+ * latitude
+ * .8 <- for size
+ * marker color png ([[marker-green.png|marker-green.png]], [[marker-blue.png|marker-blue.png]],
+ [[marker-red.png|marker-red.png]], [[marker-gold.png|marker-gold.png]])
+ * Nice Format Lab Name
+ * '';''
+ * Location Summary
+ * ''\\'' <-- for a new line
+ * external link: <-- optional
+
+.. MAP Code Example (see Wiki page for current version)::
+
+MAP::
+
+ <olmap id="olMapOne" width="877px" height="200px" lat="45.0" lon="0.0" zoom="3" statusbar="1" toolbar="1" controls="1"
+ poihoverstyle="0" baselyr="OpenStreetMap" gpxfile="" kmlfile="">
+ 45.52,-122.67,60,.8,marker-red.png,Linux Foundation;Portland, Oregon \\ external link: [[http://www.test.com|test.com]]
+ 39.7392,-104.9902,60,.8,marker-red.png,Cable Labs;Denver, CA \\ external link: [[http://www.test.com|test.com]]
+ 37.333685,-121.891272,60,.6,marker-green.png,[[pharos/spirentvctlab|Spirent VCT Lab]] \\ San Jose, California
+ 39.90,116.35,60,.8,marker-red.png,China Mobile Labs;Beijing, China \\ external link: [[http://www.test.com|test.com]]
+ 37.413137,-121.977975,-180,.6,marker-red.png,Dell Labs;Santa Clara, California \\ link: [[https://wiki.opnfv.org/dell_hosting]]
+ 59.41,17.95,60,.8,marker-red.png,Enea Pharos Lab;Kista, Sweden \\ external link: [[http://www.enea.com/pharos-lab|ENEA pharos lab]]
+ 45.50,-73.66,60,.8,marker-blue.png,Ericsson Labs;Montreal, Canada \\ external link: [[http://www.test.com|test.com]]
+ 34.26,108.97,60,.8,marker-green.png, Huawei Labs;Xi an, China \\ external link: [[http://www.test.com|test.com]]
+ 37.373424,-121.964913,60,.8,marker-green.png, Huawei Labs;Santa Clara, USA \\ external link: [[http://www.test.com|test.com]]
+ 45.53,-122.97,60,.8,marker-green.png,Intel Labs;Hillsboro, Oregon \\ link: [[https://wiki.opnfv.org/get_started/intel_hosting|intel_hosting]]
+ 48.75867,-3.45196,60,.8,marker-gold.png,Orange Labs;Lannion, France \\ external link: [[http://www.test.com|test.com]]
+ 48.825786,2.274797,-60,.8,marker-gold.png,Orange Labs;Paris, France \\ external link: [[http://www.test.com|test.com]]
+ 31.97,118.79,60,.8,marker-red.png,ZTE Labs;Nan Jing, China \\ link:[[zte-nj-testlab|ZTE, Nan Jing]]
+ [[http://test.com|test.com]] \\ internal link: [[::start]]\\ **DW Formatting**
+ </olmap>
diff --git a/docs/how-to-use-docs/documentation-example.rst b/docs/how-to-use-docs/documentation-example.rst
new file mode 100644
index 0000000..ae0a7f2
--- /dev/null
+++ b/docs/how-to-use-docs/documentation-example.rst
@@ -0,0 +1,86 @@
+.. two dots create a comment. please leave this logo at the top of each of your rst files.
+.. image:: ../etc/opnfv-logo.png
+ :height: 40
+ :width: 200
+ :alt: OPNFV
+ :align: left
+
+.. these two pipes are to separate the logo from the first title
+
+|
+|
+
+How to create documentation for your OPNFV project
+==================================================
+
+This is the directory structure of the docs/ directory that can be found in the root of your project
+directory:
+
+.. code-block:: bash
+
+ ./etc
+ ./etc/opnfv-logo.png
+ ./etc/conf.py
+ ./how-to-use-docs
+ ./how-to-use-docs/documentation-example.rst
+ ./how-to-use-docs/index.rst
+
+To create your own documentation, create any number of directories (depending on your needs) and
+place an index.rst in each of them. This index file must reference your other rst files.
+
+* Here is an example index.rst
+
+.. code-block:: bash
+
+ Example Documentation table of contents
+ =======================================
+
+ Contents:
+
+ .. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ documentation-example.rst
+
+ Indices and tables
+ ==================
+
+ * :ref:`search`
+
+
+The Sphinx Build
+================
+
+When you push documentation changes to Gerrit, a Jenkins job will create the HTML documentation.
+
+* Verify Jobs
+
+For verify jobs, a link to the documentation will show up as a comment in Gerrit for you to see the
+result.
+
+* Merge jobs
+
+Once you are happy with the look of your documentation you can submit the patchset. The merge job
+will copy the output of each documentation directory to
+http://artifacts.opnfv.org/$project/docs/$name_of_your_folder/index.html
+
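+If you want to preview the result locally before pushing, a Sphinx build along
+these lines should work from the docs/ directory (the shared conf.py location is
+taken from the layout above; the output path is just an example)::
+
+    pip install sphinx
+    sphinx-build -b html -c etc how-to-use-docs /tmp/docs-preview
+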
+Here are some quick examples of how to use rst markup
+
+This is a headline::
+
+ here is some code, note that it is indented
+
+Links are easy to add: here is a link to Sphinx, the tool that we are using to generate documentation:
+http://sphinx-doc.org/
+
+* Bulleted Items
+
+ **this will be bold**
+
+.. code-block:: bash
+
+    echo "Here is a code block with bash syntax highlighting"
+
+
+Leave these at the bottom of each of your documents; they are used internally.
diff --git a/docs/how-to-use-docs/index.rst b/docs/how-to-use-docs/index.rst
new file mode 100644
index 0000000..2fea43e
--- /dev/null
+++ b/docs/how-to-use-docs/index.rst
@@ -0,0 +1,26 @@
+.. OPNFV Release Engineering documentation, created by
+ sphinx-quickstart on Tue Jun 9 19:12:31 2015.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+.. image:: ../etc/opnfv-logo.png
+ :height: 40
+ :width: 200
+ :alt: OPNFV
+ :align: left
+
+Example Documentation table of contents
+=======================================
+
+Contents:
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ documentation-example.rst
+
+Indices and tables
+==================
+
+* :ref:`search`
diff --git a/docs/images/ZTE-POD.jpg b/docs/images/ZTE-POD.jpg
new file mode 100644
index 0000000..23907f9
--- /dev/null
+++ b/docs/images/ZTE-POD.jpg
Binary files differ
diff --git a/docs/images/bridge1.png b/docs/images/bridge1.png
new file mode 100644
index 0000000..9a557ae
--- /dev/null
+++ b/docs/images/bridge1.png
Binary files differ
diff --git a/docs/images/bridge2.png b/docs/images/bridge2.png
new file mode 100644
index 0000000..7d8c383
--- /dev/null
+++ b/docs/images/bridge2.png
Binary files differ
diff --git a/docs/images/opnfv-example-lab-diagram.png b/docs/images/opnfv-example-lab-diagram.png
new file mode 100644
index 0000000..5a3901c
--- /dev/null
+++ b/docs/images/opnfv-example-lab-diagram.png
Binary files differ
diff --git a/docs/images/opnfv-pharos-diagram-v01.jpg b/docs/images/opnfv-pharos-diagram-v01.jpg
new file mode 100644
index 0000000..b688655
--- /dev/null
+++ b/docs/images/opnfv-pharos-diagram-v01.jpg
Binary files differ
diff --git a/docs/images/opnfv-test.jpg b/docs/images/opnfv-test.jpg
new file mode 100644
index 0000000..54a8110
--- /dev/null
+++ b/docs/images/opnfv-test.jpg
Binary files differ
diff --git a/docs/images/pharos-archi1.jpg b/docs/images/pharos-archi1.jpg
new file mode 100644
index 0000000..3c4478a
--- /dev/null
+++ b/docs/images/pharos-archi1.jpg
Binary files differ
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100755
index 0000000..94844fa
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,28 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. OPNFV Pharos Documentation master file.
+
+.. _pharos_master:
+
+======
+PHAROS
+======
+
+OPNFV Community Lab Infrastructure
+==================================
+
+.. toctree::
+ :maxdepth: 3
+ :numbered: 3
+
+ ./information/index.rst
+ ./specification/index.rst
+ ./lab-description/index.rst
+ ./configguide/index.rst
+ ./labs/index.rst
+
+Indices
+=======
+* :ref:`search`
diff --git a/docs/information/index.rst b/docs/information/index.rst
new file mode 100644
index 0000000..0311526
--- /dev/null
+++ b/docs/information/index.rst
@@ -0,0 +1,14 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+**************************
+Pharos Project Information
+**************************
+
+
+.. toctree::
+ :maxdepth: 2
+
+ ./pharos.rst
diff --git a/docs/information/pharos.rst b/docs/information/pharos.rst
new file mode 100644
index 0000000..1679e38
--- /dev/null
+++ b/docs/information/pharos.rst
@@ -0,0 +1,120 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+Introduction
+------------
+
+The `Pharos Project <https://www.opnfv.org/developers/pharos>`_ deals with developing an OPNFV lab
+infrastructure that is geographically and technically diverse. This will greatly assist in
+developing a highly robust and stable OPNFV platform. Community labs are hosted by individual
+companies and there is also an OPNFV lab hosted by the Linux Foundation that has controlled access
+for key development and production activities. The **Pharos Specification** defines a "compliant"
+deployment and test environment. Pharos is responsible for defining lab capabilities, developing
+management/usage policies and processes, and a support plan for reliable access to project and release
+resources. Community labs are provided as a service by companies and are not controlled by Pharos;
+however, our goal is to provide easy visibility of all lab capabilities and their usage at all times.
+
+A requirement of Pharos labs is to provide *bare-metal* for development, deployment and testing.
+This is resource intensive from a hardware and support perspective, and providing remote access can
+also be very challenging due to corporate IT policies. Achieving a consistent *look and feel* of a
+federated lab infrastructure continues to be an objective. Virtual environments are also useful and
+provided by some labs. Jira is currently used for tracking lab operational issues as well as for
+Pharos project activities.
+
+Future lab capabilities are currently focused on:
+
+1) Automatic resource provisioning
+2) Dashboards (for capability and usage)
+3) *Virtual Labs* for developer on-boarding
+
+Project Communication
+---------------------
+
+* `Pharos page <https://www.opnfv.org/developers/pharos>`_
+* `Pharos project Wiki <https://wiki.opnfv.org/display/pharos>`_
+* `Pharos Planning <https://wiki.opnfv.org/display/pharos/Pharos+Colorado+Plan>`_
+* `Pharos Jira <https://jira.opnfv.org/projects/PHAROS/summary>`_
+* `Bi-weekly Pharos meeting <https://wiki.opnfv.org/display/pharos/Pharos+Meetings>`_
+* `Weekly INFRA WG meeting <https://wiki.opnfv.org/display/INF/Infra+Working+Group>`_
+* `Weekly coordination meeting for Test related projects <https://wiki.opnfv.org/meetings/test>`_
+* `IRC: freenode.net #opnfv-pharos <http://webchat.freenode.net/?channels=opnfv-pharos>`_
+* Mailing List: use opnfv-tech-discuss and tag your emails with [Pharos] in the subject for filtering
+
+Project Release Artifacts
+-------------------------
+
+* `Project Repository <https://gerrit.opnfv.org/gerrit/gitweb?p=pharos.git>`_
+* `Continuous Integration <https://build.opnfv.org/ci/view/pharos/>`_
+* `Documentation <http://artifacts.opnfv.org/pharos/docs/>`_
+
+Pharos Lab Process
+------------------
+
+* Process for requesting lab access and support https://wiki.opnfv.org/display/pharos/Pharos+Rls+B+Support
+* Pharos Lab Governance and Policies https://wiki.opnfv.org/display/pharos/Pharos+Policies
+* Status of Community labs https://wiki.opnfv.org/display/pharos/#PharosHome-Overview
+
+Current Labs
+------------
+
+An interactive map of OPNFV lab locations, lab owners and other lab information is maintained on the
+`Pharos Wiki <https://wiki.opnfv.org/pharos#community_labs>`_
+
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| # | **Hosted by** | **Home page** | **Location** |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 1 | Linux | https://wiki.opnfv.org/display/pharos/Lflab+Hosting | Portland, Oregon |
+| | Foundation | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 2 | Spirent | https://wiki.opnfv.org/display/pharos/Spirentvctlab | Nephoscale, CA |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 3 | China Mobile | https://wiki.opnfv.org/display/pharos/Lab2+Chinamobile+Hosting | Beijing, China |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 4 | Dell | https://wiki.opnfv.org/display/pharos/Dell+Hosting | Santa Clara, CA |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 5 | Enea | https://wiki.opnfv.org/display/pharos/Enea-pharos-lab | Kista, Sweden |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 6 | Ericsson | https://wiki.opnfv.org/display/pharos/Ericsson+Hosting+and+Request+Process | Montreal, Canada |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 7 | Huawei | https://wiki.opnfv.org/display/pharos/Lab4+Huawei | Xi an, China |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 8 | Huawei | https://wiki.opnfv.org/display/pharos/Huawei+Sc+Hosting | Santa Clara, CA |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 9 | Intel | https://wiki.opnfv.org/display/pharos/Intel+Hosting | Hillsboro, Oregon |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 10 | Orange | https://wiki.opnfv.org/display/pharos/Opnfv-orange | Lannion, France |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 11 | Orange | https://wiki.opnfv.org/display/pharos/Opnfv-orange | Paris, France |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 12 | ZTE | https://wiki.opnfv.org/display/pharos/ZTE+SH+Testlab | Shanghai, China |
+| | | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+| 13 | Okinawa | https://wiki.opnfv.org/display/pharos/OOL+TestLab | Okinawa |
+| | Open Lab | | |
++----+---------------+----------------------------------------------------------------------------+-------------------+
+
+
+
+Pharos project Key Facts
+------------------------
+
+**Key Project Facts are maintained in the Pharos INFO file in the project repo**
+
+ * Can be viewed on the project
+ `wiki INFO <https://wiki.opnfv.org/pharos?&#pharos_project_-_key_facts>`_
+ * Project key facts in
+ `repo INFO <https://gerrit.opnfv.org/gerrit/gitweb?p=pharos.git;f=INFO;hb=refs/heads/master>`_
+
diff --git a/docs/jenkins-job-builder/index.rst b/docs/jenkins-job-builder/index.rst
new file mode 100644
index 0000000..4d23ade
--- /dev/null
+++ b/docs/jenkins-job-builder/index.rst
@@ -0,0 +1,9 @@
+===========================
+Release Engineering Project
+===========================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ opnfv-jjb-usage
diff --git a/docs/jenkins-job-builder/opnfv-jjb-usage.rst b/docs/jenkins-job-builder/opnfv-jjb-usage.rst
new file mode 100644
index 0000000..f34833f
--- /dev/null
+++ b/docs/jenkins-job-builder/opnfv-jjb-usage.rst
@@ -0,0 +1,93 @@
+===========================================
+Creating/Configuring/Verifying Jenkins Jobs
+===========================================
+
+Clone and setup the repo::
+
+ git clone ssh://YOU@gerrit.opnfv.org:29418/releng
+ cd releng
+ git review -s
+
+Make changes::
+
+ git commit -sv
+ git review
+ remote: Resolving deltas: 100% (3/3)
+ remote: Processing changes: new: 1, refs: 1, done
+ remote:
+ remote: New Changes:
+ remote: https://gerrit.opnfv.org/gerrit/51
+ remote:
+ To ssh://agardner@gerrit.opnfv.org:29418/releng.git
+ * [new branch] HEAD -> refs/publish/master
+
+Test with tox::
+
+ tox -v -ejjb
+
+Submit the change to gerrit::
+
+ git review -v
+
+Follow the link to gerrit https://gerrit.opnfv.org/gerrit/51. In a few moments
+the verify job will have completed and you will see Verified +1 jenkins-ci in
+the Gerrit UI.
+
+If the changes pass the verify job
+(https://build.opnfv.org/ci/view/builder/job/builder-verify-jjb/),
+the patch can be submitted by a committer.
+
+Job Types
+
+* Verify Job
+
+ * Trigger: **recheck** or **reverify**
+
+* Merge Job
+
+ * Trigger: **remerge**
+
+* Experimental Job
+
+ * Trigger: **check-experimental**
+
+The verify and merge jobs are retriggerable in Gerrit by simply leaving
+a comment with one of the keywords listed above.
+This is useful if you need to re-run one of those jobs because of
+build issues or because something changed in the environment.
+
+The experimental jobs are not triggered automatically. You need to leave
+a comment with the keyword listed above to trigger them manually. This is useful
+for trying out experimental features.
+
+Note that experimental jobs `skip vote`_ for the verified status, which means
+they will reset the verified status to 0. If you want to keep the verified
+status, use **recheck-experimental** in the commit message to trigger both the
+verify and experimental jobs.
+
+You can add the people below as reviewers to your patch in order to get it
+reviewed and submitted.
+
+* fatih.degirmenci@ericsson.com
+* agardner@linuxfoundation.org
+* trozet@redhat.com
+* morgan.richomme@orange.com
+* vlaza@cloudbasesolutions.com
+* matthew.lijun@huawei.com
+* meimei@huawei.com
+* jose.lausuch@ericsson.com
+* koffirodrigue@gmail.com
+* r-mibu@cq.jp.nec.com
+* tbramwell@linuxfoundation.org
+
+Or add the group releng-contributors
+
+Or just email a request for submission to opnfv-helpdesk@rt.linuxfoundation.org
+
+The current merge and verify jobs for Jenkins Job Builder can be found
+in `releng-jobs.yaml`_.
+
+.. _releng-jobs.yaml:
+ https://gerrit.opnfv.org/gerrit/gitweb?p=releng.git;a=blob;f=jjb/releng-jobs.yaml;
+.. _skip vote:
+ https://wiki.jenkins-ci.org/display/JENKINS/Gerrit+Trigger#GerritTrigger-SkipVote
diff --git a/docs/lab-description/images/lab_topology_example.jpg b/docs/lab-description/images/lab_topology_example.jpg
new file mode 100644
index 0000000..d151e9b
--- /dev/null
+++ b/docs/lab-description/images/lab_topology_example.jpg
Binary files differ
diff --git a/docs/lab-description/images/pod_topology_example.png b/docs/lab-description/images/pod_topology_example.png
new file mode 100644
index 0000000..11d0997
--- /dev/null
+++ b/docs/lab-description/images/pod_topology_example.png
Binary files differ
diff --git a/docs/lab-description/index.rst b/docs/lab-description/index.rst
new file mode 100644
index 0000000..23b675a
--- /dev/null
+++ b/docs/lab-description/index.rst
@@ -0,0 +1,27 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos templates and configuration files
+
+****************************************
+Pharos Templates and Configuration Files
+****************************************
+
+Lab and POD templates are provided to help lab owners document capabilities, configurations and
+network topologies. Compute, network and storage specifications with network topology details are
+required to help developers use lab resources efficiently while minimizing support needs. This also
+greatly assists with troubleshooting. It is the responsibility of the lab owner to keep individual
+lab documents updated and determine the appropriate level of detail that is exposed publicly through
+the Wiki or maintained in a secure Pharos repo with controlled access.
+
+The goal of the Pharos Project is automation of resource provisioning. This requires machine
+readable inventory and network configuration files that follow a common format.
+
+
+.. toctree::
+ :maxdepth: 2
+
+ ./lab_description.rst
+ ./pod_description.rst
+ ./inventory.rst
diff --git a/docs/lab-description/inventory.rst b/docs/lab-description/inventory.rst
new file mode 100644
index 0000000..81c0dd5
--- /dev/null
+++ b/docs/lab-description/inventory.rst
@@ -0,0 +1,11 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+******************
+POD Inventory File
+******************
+
+The Common Inventory File provides a template for machine-readable input into every installer. For
+convenience, the YAML file template currently resides in the Genesis Project repo. Refer to the
+following patch for further information: https://gerrit.opnfv.org/gerrit/#/c/4079.
diff --git a/docs/lab-description/lab_description.rst b/docs/lab-description/lab_description.rst
new file mode 100644
index 0000000..d36e8c6
--- /dev/null
+++ b/docs/lab-description/lab_description.rst
@@ -0,0 +1,63 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. _pharos_lab:
+
+**************************
+Lab Specification Template
+**************************
+
+Introduction
+------------
+
+Add a summary of what your lab hosts, its focus areas and purpose.
+
+
+Lab Resources
+-------------
+
++----------+--------------+-----------------+--------------+-------------+---------+-------+
+| POD Name | Project(s) | Project Lead(s) | Email(s) | POD Role | Status | Notes |
++----------+--------------+-----------------+--------------+-------------+---------+-------+
+| POD1 | Project Name | John Doe | john@abc.com | CI: stable | Active | |
++----------+--------------+-----------------+--------------+-------------+---------+-------+
+
+* **POD Name:** Use consistent naming / numbering to avoid confusion. Hyperlinked to POD description.
+* **POD Role:** CI stable, CI latest, Dev/test, Stand-alone, Virtual, ...
+* **Status:** Assigned, Configuring, Active, Troubleshooting, Available, ...
+
+
+Acceptable Usage Policy
+-----------------------
+
+Define lab user policies and expectations
+
+
+Remote Access Infrastructure
+----------------------------
+
+Describe lab remote access setup (typically VPN, also link speed, any known restrictions, etc.)
+
+
+Remote Access Procedure
+-----------------------
+
+Define lab process for requesting access to the lab (e.g. VPN guide,
+how to modify BIOS settings, etc.)
+
+
+Lab Documentation
+-----------------
+
+List lab specific documents here
+
+
+Lab Topology
+------------
+
+Provide a diagram showing the network topology of the lab, including the lights-out network. Any
+security sensitive details should not be exposed publicly. The following diagram is an example only.
+
+.. image:: ./images/lab_topology_example.jpg
+ :alt: Lab diagram not found
diff --git a/docs/lab-description/pod_description.rst b/docs/lab-description/pod_description.rst
new file mode 100644
index 0000000..856d5c2
--- /dev/null
+++ b/docs/lab-description/pod_description.rst
@@ -0,0 +1,99 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. _pharos_pod:
+
+**************************
+POD Specification Template
+**************************
+
+Introduction
+------------
+
+Add a summary of the POD usage (Project, CI stable, CI latest, dev/test, stand-alone servers, etc.)
+
+
+Additional Requirements
+-----------------------
+
+Describe any additional POD requirements beyond a standard Pharos compliant POD, e.g. test equipment,
+shared usage, ...
+
+
+Server Specifications
+---------------------
+
+**Jump Host**
+
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+-------------------+-------------------+-------+
+| | | | | | | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | Storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+-------------------+-------------------+-------+
+| jump | Dell | R730 | ABCDEF007 | E5-2699x2 | 64 GB | 240GB SSD | 10.10.10.10 | IF0: 10.2.117.36 | IF2: 10.2.12.1 | |
+| | | | | | | 1 TB SATA | 00:1E:67:D4:36:9A | 00:1E:67:4F:B7:B1 | 00:1E:67:4F:B7:B4 | |
+| | | | | | | | root/root | VLAN 984 | VLAN 202 | |
+| | | | | | | | | Public | Private | |
+| | | | | | | | | IF1: 10.2.1.1 | IF3: 10.2.13.1 | |
+| | | | | | | | | 00:1E:67:4F:B7:B2 | 00:1E:67:4F:B7:B5 | |
+| | | | | | | | | VLAN 201 | VLAN 203 | |
+| | | | | | | | | Admin | Storage | |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+-------------------+-------------------+-------+
+
+
+**Compute Nodes**
+
++----------+--------+-------+---------------+------+--------+---------+---------------------+------------------+------------------+-------+
+| | | | | | | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | Storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++----------+--------+-------+---------------+------+--------+---------+---------------------+------------------+------------------+-------+
+| node1 | | | | | | | | | | |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
++----------+--------+-------+---------------+------+--------+---------+---------------------+------------------+------------------+-------+
+| node2 | | | | | | | | | | |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
++----------+--------+-------+---------------+------+--------+---------+---------------------+------------------+------------------+-------+
+| node3 | | | | | | | | | | |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
++----------+--------+-------+---------------+------+--------+---------+---------------------+------------------+------------------+-------+
+| node4 | | | | | | | | | | |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
++----------+--------+-------+---------------+------+--------+---------+---------------------+------------------+------------------+-------+
+| node5 | | | | | | | | | | |
+| | | | | | | | | | | |
+| | | | | | | | | | | |
++----------+--------+-------+---------------+------+--------+---------+---------------------+------------------+------------------+-------+
+
+VPN Users
+---------
+
++----------+--------------+---------+-------------+------------+
+| Name | Email | Project | Role | Notes |
++----------+--------------+---------+-------------+------------+
+| joe user | ju@gmail.com | Pharos | contributor | CI support |
++----------+--------------+---------+-------------+------------+
+
+
+Firewall Rules
+--------------
+
++------------+------------+-------+
+| Port(s) | Service | Notes |
++------------+------------+-------+
+| 22, 43, 80 | Jenkins CI | |
++------------+------------+-------+
+
+
+POD Topology
+------------
+
+Provide a diagram showing the network topology of the POD. Any security sensitive details should not
+be exposed publicly and can be stored in the secure Pharos repo. The following diagram is an
+example only.
+
+.. image:: ./images/pod_topology_example.png
+ :alt: POD diagram not found
diff --git a/docs/labs/Dell.rst b/docs/labs/Dell.rst
new file mode 100644
index 0000000..d93eb5d
--- /dev/null
+++ b/docs/labs/Dell.rst
@@ -0,0 +1,859 @@
+Dell OPNFV Testlab
+==================================================
+
+Overview
+------------------
+
+Dell is hosting an OPNFV testlab at its Santa Clara facility. The testlab hosts bare-metal
+servers for use by the OPNFV community as part of the OPNFV Pharos Project.
+
+
+The Dell Testlab consists of 2 PODs
+ * POD1 for Fuel
+ * POD2 for Foreman
+
+.. image:: images/Dell_Overview.jpg
+ :height: 553
+ :width: 449
+ :alt: Dell Testlab Overview
+ :align: left
+
+Each of the 2 PODs consists of 6 servers:
+ * 1 Jump Server
+ * 3 Servers for Control Nodes
+ * 2 Servers for Compute Nodes
+
+
+
+Hardware details
+-----------------
+
+All the servers within the two PODs reside within a single Dell PowerEdge 620 chassis and have the
+following specifications:
+
+POD1-Fuel
+^^^^^^^^^
+
++------------------+----------------------+--------+------------+---------------------+--------+
+| Hostname | Model | Memory | Storage | Processor | Socket |
++------------------+----------------------+--------+------------+---------------------+--------+
+| Fuel Jump Server | Dell PowerEdge M620 | 64 GB | 1200GB HDD | Intel Xeon E5-2640 | 2 |
++------------------+----------------------+--------+------------+---------------------+--------+
+| Node2 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++------------------+----------------------+--------+------------+---------------------+--------+
+| Node3 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++------------------+----------------------+--------+------------+---------------------+--------+
+| Node4 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++------------------+----------------------+--------+------------+---------------------+--------+
+| Node5 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++------------------+----------------------+--------+------------+---------------------+--------+
+| Node6 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++------------------+----------------------+--------+------------+---------------------+--------+
+
+POD2-Foreman
+^^^^^^^^^^^^
+
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+| Hostname | Model | Memory | Storage | Processor | Socket |
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+| Foreman Jump Server | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+| Node7 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+| Node8 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+| Node9 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+| Node11 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+| Node12 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++---------------------+----------------------+-----------+-----------+---------------------+--------+
+
+Software
+---------
+
+The Jump servers in the Testlab are pre-provisioned with the following software:
+
+ * Fuel-Jump Server:
+
+ 1. OS: Ubuntu 14.04
+
+
+ * Foreman-Jump Server:
+
+ 1. OS: CentOS7
+
+Networks
+----------
+
+POD1-Fuel
+^^^^^^^^^
+.. image:: images/Dell_POD1.jpg
+ :height: 647
+ :width: 821
+ :alt: POD1-Fuel Overview
+ :align: left
+
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| Hostname | NIC Model | Ports | MAC | BW | Roles |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| Fuel Jump | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:81 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | em2 | A4:1F:72:11:B4:84 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:85 | 10G | Public |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p3p2 | A4:1F:72:11:B4:87 | 10G | Fuel Admin/mgmt/pvt/ storage |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:89 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p1p2 | A4:1F:72:11:B4:8B | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| Node2 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:8E | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | em2 | A4:1F:72:11:B4:91 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:92 | 10G | Public |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p3p2 | A4:1F:72:11:B4:94 | 10G | Fuel Admin/mgmt/pvt/ storage |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:96 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p1p2 | A4:1F:72:11:B4:98 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| Node3 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:9B | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | em2 | A4:1F:72:11:B4:9E | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:9F | 10G | Public |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p3p2 | A4:1F:72:11:B4:A1 | 10G | Fuel Admin/mgmt/pvt/ storage |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:A3 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p1p2 | A4:1F:72:11:B4:A5 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| Node4 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:A8 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | em2 | A4:1F:72:11:B4:AB | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:AC | 10G | Public |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p3p2 | A4:1F:72:11:B4:AE | 10G | Fuel Admin/mgmt/pvt/ storage |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:B0 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p1p2 | A4:1F:72:11:B4:B1 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| Node5 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:B5 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | em2 | A4:1F:72:11:B4:B8 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:B9 | 10G | Public |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p3p2 | A4:1F:72:11:B4:BB | 10G | Fuel Admin/mgmt/pvt/ storage |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:BD | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p1p2 | A4:1F:72:11:B4:C0 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| Node6 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:C2 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | em2 | A4:1F:72:11:B4:C5 | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:C6 | 10G | Public |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p3p2 | A4:1F:72:11:B4:C8 | 10G | Fuel Admin/mgmt/pvt/ storage |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:CA | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+| | | p1p2 | A4:1F:72:11:B4:CD | 10G | Unused |
++-----------+-------------------------------------+-------+-------------------+-----+------------------------------+
+
+
+
+POD2-Foreman
+^^^^^^^^^^^^
+
+.. image:: images/Dell_POD2.jpg
+ :height: 721
+ :width: 785
+ :alt: POD2-Foreman Overview
+ :align: left
+
+
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| Hostname | NIC Model | Ports | MAC | BW | Roles |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| Foreman Jump | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B5:1D | 10G | Foreman Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | em2 | A4:1F:72:11:B5:20 | 10G | Foreman Private/ Storage |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B5:21 | 10G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p3p2 | A4:1F:72:11:B5:23 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 3, TBD | p1p1 | A4:1F:72:11:B4:89 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p1p2 | A4:1F:72:11:B4:8B | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| Node7 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:CF | 10G | Foreman Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | em2 | A4:1F:72:11:B4:D2 | 10G | Foreman Private/ Storage |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:D3 | 10G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p3p2 | A4:1F:72:11:B4:D5 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:D7 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p1p2 | A4:1F:72:11:B4:DA | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| Node8 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:DC | 10G | Foreman Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | em2 | A4:1F:72:11:B4:DF | 10G | Foreman Private/ Storage |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:E0 | 10G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p3p2 | A4:1F:72:11:B4:E2 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:E4 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p1p2 | A4:1F:72:11:B4:E7 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| Node9 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:E9 | 10G | Foreman Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | em2 | A4:1F:72:11:B4:EC | 10G | Foreman Private/ Storage |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:ED | 10G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p3p2 | A4:1F:72:11:B4:EF | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:F1 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p1p2 | A4:1F:72:11:B4:F3 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| Node11 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B5:03 | 10G | Foreman Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | em2 | A4:1F:72:11:B5:06 | 10G | Foreman Private/ Storage |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B5:07 | 10G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p3p2 | A4:1F:72:11:B5:09 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B5:0B | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p1p2 | A4:1F:72:11:B5:0D | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| Node12 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B5:10 | 10G | Foreman Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | em2 | A4:1F:72:11:B5:13 | 10G | Foreman Private/ Storage |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B5:14 | 10G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p3p2 | A4:1F:72:11:B5:16 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | 3, TBD | p1p1 | A4:1F:72:11:B4:89 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+| | | p1p2 | A4:1F:72:11:B4:8B | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+--------------------------+
+
+Subnet allocations
+^^^^^^^^^^^^^^^^^^
+
++-----------------+--------------+------------------+--------------+----------+
+| Network name | Address | Mask | Gateway | VLAN id |
++-----------------+--------------+------------------+--------------+----------+
+| Fuel Admin | 10.20.0.0 | 255.255.0.0 | 10.20.0.1 | Untagged |
++-----------------+--------------+------------------+--------------+----------+
+| Fuel Management | 192.168.0.0  | 255.255.255.0    | 192.168.0.1  | 101      |
++-----------------+--------------+------------------+--------------+----------+
+| Fuel Storage | 192.168.1.0 | 255.255.255.0 | 192.168.1.1 | 102 |
++-----------------+--------------+------------------+--------------+----------+
+| Fuel Public | 172.18.0.64 | 255.255.255.192 | 172.18.0.65 | Untagged |
++-----------------+--------------+------------------+--------------+----------+
+| Foreman Admin | 10.4.14.0 | 255.255.255.0 | 10.4.14.100 | Untagged |
++-----------------+--------------+------------------+--------------+----------+
+| Foreman Private | 10.4.5.0 | 255.255.255.0 | 10.4.5.1 | Untagged |
++-----------------+--------------+------------------+--------------+----------+
+| Foreman Public | 172.18.0.0 | 255.255.255.192 | 172.18.0.1 | Untagged |
++-----------------+--------------+------------------+--------------+----------+
+| Lights Out | 172.18.0.128 | 255.255.255.128 | 172.18.0.129 | Untagged |
++-----------------+--------------+------------------+--------------+----------+
+
+
+Lights out Network
+^^^^^^^^^^^^^^^^^^
+
+**POD1**
+
++-----------+--------------------+-------------------+-------------+-------------+
+| Hostname | Lights-out address | MAC | Username | Password |
++-----------+--------------------+-------------------+-------------+-------------+
+| Fuel-Jump | 172.18.1.131 | A4:1F:72:11:B4:80 | root | calvin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node2 | 172.18.1.132 | A4:1F:72:11:B4:8D | root | calvin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node3 | 172.18.1.133 | A4:1F:72:11:B4:9A | root | calvin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node4 | 172.18.1.134 | A4:1F:72:11:B4:A7 | root | calvin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node5 | 172.18.1.135 | A4:1F:72:11:B4:B4 | root | calvin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node6 | 172.18.1.136 | A4:1F:72:11:B4:C1 | root | calvin |
++-----------+--------------------+-------------------+-------------+-------------+
+
+**POD2**
+
++--------------+--------------------+-------------------+-------------+-------------+
+| Hostname | Lights-out address | MAC | Username | Password |
++--------------+--------------------+-------------------+-------------+-------------+
+| Foreman-Jump | 172.18.1.143 | A4:1F:72:11:B5:1C | root | calvin |
++--------------+--------------------+-------------------+-------------+-------------+
+| Node7 | 172.18.1.137 | A4:1F:72:11:B4:CE | root | calvin |
++--------------+--------------------+-------------------+-------------+-------------+
+| Node8 | 172.18.1.138 | A4:1F:72:11:B4:DB | root | calvin |
++--------------+--------------------+-------------------+-------------+-------------+
+| Node9 | 172.18.1.139 | A4:1F:72:11:B4:E8 | root | calvin |
++--------------+--------------------+-------------------+-------------+-------------+
+| Node11 | 172.18.1.141 | A4:1F:72:11:B5:02 | root | calvin |
++--------------+--------------------+-------------------+-------------+-------------+
+| Node12 | 172.18.1.142 | A4:1F:72:11:B5:0F | root | calvin |
++--------------+--------------------+-------------------+-------------+-------------+
+
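+The lights-out interfaces listed above can typically be reached with a standard IPMI
+client such as ipmitool once you are on the Lights Out network. A minimal sketch, using
+the Node2 entry from the POD1 table (exact options may vary with your local setup):
+
+.. code-block:: bash
+
+   # Query and control a node through its lights-out interface with ipmitool.
+   # Address and credentials are taken from the POD1 table above; adjust as needed.
+   ipmitool -I lanplus -H 172.18.1.132 -U root -P calvin chassis power status
+   ipmitool -I lanplus -H 172.18.1.132 -U root -P calvin chassis power cycle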
+
+Remote access infrastructure
+-----------------------------
+
+The Dell OPNFV testlab is free to use for the OPNFV community.
+
+A VPN is used to provide access to the Dell Testlab.
+
+To access the Testlab, please contact Waqas_Riaz@DELL.com with the following details:
+ * Name
+ * Email
+ * Designation
+ * Organization
+ * Purpose of using the lab
+
+Processing the request can take 2-3 business days.
+
+*Accessing the Testlab*
+-----------------------
+
+POD1 JumpServer
+^^^^^^^^^^^^^^^
+
+ IP: 172.18.0.67
+ User: opnfv
+ Passwd: d3ll1234
+
+
+POD2 JumpServer
+^^^^^^^^^^^^^^^
+ IP: 172.18.0.11
+ User: opnfv
+ Passwd: d3ll1234
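+
+Once connected to the lab VPN, a plain SSH session to a jump server should be all that is
+needed; a minimal sketch using the addresses and user listed above:
+
+.. code-block:: bash
+
+   # Log in to the jump servers over the VPN (credentials as listed above).
+   ssh opnfv@172.18.0.67   # POD1 jump server
+   ssh opnfv@172.18.0.11   # POD2 jump server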
+
+==================
+Dell OPNFV Testlab
+==================
+
+Overview
+------------------
+
+Dell is hosting an OPNFV testlab at its Santa Clara facility. The testlab hosts bare-metal
+servers for use by the OPNFV community as part of the OPNFV Pharos Project.
+
+
+The Dell Testlab consists of 3 PODs for use by the community:
+ * POD1 (Jenkins slave: dell-us-testing-bm-1)
+ * POD2 (Jenkins slave: dell-us-deploying-bm2)
+ * POD3 (Jenkins slave: dell-us-delpoyingbm3)
+
+.. image:: images/Dell_Overview.jpg
+   :height: 648
+   :width: 735
+   :alt: Dell Testlab Overview
+   :align: left
+
+POD1 and POD2 each consist of 6 servers:
+ * 1 Jump Server
+ * 3 Servers for Control Nodes
+ * 2 Servers for Compute Nodes
+
+
+
+Hardware details
+-----------------
+
+For POD1 and POD2, the servers reside within a single Dell PowerEdge 620 chassis and have the
+following specifications:
+
+
+
+**POD1**
+
++-------------+----------------------+--------+------------+---------------------+--------+
+| Hostname | Model | Memory | Storage | Processor | Socket |
++-------------+----------------------+--------+------------+---------------------+--------+
+| Jump Server | Dell PowerEdge M620 | 64 GB | 1200GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+--------+------------+---------------------+--------+
+| Node2 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+--------+------------+---------------------+--------+
+| Node3 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+--------+------------+---------------------+--------+
+| Node4 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+--------+------------+---------------------+--------+
+| Node5 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+--------+------------+---------------------+--------+
+| Node6 | Dell PowerEdge M620 | 64 GB | 600GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+--------+------------+---------------------+--------+
+
+
+
+
+**POD2**
+
++-------------+----------------------+-----------+-----------+---------------------+--------+
+| Hostname | Model | Memory | Storage | Processor | Socket |
++-------------+----------------------+-----------+-----------+---------------------+--------+
+| Jump Server | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2630 | 2 |
++-------------+----------------------+-----------+-----------+---------------------+--------+
+| Node7 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+-----------+-----------+---------------------+--------+
+| Node8 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+-----------+-----------+---------------------+--------+
+| Node9 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+-----------+-----------+---------------------+--------+
+| Node11 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+-----------+-----------+---------------------+--------+
+| Node12 | Dell PowerEdge M620 | 64 GB | 300GB HDD | Intel Xeon E5-2640 | 2 |
++-------------+----------------------+-----------+-----------+---------------------+--------+
+
+
+POD3 consists of 6 R630 Rack servers with the following specifications:
+
+**POD3**
+
++-------------+----------------------+--------+-----------+---------------------+--------+
+| Hostname | Model | Memory | Storage | Processor | Socket |
++-------------+----------------------+--------+-----------+---------------------+--------+
+| Jump Server | Dell PowerEdge R630 | 128 GB | 750GB SSD | Intel Xeon E5-2698 | 2 |
++-------------+----------------------+--------+-----------+---------------------+--------+
+| Node2 | Dell PowerEdge R630 | 128 GB | 750GB SSD | Intel Xeon E5-2698 | 2 |
++-------------+----------------------+--------+-----------+---------------------+--------+
+| Node3 | Dell PowerEdge R630 | 128 GB | 750GB SSD | Intel Xeon E5-2698 | 2 |
++-------------+----------------------+--------+-----------+---------------------+--------+
+| Node4 | Dell PowerEdge R630 | 128 GB | 750GB SSD | Intel Xeon E5-2698 | 2 |
++-------------+----------------------+--------+-----------+---------------------+--------+
+| Node5 | Dell PowerEdge R630 | 128 GB | 750GB SSD | Intel Xeon E5-2698 | 2 |
++-------------+----------------------+--------+-----------+---------------------+--------+
+| Node6 | Dell PowerEdge R630 | 128 GB | 750GB SSD | Intel Xeon E5-2698 | 2 |
++-------------+----------------------+--------+-----------+---------------------+--------+
+
+
+
+Software
+---------
+
+The Jump servers in the Testlab are pre-provisioned with the following software:
+
+ * POD1-Jump Server:
+
+ 1. OS: Ubuntu 14.04
+
+
+ * POD2-Jump Server:
+
+ 1. OS: CentOS7.1
+
+
+ * POD3-Jump Server:
+
+ 1. OS: CentOS7.1
+
+
+Networks
+----------
+
+
+
+**POD1**
+
+.. image:: images/Dell_POD1.jpg
+ :height: 649
+ :width: 815
+ :alt: POD1-Fuel Overview
+ :align: left
+
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| Hostname | NIC Model | Ports | MAC | BW | VLANs/Roles |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| Jump Server | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:81 | 10G | PXE |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:84 | 10G | Internal Networks (101-106) |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:85 | 1G | Public |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:87 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:89 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:8B | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node2 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:8E | 10G | PXE |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:91 | 10G | Internal Networks (101-106) |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:92 | 1G | Public |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:94 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:96 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:98 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node3 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:9B | 10G | PXE |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:9E | 10G | Internal Networks (101-106) |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:9F | 1G | Public |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:A1 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:A3 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:A5 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node4 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:A8 | 10G | PXE |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:AB | 10G | Internal Networks (101-106) |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:AC | 1G | Public |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:AE | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:B0 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:B1 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node5 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:B5 | 10G | PXE |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:B8 | 10G | Internal Networks (101-106) |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:B9 | 1G | Public |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:BB | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:BD | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:C0 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node6 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:C2 | 10G | PXE |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:C5 | 10G | Internal Networks (101-106) |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:C6 | 1G | Public |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:C8 | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:CA | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:CD | 10G | Unused |
++-------------+-------------------------------------+-------+-------------------+-----+-----------------------------+
+
+
+
+**POD2**
+
+.. image:: images/Dell_POD2.jpg
+ :height: 602
+ :width: 815
+ :alt: POD2 Overview
+ :align: left
+
+
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| Hostname | NIC Model | Ports | MAC | BW | Roles |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| Foreman Jump | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B5:1D | 10G | PXE |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B5:20 | 10G | Internal Networks (201-205) |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B5:21 | 1G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B5:23 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, TBD | p1p1 | A4:1F:72:11:B4:89 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:8B | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node7 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:CF | 10G | PXE |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:D2 | 10G | Internal Networks (201-205) |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:D3 | 1G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:D5 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:D7 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:DA | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node8 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:DC | 10G | PXE |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:DF | 10G | Internal Networks (201-205) |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:E0 | 1G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:E2 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Broadcom NetXtreme II BCM57810 | p1p1 | A4:1F:72:11:B4:E4 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:E7 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node9 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B4:E9 | 10G | PXE |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B4:EC | 10G | Internal Networks (201-205) |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B4:ED | 1G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B4:EF | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B4:F1 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:F3 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node11 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B5:03 | 10G | PXE |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B5:06 | 10G | Internal Networks (201-205) |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B5:07 | 10G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B5:09 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, Intel 82599 | p1p1 | A4:1F:72:11:B5:0B | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B5:0D | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| Node12 | 1, Broadcom NetXtreme II BCM57810 | em1 | A4:1F:72:11:B5:10 | 10G | PXE |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | em2 | A4:1F:72:11:B5:13 | 10G | Internal Networks (201-205) |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 2, Intel 82599 | p3p1 | A4:1F:72:11:B5:14 | 1G | Public |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p3p2 | A4:1F:72:11:B5:16 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | 3, TBD | p1p1 | A4:1F:72:11:B4:89 | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+| | | p1p2 | A4:1F:72:11:B4:8B | 10G | Unused |
++--------------+--------------------------------------+-------+-------------------+-----+-----------------------------+
+
+
+
+
+**POD3**
+
+
+
+.. image:: images/Dell_POD3.jpg
+ :height: 652
+ :width: 815
+ :alt: POD3 Overview
+ :align: left
+
+
+
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| Hostname | NIC Model | Ports | MAC | BW | Roles (VLANs) |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| Jump Server | 1, Intel 2P X520/2P I350 rNDC | em1 | EC:F4:BB:D7:14:20 | 1G | PXE |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | em2 | EC:F4:BB:D7:14:22 | 10G | Internal Networks (201,202,203) |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | p3p1 | EC:F4:BB:D7:14:24 | 1G | Public |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| Node1 | 1, Intel 2P X520/2P I350 rNDC | em1 | EC:F4:BB:D6:F2:98 | 10G | PXE |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | em2 | EC:F4:BB:D6:F2:9A | 10G | Internal Networks (201,202,203) |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | p3p1 | EC:F4:BB:D6:F2:9C | 1G | Public |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| Node2 | 1, Intel 2P X520/2P I350 rNDC | em1 | EC:F4:BB:D6:F9:10 | 1G | PXE |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | em2 | EC:F4:BB:D6:F9:12 | 10G | Internal Networks (201,202,203) |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | p3p1 | EC:F4:BB:D6:F9:14 | 1G | Public |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| Node3 | 1, Intel 2P X520/2P I350 rNDC | em1 | EC:F4:BB:D7:C9:B8 | 1G | PXE |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | em2 | EC:F4:BB:D7:C9:BA | 10G | Internal Networks (201,202,203) |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | p3p1 | EC:F4:BB:D7:C9:BC | 1G | Public |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| Node4 | 1, Intel 2P X520/2P I350 rNDC | em1 | EC:F4:BB:D7:16:E8 | 10G | PXE |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | em2 | EC:F4:BB:D7:16:EA | 10G | Internal Networks (201,202,203) |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | p3p1 | EC:F4:BB:D7:16:EA | 1G | Public |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| Node5 | 1, Intel 2P X520/2P I350 rNDC | em1 | EC:F4:BB:D6:FE:98 | 1G | Unused |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | em2 | EC:F4:BB:D6:FE:9A | 10G | Internal Networks (201,202,203) |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+| | | | | | |
+| | | p3p1 | EC:F4:BB:D6:FE:9C | 1G | Public |
+| | | | | | |
++-------------+--------------------------------+-------+-------------------+-----+----------------------------------+
+
+
+**Subnet allocations**
+
++--------------+--------------+------------------+--------------+----------+
+| Network name | Address | Mask | Gateway | VLAN id |
++--------------+--------------+------------------+--------------+----------+
+| POD1 Public | 172.18.0.64 | 255.255.255.192 | 172.18.0.65 | Untagged |
++--------------+--------------+------------------+--------------+----------+
+| POD2 Public | 172.18.0.0 | 255.255.255.192 | 172.18.0.1 | Untagged |
++--------------+--------------+------------------+--------------+----------+
+| POD3 Public | 172.18.1.0 | 255.255.255.0 | 172.18.1.1 | Untagged |
++--------------+--------------+------------------+--------------+----------+
+| Lights Out | 172.18.0.128 | 255.255.255.128 | 172.18.0.129 | Untagged |
++--------------+--------------+------------------+--------------+----------+
+
+
+**Lights out Network**
+
+**POD1**
+
++----------+--------------------+-------------------+-------------+-------------+
+| Hostname | Lights-out address | MAC | Username | Password |
++----------+--------------------+-------------------+-------------+-------------+
+| Jump | 172.18.0.131 | A4:1F:72:11:B4:80 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node2 | 172.18.0.132 | A4:1F:72:11:B4:8D | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node3 | 172.18.0.133 | A4:1F:72:11:B4:9A | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node4 | 172.18.0.134 | A4:1F:72:11:B4:A7 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node5 | 172.18.0.135 | A4:1F:72:11:B4:B4 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node6 | 172.18.0.136 | A4:1F:72:11:B4:C1 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+
+**POD2**
+
++----------+--------------------+-------------------+-------------+-------------+
+| Hostname | Lights-out address | MAC | Username | Password |
++----------+--------------------+-------------------+-------------+-------------+
+| Jump | 172.18.0.143 | A4:1F:72:11:B5:1C | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node7 | 172.18.0.137 | A4:1F:72:11:B4:CE | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node8 | 172.18.0.138 | A4:1F:72:11:B4:DB | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node9 | 172.18.0.139 | A4:1F:72:11:B4:E8 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node11 | 172.18.0.141 | A4:1F:72:11:B5:02 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node12 | 172.18.0.142 | A4:1F:72:11:B5:0F | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+
+
+
+**POD3**
+
++----------+--------------------+-------------------+-------------+-------------+
+| Hostname | Lights-out address | MAC | Username | Password |
++----------+--------------------+-------------------+-------------+-------------+
+| Jump | 172.18.0.181 | 74:E6:E2:FA:BB:D8 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node1 | 172.18.0.182 | 74:E6:E2:FA:E9:2E | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node2 | 172.18.0.183 | 74:E6:E2:FA:FC:E2 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node3 | 172.18.0.184 | 74:E6:E2:FB:05:68 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node4 | 172.18.0.185 | 74:E6:E2:FA:A4:02 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+| Node5 | 172.18.0.186 | 74:E6:E2:FA:E4:18 | root | calvin |
++----------+--------------------+-------------------+-------------+-------------+
+
+
+Remote access infrastructure
+----------------------------
+
+The Dell OPNFV testlab is free to use for the OPNFV community.
+
+A VPN is used to provide access to the Dell Testlab.
+
+To access the Testlab, please visit the Dell OPNFV Lab's wiki page
+(https://wiki.opnfv.org/dell_hosting) for details.
+
+
+*Accessing the Testlab*
+-----------------------
+
+* POD1 JumpServer
+
+ IP: 172.18.0.67
+
+ User: opnfv
+
+ Passwd: d3ll1234
+
+
+
+* POD2 JumpServer
+
+ IP: 172.18.0.11
+
+ User: opnfv
+
+ Passwd: d3ll1234
+
+
+
+* POD3 JumpServer
+
+ IP: 172.18.1.3
+
+ User: opnfv
+
+ Passwd: d3ll1234
diff --git a/docs/labs/ericsson/images/ericsson-opnfv-topology.png b/docs/labs/ericsson/images/ericsson-opnfv-topology.png
new file mode 100644
index 0000000..b3c86d3
--- /dev/null
+++ b/docs/labs/ericsson/images/ericsson-opnfv-topology.png
Binary files differ
diff --git a/docs/labs/ericsson/index.rst b/docs/labs/ericsson/index.rst
new file mode 100644
index 0000000..f7058ec
--- /dev/null
+++ b/docs/labs/ericsson/index.rst
@@ -0,0 +1,15 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos templates and configuration files
+
+**************************************
+ERICSSON OPNFV Lab Configuration Files
+**************************************
+
+
+.. toctree::
+ :maxdepth: 2
+
+ ./lab_description.rst
diff --git a/docs/labs/ericsson/lab_description.rst b/docs/labs/ericsson/lab_description.rst
new file mode 100644
index 0000000..b4c5b85
--- /dev/null
+++ b/docs/labs/ericsson/lab_description.rst
@@ -0,0 +1,148 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. _pharos_lab:
+
+*********************************
+Ericsson OPNFV Lab Specification
+*********************************
+
+
+Introduction
+------------
+
+Ericsson OPNFV Lab currently has 2 bare-metal and 3 virtual PODs available globally (hosted in
+the GIC). Each POD has 5 servers, comprising 3 controller nodes (HA) and 2 compute nodes. NOTE:
+this may differ depending on the scenario.
+
+.. _pharos_pod:
+
+These PODs are dedicated for use by Production/CI. They focus on providing verification,
+build, deploy and testing for scenarios related to **test** projects, **installer** projects and
+performance enhancement projects, such as KVM, OVS and FDS.
+
+In addition to the full-time CI/CD resources, the Ericsson OPNFV lab provides developer labs (DRs)
+for project usage, testing and development.
+
+Definitions of the scenarios serviced by this lab can be found here:
+`Colorado Scenario Status <https://wiki.opnfv.org/display/SWREL/Colorado+Scenario+Status>`_
+
+Lab Resources
+-------------
+
+- `Ericsson Hosting and Request Page <https://wiki.opnfv.org/display/pharos/Ericsson+Hosting+and+Request+Process>`_
+
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| POD Name | Project(s) | PTL(s) | Email(s) | POD Role | Status | Notes |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| POD1 | CI/CD | Daniel | daniel.smith@ericsson.com | CI: latest | Active | BM-CI |
+| | | Smith | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| POD2 | CI/CD | Daniel | daniel.smith@ericsson.com | CI: latest | Active | BM-CI |
+| | | Smith | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| vPOD1 | CI/CD | Fatih | fatih.degirmenci@ericsson.com | CI: latest | Active | Virt-CI |
+| | | Degirmenci | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-166 | FUEL | Constant | constant.wette@ericsson.com | DR: B-rel | Active | Nested |
+| | | Wette | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-167 | OVSNFV | Billy | billy.omahoney@intel.com | DR: C-rel | Active | Hybrid |
+| | | O'Mahoney | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-174 | GLUON | Bin | bh526r@att.com | DR: D-rel | Active | Nested* |
+| | | Hu | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-180 | SAVI | Rick | richard.brunner@ericsson.com | DR: D-rel | Active | Nested* |
+| | | Brunner | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-181 | IPV6-MULTI | Bin | bh526r@att.com | DR: D-rel | Active | Nested* |
+| | | Hu | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-191 | AUTO-DEP | Peter | Peter.Barabas@ericsson.com | DR: C-rel | Active | Nested* |
+| | | Barabas | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-199 | SDN-L3 | Tim | Tim.Irnich@ericsson.com | DR: C-rel | Active | Nested* |
+| | | Irnich | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-236 | LLT-TOOL | Jose | Jose.Lausuch@ericsson.com | DR: C-rel | Active | Nested* |
+| | | Lausuch | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+| PHAROS-253 | ODL-II | Nikolas | Nikolas.Hermanns@ericsson.com | DR: C-rel | Active | Nested* |
+| | | Hermanns | | | | |
++------------+------------+------------+-------------------------------+------------+--------+---------+
+
+
+- `ACTIVE CI/CD LAB SPECS <https://wiki.opnfv.org/pages/viewpage.action?pageId=6829012>`_
+* `CI-ERICSSON-POD1 wiki page <https://wiki.opnfv.org/display/pharos/CI-ERICSSON-POD1>`_
+* `CI-ERICSSON-POD2 wiki page <https://wiki.opnfv.org/display/pharos/CI-ERICSSON-POD2>`_
+- `ACTIVE LAB SPECS <https://wiki.opnfv.org/display/pharos/Active+Lab+Specs>`_
+* `PHAROS-166 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-166%3A+++++++PaaS+PoC>`_
+* `PHAROS-167 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-167%3A+OVS-NFV+BareMetal+Lab>`_
+* `PHAROS-174 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-174%3A+Gluon+PoC+for+OPNFV+Summit>`_
+* `PHAROS-180 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-180%3A+++++++SAVI+CDN+POC>`_
+* `PHAROS-181 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-181%3A+IPV6+Multisite>`_
+* `PHAROS-191 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-191%3A+++++++Colorado+-+Autodeployer+Uplift>`_
+* `PHAROS-199 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-199%3A+++++++ODL-L3+troubleshooting>`_
+* `PHAROS-236 wiki page <https://wiki.opnfv.org/display/pharos/PHAROS-236%3A+Tracing+Tool+-+LLTng>`_
+* `PHAROS-253 wiki page <https://wiki.opnfv.org/pages/viewpage.action?pageId=6828594>`_
+- `Decommissioned Requests <https://wiki.opnfv.org/display/pharos/Decommissioned+Lab+Request>`_
+
+
+Acceptable Usage Policy
+-----------------------
+
+Resources located in the Ericsson OPNFV lab shall only be used for CI, infra setup/configuration
+and troubleshooting purposes. No development work is allowed in these PODs. Development work
+should only be performed on the DR labs assigned to individual projects.
+
+
+Remote Access Infrastructure
+----------------------------
+
+The Ericsson OPNFV lab provides an SSH gateway that allows unlimited port-forwarding, as well as
+Remote Desktop, VNC and SOCKS proxy capability, allowing the end user to work as though directly
+connected to the lab.
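+
+A minimal sketch of how such a gateway is commonly used; the gateway address, user and the
+forwarded lab-internal host below are placeholders for the details supplied with your access
+grant:
+
+.. code-block:: bash
+
+   # Forward a lab-internal service to your workstation through the SSH gateway.
+   ssh -L 8080:<lab-internal-host>:8080 <user>@<ssh-gateway>
+
+   # Or open a dynamic SOCKS proxy and point your browser/tools at localhost:1080.
+   ssh -D 1080 <user>@<ssh-gateway>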
+
+Remote Access Procedure
+-----------------------
+
+Access to this environment can be granted by sending an e-mail to: **daniel.smith@ericsson.com**.
+
+Subject: ericsson opnfv access.
+
+The following information should be provided in the request:
+
+::
+
+ Full name:
+ E-mail:
+ Organization:
+ Why is access needed:
+ How long is access needed:
+ Number of Hosts required:
+ Topology Required (HA, SA):
+ Feature/Plugins/Options Required (DPDK, ODL, ONOS):
+
+Enclose a copy of your id_rsa.pub (public key) with your request and a login will be created for you.
+
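+If you do not already have a key pair, one common way to generate one (the comment string and
+file path below are only examples) is:
+
+.. code-block:: bash
+
+   # Generate an RSA key pair; attach the resulting id_rsa.pub to your request.
+   ssh-keygen -t rsa -b 4096 -C "your.name@example.com" -f ~/.ssh/id_rsa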
+
+Lab Documentation
+-----------------
+
+
+Lab Topology
+------------
+
+.. image:: ./images/ericsson-opnfv-topology.png
+ :alt: Lab diagram not found
+
+Each POD is an individual entity with its own set of independent networks, allowing for
+interconnection between DR labs and intra-connections within multiple nested DRs, all without
+touching the CI/CD running in production.
+
+Refer to each lab-specific wiki page for IP, login and topology information.
+
diff --git a/docs/labs/huawei-us-lab/huawei-lab-pod.png b/docs/labs/huawei-us-lab/huawei-lab-pod.png
new file mode 100644
index 0000000..cfb51c9
--- /dev/null
+++ b/docs/labs/huawei-us-lab/huawei-lab-pod.png
Binary files differ
diff --git a/docs/labs/huawei-us-lab/huawei-lab-topology.png b/docs/labs/huawei-us-lab/huawei-lab-topology.png
new file mode 100644
index 0000000..681a10c
--- /dev/null
+++ b/docs/labs/huawei-us-lab/huawei-lab-topology.png
Binary files differ
diff --git a/docs/labs/huawei-us-lab/huawei-lab-virtual.png b/docs/labs/huawei-us-lab/huawei-lab-virtual.png
new file mode 100644
index 0000000..46c818d
--- /dev/null
+++ b/docs/labs/huawei-us-lab/huawei-lab-virtual.png
Binary files differ
diff --git a/docs/labs/huawei-us-lab/index.rst b/docs/labs/huawei-us-lab/index.rst
new file mode 100644
index 0000000..e2a03ab
--- /dev/null
+++ b/docs/labs/huawei-us-lab/index.rst
@@ -0,0 +1,16 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos templates and configuration files
+
+******************
+Huawei's OPNFV Lab
+******************
+
+
+.. toctree::
+ :maxdepth: 2
+
+ ./lab_specification.rst
+ ./pod_specification.rst
diff --git a/docs/labs/huawei-us-lab/lab_specification.rst b/docs/labs/huawei-us-lab/lab_specification.rst
new file mode 100644
index 0000000..99d2196
--- /dev/null
+++ b/docs/labs/huawei-us-lab/lab_specification.rst
@@ -0,0 +1,100 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+**************************
+Huawei's Lab Specification
+**************************
+
+Introduction
+------------
+
+Huawei's lab provides 5 PODs for bare-metal deployment and 4 standalone servers
+for virtual deployment. All of the resources are attached to the Jenkins master,
+where the corresponding slaves can be viewed. Current POD assignments and
+individual POD details are listed below.
+
+Lab Resources & Assignments
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
++-----------------+-------------+-----------+--------+
+| Resource | Project(s) | POD Role | Status |
++-----------------+-------------+-----------+--------+
+| huawei-pod1 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+| huawei-pod2 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+| huawei-pod3 | yardstick | Dev/Test | Active |
++-----------------+-------------+-----------+--------+
+| huawei-pod4 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+| huawei-pod5 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+| huawei-virtual1 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+| huawei-virtual2 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+| huawei-virtual3 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+| huawei-virtual4 | compass4nfv | CI Stable | Active |
++-----------------+-------------+-----------+--------+
+
+Acceptable Usage Policy
+-----------------------
+
+All of the resources above are used for OPNFV CI. If an OPNFV contributor or committer
+requires them for OPNFV development, please apply to us for permission.
+
+
+Remote Access Infrastructure
+----------------------------
+
+Huawei provides a VPN (OpenVPN) to connect to the lab.
+
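+A minimal sketch of connecting with the OpenVPN client; the configuration file name is a
+placeholder for the profile supplied with your access grant:
+
+.. code-block:: bash
+
+   # Bring up the lab VPN using the supplied OpenVPN profile (example file name).
+   sudo openvpn --config huawei-opnfv-lab.ovpn
+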
+Remote Access Procedure
+-----------------------
+
+This environment is free to use by any OPNFV contributor or committer for the
+purpose of OPNFV approved activities; you only need to obtain VPN credentials for access.
+
+Access to this environment can be granted by sending an e-mail to:
+ * chigang@huawei.com
+ * meimei@huawei.com
+
+The following information should be provided in the request:
+
+ * subject: opnfv_huawei_access
+ * Full name
+ * e-mail
+ * Phone
+ * Organization
+ * OPNFV Contributor/Committer name :
+ * OPNFV Project(s) Association:
+ * LF ID:
+ * Recommended by:
+ * PGP public key (preferably registered with a PGP PKI server)
+ * SSH public key
+
+Granting access normally takes 3-5 business days.
+
+Detailed access descriptions will be provided with your access grant e-mail.
+
+Lab Documentation
+-----------------
+
+
+Lab Topology
+------------
+
+Below you'll find a topological view of the hosting set-up; more detailed
+information is available in the individual POD descriptions.
+
+.. image:: ./huawei-lab-topology.png
+ :height: 566
+ :width: 1061
+ :alt: OPNFV
+ :align: left
+
+|
+|
+
+Figure 1: Huawei lab OPNFV hosting environment overview
diff --git a/docs/labs/huawei-us-lab/net.png b/docs/labs/huawei-us-lab/net.png
new file mode 100644
index 0000000..9a12c82
--- /dev/null
+++ b/docs/labs/huawei-us-lab/net.png
Binary files differ
diff --git a/docs/labs/huawei-us-lab/pod_specification.rst b/docs/labs/huawei-us-lab/pod_specification.rst
new file mode 100644
index 0000000..5356515
--- /dev/null
+++ b/docs/labs/huawei-us-lab/pod_specification.rst
@@ -0,0 +1,124 @@
+=========================
+Huawei PODs Specification
+=========================
+
+NOTE: Illustrated using huawei-pod1 and huawei-virtual1 as examples.
+
+huawei-pod1
+===========
+
+Introduction
+------------
+
+This is a bare-metal deployment POD deployed by the Compass installer.
+
+Hardware
+^^^^^^^^
+
+* pod1 consists of 6 rack servers, detailed in the following table:
+
++------------+-----------------------+---------+--------+-----------------------------+
+| Hostname   | CPU                   | Storage | Memory | IPMI MAC & IP               |
++------------+-----------------------+---------+--------+-----------------------------+
+| jumpserver | Intel(R) Xeon(R) CPU | 1.8TB | 31G | |
+| | X5650 @ 2.67GHz | | | |
++------------+-----------------------+---------+--------+-----------------------------+
+| Host1 | Intel(R) Xeon(R) CPU | 4.2TB | 188G | eth3:Mac F8:4A:BF:55:A2:8E |
+| controller | E5-2690 @ 2.90GHz | | | ip 172.16.130.26 |
++------------+-----------------------+---------+--------+-----------------------------+
+| Host2 | Intel(R) Xeon(R) CPU | 6TB | 188G | eth3:Mac D8:49:0B:DA:5A:B8 |
+| controller | E5-2670@ 2.60GHz | | | ip 172.16.130.27 |
++------------+-----------------------+---------+--------+-----------------------------+
+| Host3 | Intel(R) Xeon(R) CPU | 8.4TB | 188G | eth3:Mac 78:D7:52:A0:B1:9D |
+| controller | E5-2670@ 2.60GHz | | | ip 172.16.130.29 |
++------------+-----------------------+---------+--------+-----------------------------+
+| Host4 | Intel(R) Xeon(R) CPU | 7.2TB | 188G | eth3:Mac D8:49:0B:DA:5B:5E |
+| compute | E5-2670@ 2.60GHz | | | ip 172.16.130.30 |
++------------+-----------------------+---------+--------+-----------------------------+
+| Host5 | Intel(R) Xeon(R) CPU | 4.8TB | 188G | eth3:Mac D8:49:0B:DA:56:86 |
+| compute | E5-2670@ 2.60GHz | | | ip 172.16.130.31 |
++------------+-----------------------+---------+--------+-----------------------------+
+
+
+* 1 Huawei S9300 10G switch for storage, management and public traffic - 2x10GE to
+ each server.
+* 1 Huawei S5300 1G switch for installation and lights-out management traffic - 2x1GE to
+ each server.
+* 1 VPN concentrator for remote access and management.
+* 1 Huawei firewall and router for public network secure access.
+
+
+huawei-pod1 Topology
+--------------------
+
+.. image:: ./huawei-lab-pod.png
+ :height: 950
+ :width: 900
+ :alt: OPNFV
+ :align: left
+
+|
+|
+
+Figure 1: Huawei lab pod1 topology
+
+
+
+huawei-pod1 Network
+-------------------
+Below you'll find a topological view of the huawei-pod1 set-up:
+
+
+.. image:: ./net.png
+ :height: 764
+ :width: 633
+ :alt: OPNFV
+ :align: left
+
+|
+|
+
+Figure 2: Full Pod network configuration
+
+
+
+huawei-virtual1
+===============
+
+Introduction
+------------
+
+This is a virtual deployment POD deployed by the Compass installer.
+
+Hardware
+^^^^^^^^
+
+The virtual POD consists of one standalone server:
+
++-----------+----------------------+
+| name | huawei-virtual1 |
++-----------+----------------------+
+| CPU | Intel(R) Xeon(R) CPU |
+| | E5-2680 v3 @ 2.50GHz |
++-----------+----------------------+
+| Memory | 251G |
++-----------+----------------------+
+| Storage | 4TB |
++-----------+----------------------+
+| IP | 192.168.107.2 |
++-----------+----------------------+
+
+Network
+-------
+Below you'll find a topological view of the huawei-virtual1 POD set-up:
+
+.. image:: ./huawei-lab-virtual.png
+ :height: 950
+ :width: 900
+ :alt: OPNFV
+ :align: left
+
+|
+|
+
+Figure 3: virtual deployment pod network configuration
+
diff --git a/docs/labs/images/Dell_Overview.jpg b/docs/labs/images/Dell_Overview.jpg
new file mode 100644
index 0000000..131765d
--- /dev/null
+++ b/docs/labs/images/Dell_Overview.jpg
Binary files differ
diff --git a/docs/labs/images/Dell_POD1.jpg b/docs/labs/images/Dell_POD1.jpg
new file mode 100644
index 0000000..9afe340
--- /dev/null
+++ b/docs/labs/images/Dell_POD1.jpg
Binary files differ
diff --git a/docs/labs/images/Dell_POD2.jpg b/docs/labs/images/Dell_POD2.jpg
new file mode 100644
index 0000000..70d76e9
--- /dev/null
+++ b/docs/labs/images/Dell_POD2.jpg
Binary files differ
diff --git a/docs/labs/images/Dell_POD3.jpg b/docs/labs/images/Dell_POD3.jpg
new file mode 100644
index 0000000..c85a22b
--- /dev/null
+++ b/docs/labs/images/Dell_POD3.jpg
Binary files differ
diff --git a/docs/labs/images/orange_paris_pod1.jpg b/docs/labs/images/orange_paris_pod1.jpg
new file mode 100644
index 0000000..8e6427f
--- /dev/null
+++ b/docs/labs/images/orange_paris_pod1.jpg
Binary files differ
diff --git a/docs/labs/images/orange_pod2.png b/docs/labs/images/orange_pod2.png
new file mode 100644
index 0000000..0569328
--- /dev/null
+++ b/docs/labs/images/orange_pod2.png
Binary files differ
diff --git a/docs/labs/images/spirent_vptc-public-drawing.png b/docs/labs/images/spirent_vptc-public-drawing.png
new file mode 100644
index 0000000..174ac45
--- /dev/null
+++ b/docs/labs/images/spirent_vptc-public-drawing.png
Binary files differ
diff --git a/docs/labs/index.rst b/docs/labs/index.rst
new file mode 100755
index 0000000..9c000f9
--- /dev/null
+++ b/docs/labs/index.rst
@@ -0,0 +1,21 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+=====================
+PHAROS Community Labs
+=====================
+
+
+.. toctree::
+ :maxdepth: 2
+
+ ./Dell.rst
+ ./ericsson/index.rst
+ ./huawei-us-lab/index.rst
+ ./ool/index.rst
+ ./orange-paris-lab/index.rst
+ ./orange-lannion-lab/index.rst
+ ./spirent.rst
+ ./zte-sh-lab/index.rst
diff --git a/docs/labs/ool/images/ool-testlab.png b/docs/labs/ool/images/ool-testlab.png
new file mode 100644
index 0000000..d0ac935
--- /dev/null
+++ b/docs/labs/ool/images/ool-testlab.png
Binary files differ
diff --git a/docs/labs/ool/index.rst b/docs/labs/ool/index.rst
new file mode 100644
index 0000000..6be3f1b
--- /dev/null
+++ b/docs/labs/ool/index.rst
@@ -0,0 +1,16 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos templates and configuration files
+
+*****************
+OOL OPNFV Testbed
+*****************
+
+.. toctree::
+
+ ./lab_description.rst
+ ./pod1_description.rst
+ ./virtual1_description.rst
+ ./inventory.rst
diff --git a/docs/labs/ool/inventory.rst b/docs/labs/ool/inventory.rst
new file mode 100644
index 0000000..78368c4
--- /dev/null
+++ b/docs/labs/ool/inventory.rst
@@ -0,0 +1,11 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+******************
+OOL Inventory File
+******************
+
+.. literalinclude:: pod1_inventory.yaml
+   :language: yaml
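+
+The ``literalinclude`` directive above renders ``pod1_inventory.yaml`` (kept in
+the same directory) verbatim in the generated documentation, so the YAML file
+remains the single source of truth for the POD inventory.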
diff --git a/docs/labs/ool/lab_description.rst b/docs/labs/ool/lab_description.rst
new file mode 100644
index 0000000..72e2bd8
--- /dev/null
+++ b/docs/labs/ool/lab_description.rst
@@ -0,0 +1,72 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+**********************
+Lab: OOL OPNFV Testbed
+**********************
+
+Introduction
+------------
+
+`Okinawa Open Laboratory (OOL)`_ provides the following facilities for OPNFV
+testing. The testlab is located in Okinawa, Japan.
+
+.. _Okinawa Open Laboratory (OOL): http://www.okinawaopenlabs.org/en/
+
+Lab Resources
+-------------
+
++--------------+--------------+-----------------+----------------------+-----------+-----------+-------+
+| POD Name | Project(s) | Project Lead(s) | Email(s) | POD Role | Status | Notes |
++--------------+--------------+-----------------+----------------------+-----------+-----------+-------+
+| `ool-pod1`_ | | | | CI stable | Available | |
++--------------+--------------+-----------------+----------------------+-----------+-----------+-------+
+| ool-virtual1 | Doctor | Ryota Mibu | r-mibu@cq.jp.nec.com | CI review | Assigned | |
++--------------+--------------+-----------------+----------------------+-----------+-----------+-------+
+
+.. _ool-pod1: https://build.opnfv.org/ci/computer/ool-pod1/
+
+Acceptable Usage Policy
+-----------------------
+
+These resources are provided to OPNFV free of charge and may be used by any
+OPNFV contributor or committer for OPNFV-approved activities, with the
+permission of the operator. They shall be used only for CI, infra
+setup/configuration and troubleshooting purposes.
+
+Remote Access Infrastructure
+----------------------------
+
+OOL provides VPN (OpenVPN) access to this testlab.
+
+Remote Access Procedure
+-----------------------
+
+Access to this environment can be granted by sending an e-mail to: TBD
+
+subject: opnfv_access_ool
+
+The following information should be provided in the request:
+
+* Full name
+* e-mail
+* Phone
+* Organization
+* Resources required
+* How long is access needed
+* PGP public key
+* SSH public key
+
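+As an illustration only (the receiving address above is still TBD), the body of
+such a request could follow the same plain-text format used by other Pharos
+labs in this repository::
+
+    Full name:
+    E-mail:
+    Phone:
+    Organization:
+    Resources required:
+    How long is access needed:
+    PGP public key:
+    SSH public key:
+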
+Granting access normally takes 2-3 business days.
+
+Detailed access descriptions will be provided with your access grant e-mail.
+
+Lab Documentation
+-----------------
+
+Lab Topology
+------------
+
+.. image:: images/ool-testlab.png
+ :alt: not found
diff --git a/docs/labs/ool/pod1_description.rst b/docs/labs/ool/pod1_description.rst
new file mode 100644
index 0000000..1640070
--- /dev/null
+++ b/docs/labs/ool/pod1_description.rst
@@ -0,0 +1,185 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+*************
+POD: ool-pod1
+*************
+
+Introduction
+------------
+
+This is a physical POD deployed with the Fuel installer (Brahmaputra).
+
+Additional Requirements
+-----------------------
+
+.. _ool-pod1-spec:
+
+Server Specifications
+---------------------
+
+**Jump Host**
+
++------------+------------+--------------+---------------+--------------+--------+---------------------+
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | Storage |
++============+============+==============+===============+==============+========+=====================+
+| OPNFV-jump | SuperMicro | SYS-5018R-WR | - | E5-2630v3 x1 | 32 GB | SATA 7.2krpm 2TB x1 |
++------------+------------+--------------+---------------+--------------+--------+---------------------+
+
++------------+---------------------+--------------------+-------------------+------------+
+| | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++============+=====================+====================+===================+============+
+| OPNFV-jump | - | IF0: | | NIC Model: |
+| | | 0c:c4:7a:6c:a2:b2 | | Intel I350 |
+| | | VLAN untagged | | |
+| | | Public | | |
+| | | IF1: | | |
+| | | 0c:c4:7a:6c:a2:b3 | | |
+| | | VLAN 10{2-5} | | |
+| | | Admin/Mgmt/Private | | |
++------------+---------------------+--------------------+-------------------+------------+
+
+**Compute Nodes**
+
++----------+------------+-----------+---------------+--------------+--------+---------------------+
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | Storage |
++==========+============+===========+===============+==============+========+=====================+
+| node-9 | FUJITSU | RX2530 M1 | - | E5-2630v3 x1 | 32 GB | SATA 7.2krpm 2TB x2 |
+| | | | | | | SSD 100GB x1 |
++----------+------------+-----------+---------------+--------------+--------+---------------------+
+| node-10 | FUJITSU | RX2530 M1 | - | E5-2630v3 x1 | 32 GB | SATA 7.2krpm 2TB x2 |
+| | | | | | | SSD 100GB x1 |
++----------+------------+-----------+---------------+--------------+--------+---------------------+
+| node-11 | FUJITSU | RX2530 M1 | - | E5-2630v3 x1 | 32 GB | SATA 7.2krpm 2TB x2 |
+| | | | | | | SSD 100GB x1 |
++----------+------------+-----------+---------------+--------------+--------+---------------------+
+| node-12 | FUJITSU | RX2530 M1 | - | E5-2630v3 x1 | 32 GB | SATA 7.2krpm 2TB x2 |
+| | | | | | | SSD 100GB x1 |
++----------+------------+-----------+---------------+--------------+--------+---------------------+
+
++----------+---------------------+-------------------+-------------------+-----------------------+
+| | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++==========+=====================+===================+===================+=======================+
+| node-9 | - | IF0: | IF4: | NIC Models: |
+| | | 90:1b:0e:6b:e8:a8 | 90:1b:0e:6d:09:71 | (1GbE) Emulex Skyhawk |
+| | | VLAN untagged | VLAN untagged | (10GbE) Intel 82599E |
+| | | Admin | Storage | |
+| | | IF1: | IF5: | |
+| | | 90:1b:0e:6b:e8:a9 | 90:1b:0e:6d:09:72 | |
+| | | VLAN untagged | VLAN untagged | |
+| | | Mgmt | Storage | |
+| | | IF2: | | |
+| | | 90:1b:0e:6b:e8:aa | | |
+| | | VLAN untagged | | |
+| | | Public | | |
+| | | IF3: | | |
+| | | 90:1b:0e:6b:e8:ab | | |
+| | | VLAN untagged | | |
+| | | Private | | |
++----------+---------------------+-------------------+-------------------+-----------------------+
+| node-10 | - | IF0: | IF4: | NIC Models: |
+| | | 90:1b:0e:6b:e3:00 | 90:1b:0e:6d:09:5f | (1GbE) Emulex Skyhawk |
+| | | VLAN untagged | VLAN untagged | (10GbE) Intel 82599E |
+| | | Admin | Storage | |
+| | | IF1: | IF5: | |
+| | | 90:1b:0e:6b:e3:01 | 90:1b:0e:6d:09:60 | |
+| | | VLAN untagged | VLAN untagged | |
+| | | Mgmt | Storage | |
+| | | IF2: | | |
+| | | 90:1b:0e:6b:e3:02 | | |
+| | | VLAN untagged | | |
+| | | Public | | |
+| | | IF3: | | |
+| | | 90:1b:0e:6b:e3:03 | | |
+| | | VLAN untagged | | |
+| | | Private | | |
++----------+---------------------+-------------------+-------------------+-----------------------+
+| node-11 | - | IF0: | IF4: | NIC Models: |
+| | | 90:1b:0e:6b:e5:b4 | 90:1b:0e:6d:09:6f | (1GbE) Emulex Skyhawk |
+| | | VLAN untagged | VLAN untagged | (10GbE) Intel 82599E |
+| | | Admin | Storage | |
+| | | IF1: | IF5: | |
+| | | 90:1b:0e:6b:e5:b5 | 90:1b:0e:6d:09:70 | |
+| | | VLAN untagged | VLAN untagged | |
+| | | Mgmt | Storage | |
+| | | IF2: | | |
+| | | 90:1b:0e:6b:e5:b6 | | |
+| | | VLAN untagged | | |
+| | | Public | | |
+| | | IF3: | | |
+| | | 90:1b:0e:6b:e5:b7 | | |
+| | | VLAN untagged | | |
+| | | Private | | |
++----------+---------------------+-------------------+-------------------+-----------------------+
+| node-12 | - | IF0: | IF4: | NIC Models: |
+| | | 90:1b:0e:6b:e2:bc | 90:1b:0e:6d:08:31 | (1GbE) Emulex Skyhawk |
+| | | VLAN untagged | VLAN untagged | (10GbE) Intel 82599E |
+| | | Admin | Storage | |
+| | | IF1: | IF5: | |
+| | | 90:1b:0e:6b:e2:bd | 90:1b:0e:6d:08:32 | |
+| | | VLAN untagged | VLAN untagged | |
+| | | Mgmt | Storage | |
+| | | IF2: | | |
+| | | 90:1b:0e:6b:e2:be | | |
+| | | VLAN untagged | | |
+| | | Public | | |
+| | | IF3: | | |
+| | | 90:1b:0e:6b:e2:bf | | |
+| | | VLAN untagged | | |
+| | | Private | | |
++----------+---------------------+-------------------+-------------------+-----------------------+
+
+**Switches**
+
++--------------------------------------------+--------------------+
+| Node | Hardware |
++============================================+====================+
+| Switch 1 (for each network except storage) | Juniper EX3300-24T |
++--------------------------------------------+--------------------+
+| Switch 2 (for storage) | Mellanox SX1024 |
++--------------------------------------------+--------------------+
+
+**Subnet Allocations**
+
++----------------+---------------+---------------+----------------+----------+
+| Network name | Address | Mask | Gateway | VLAN id |
++================+===============+===============+================+==========+
+| Public | 192.168.25.0 | 255.255.255.0 | 192.168.25.254 | 103 |
++----------------+---------------+---------------+----------------+----------+
+| Fuel Admin | 192.168.103.0 | 255.255.255.0 | 192.168.103.1 | 103 |
++----------------+---------------+---------------+----------------+----------+
+| Fuel Mangement | 192.168.104.0 | 255.255.255.0 | 192.168.104.1 | 104 |
++----------------+---------------+---------------+----------------+----------+
+| Fuel Public | 192.168.105.0 | 255.255.255.0 | 192.168.105.1 | 105 |
++----------------+---------------+---------------+----------------+----------+
+| Fuel Private | 192.168.106.0 | 255.255.255.0 | | Untagged |
++----------------+---------------+---------------+----------------+----------+
+| Fuel Storage | 192.168.107.0 | 255.255.255.0 | | Untagged |
++----------------+---------------+---------------+----------------+----------+
+
+VPN Users
+---------
+
++------------+----------------------+---------+--------------+-------+
+| Name | Email | Project | Role | Notes |
++------------+----------------------+---------+--------------+-------+
+| Ryota Mibu | r-mibu@cq.jp.nec.com | Doctor | Project Lead | |
++------------+----------------------+---------+--------------+-------+
+
+Firewall Rules
+--------------
+
++------------+------------+-------+
+| Port(s) | Service | Notes |
++------------+------------+-------+
+| | | |
++------------+------------+-------+
+
+POD Topology
+------------
+
+.. image:: images/ool-testlab.png
+ :alt: not found
diff --git a/docs/labs/ool/pod1_inventory.yaml b/docs/labs/ool/pod1_inventory.yaml
new file mode 100644
index 0000000..4f46b95
--- /dev/null
+++ b/docs/labs/ool/pod1_inventory.yaml
@@ -0,0 +1,37 @@
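+# OOL POD1 inventory in the Pharos inventory format. Field semantics below are
+# assumed to match the ZTE pod-1.yaml example in this repository:
+#   tags        - node role ("control" or "compute")
+#   mac_address - MAC of the PXE boot interface (IF0 in pod1_description.rst)
+#   power       - lights-out (IPMI) access; values are withheld here ("n/a")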
+nodes:
+ - name: node-9
+ tags: control
+ arch: "x86_64"
+ mac_address: "90:1b:0e:6b:e8:a8"
+ power:
+ type: ipmi
+ address: n/a
+ user: n/a
+ pass: n/a
+ - name: node-10
+ tags: control
+ arch: "x86_64"
+ mac_address: "90:1b:0e:6b:e3:00"
+ power:
+ type: ipmi
+ address: n/a
+ user: n/a
+ pass: n/a
+ - name: node-11
+ tags: control
+ arch: "x86_64"
+ mac_address: "90:1b:0e:6b:e5:b4"
+ power:
+ type: ipmi
+ address: n/a
+ user: n/a
+ pass: n/a
+ - name: node-12
+ tags: compute
+ arch: "x86_64"
+ mac_address: "90:1b:0e:6b:e2:bc"
+ power:
+ type: ipmi
+ address: n/a
+ user: n/a
+ pass: n/a
diff --git a/docs/labs/ool/virtual1_description.rst b/docs/labs/ool/virtual1_description.rst
new file mode 100644
index 0000000..089bd92
--- /dev/null
+++ b/docs/labs/ool/virtual1_description.rst
@@ -0,0 +1,102 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+*****************
+POD: ool-virtual1
+*****************
+
+Introduction
+------------
+
+This is a virtual POD deployed with the Apex installer (master/Colorado).
+This POD is built on one machine placed next to the machines of the physical
+POD (ool-pod1). The controller and compute nodes are VMs.
+
+Additional Requirements
+-----------------------
+
+Server Specifications
+---------------------
+
+**Jump Host**
+
+See :ref:`ool-pod1-spec`.
+
+**Compute Nodes**
+
++------------------+--------------+----------------------------+
+| Machine | Hostname | Hardware |
++==================+==============+============================+
+| Virtual POD | ool-virtual1 | FUJITSU PRIMERGY RX2530 M1 |
++------------------+--------------+----------------------------+
+
++------------------------------------+
+| FUJITSU PRIMERGY RX2530 M1 |
++============+==================+====+
+| CPU | Xeon E5-2630v3 | x1 |
++------------+------------------+----+
+| RAM | 32GB | - |
++------------+------------------+----+
+| HDD | SATA 7.2krpm 2TB | x2 |
++------------+------------------+----+
+| SSD | 100GB | x1 |
++------------+------------------+----+
+| 1000BASE-T | Emulex Skyhawk | x2 |
++------------+------------------+----+
+| 10GBASE-T | Intel 82599E | x2 |
++------------+------------------+----+
+| BMC | - | x1 |
++------------+------------------+----+
+
++--------------+-----+------+-------------------+----------+--------------------+
+| Hostname | IF# | BW | MAC | IF in OS | Role |
++==============+=====+======+===================+==========+====================+
+| ool-virtual1 | IF0 | 1Gb | 90:1b:0e:6b:e5:d8 | eno1 | Admin |
++--------------+-----+------+-------------------+----------+--------------------+
+| ool-virtual1 | IF1 | 1Gb | 90:1b:0e:6b:e5:d9 | eno2 | Mgmt |
++--------------+-----+------+-------------------+----------+--------------------+
+| ool-virtual1 | IF2 | 1Gb | 90:1b:0e:6b:e5:da | eno3 | Public |
++--------------+-----+------+-------------------+----------+--------------------+
+| ool-virtual1 | IF3 | 1Gb | 90:1b:0e:6b:e5:db | eno4 | Private |
++--------------+-----+------+-------------------+----------+--------------------+
+| ool-virtual1 | IF4 | 1Gb | 90:1b:0e:6d:08:f5 | ens2f0 | Storage |
++--------------+-----+------+-------------------+----------+--------------------+
+| ool-virtual1 | IF5 | 1Gb | 90:1b:0e:6d:08:f6 | ens2f1 | Storage |
++--------------+-----+------+-------------------+----------+--------------------+
+
+**Subnet Allocations in the host**
+
++--------------+---------------+---------------+----------------+----------+
+| Network name | Address | Mask | Gateway | VLAN id |
++==============+===============+===============+================+==========+
+| Admin | 192.0.2.0 | 255.255.255.0 | 192.168.103.1 | Untagged |
++--------------+---------------+---------------+----------------+----------+
+| Public | 192.168.37.0 | 255.255.255.0 | 192.168.105.1 | Untagged |
++--------------+---------------+---------------+----------------+----------+
+| Private | 11.0.0.0 | 255.255.255.0 | | Untagged |
++--------------+---------------+---------------+----------------+----------+
+| Storage | 12.0.0.0 | 255.255.255.0 | | Untagged |
++--------------+---------------+---------------+----------------+----------+
+
+VPN Users
+---------
+
++------------+----------------------+---------+--------------+-------+
+| Name | Email | Project | Role | Notes |
++------------+----------------------+---------+--------------+-------+
+| Ryota Mibu | r-mibu@cq.jp.nec.com | Doctor | Project Lead | |
++------------+----------------------+---------+--------------+-------+
+
+Firewall Rules
+--------------
+
++------------+------------+-------+
+| Port(s) | Service | Notes |
++------------+------------+-------+
+| | | |
++------------+------------+-------+
+
+POD Topology
+------------
+
diff --git a/docs/labs/orange-lannion-lab/index.rst b/docs/labs/orange-lannion-lab/index.rst
new file mode 100644
index 0000000..e8149d4
--- /dev/null
+++ b/docs/labs/orange-lannion-lab/index.rst
@@ -0,0 +1,216 @@
+Orange Lannion OPNFV Testlab
+==================================================
+
+Overview
+------------------
+
+Orange Labs is hosting an OPNFV testlab at its Lannion facility. The testlab hosts baremetal
+servers for the use of the OPNFV community as part of the OPNFV Pharos Project.
+
+
+The Orange Testlab consists of the following POD:
+ * POD2 for Joid
+
+POD2 consists of 8 servers:
+ * 1 Jump Server
+ * 4 Servers for Control Nodes
+ * 3 Servers for Compute Nodes
+
+
+
+Hardware details
+-----------------
+
+All the servers within the POD reside in two chassis and have the
+following specifications:
+
+POD2-Joid
+^^^^^^^^^^^^
+
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Hostname| Model |Memory |Storage | Processor 1 |Cores|Threads| Processor 2 |Cores|Threads|
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Node1 |ProLiant DL380 Gen9 |128 GB |2xIntel SSD S3500 480GB+1 SAS 300GB|Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz | 18 | 36 | Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz | 18 | 36 |
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Node2 |ProLiant DL380 Gen9 |128 GB |2xIntel SSD S3500 480GB+ SAS 300GB|Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz | 18 | 36 | Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz | 18 | 36 |
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Node3 |ProLiant DL380 Gen9 |128 GB |2xIntel SSD S3500 480GB+1 SAS 300GB|Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz | 18 | 36 | Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz | 18 | 36 |
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Node4 |ProLiant DL380 Gen9 |128 GB |2xIntel SSD S3500 480GB+1 SAS 300GB|Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz | 6 | 6 | Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz | 6 | 6 |
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Node5 |ProLiant DL360 Gen9 |32 GB |2xSAS 300GB |Intel(R) Xeon(R) CPU E5-2683 v3 @ 2.00GHz | 14 | 28 | N/A | | |
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Node6 |ProLiant DL360 Gen9 |32 GB |2xSAS 300GB |Intel(R) Xeon(R) CPU E5-2683 v3 @ 2.00GHz | 14 | 28 | N/A | | |
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+| Node7 |ProLiant DL360 Gen9 |32 GB |2xSAS 300GB |Intel(R) Xeon(R) CPU E5-2683 v3 @ 2.00GHz | 14 | 28 | N/A | | |
++---------+--------------------+-------+-----------------------------------+------------------------------------------+-----+-------+-------------------------------------------+-----+-------+
+
+Software
+---------
+
+The Jump servers in the Testlab are pre-provisioned with the following software:
+
+ * Joid-Jump Server:
+
+ 1. OS: Ubuntu 14.04
+
+
+
+Networks
+----------
+
+POD2-Joid
+^^^^^^^^^^^^
+
+.. image:: ../images/orange_pod2.png
+ :height: 721
+ :width: 785
+ :alt: POD2-Joid Overview
+ :align: left
+
+
+
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Hostname | NIC Model | Ports | MAC | BW | Roles |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Node1 | 1, Broadcom NetXtreme BCM5719 | eth0 | 38:63:bb:3f:bc:c8 | 10G | Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth1 | 38:63:bb:3f:bc:c9 | 10G | Public|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 2, Broadcom NetXtreme BCM5719 | eth2 | 38:63:bb:3f:bc:ca | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth3 | 38:63:bb:3f:bc:cb | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 3, Intel X540-AT2 DPDK | eth4 | a0:36:9f:4e:88:5c | 10G | Storage|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth5 | a0:36:9f:4e:88:5e | 10G | VM |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Node2 | 1, Broadcom NetXtreme BCM5719 | eth0 | 38:63:bb:44:34:84 | 10G | Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth1 | 38:63:bb:44:34:85 | 10G | Public|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 2, Broadcom NetXtreme BCM5719 | eth2 | 38:63:bb:44:34:86 | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth3 | 38:63:bb:44:34:87 | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 3, Intel X540-AT2 DPDK | eth4 | a0:36:9f:4e:8b:0c | 10G | Storage|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth5 | a0:36:9f:4e:8b:0e | 10G | VM |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Node3 | 1, Broadcom NetXtreme BCM5719 | eth0 | 38:63:bb:3f:1d:8c | 10G | Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth1 | 38:63:bb:3f:1d:8d | 10G | Public|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 1, Broadcom NetXtreme BCM5719 | eth2 | 38:63:bb:3f:1d:8e | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth3 | 38:63:bb:3f:1d:8f | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 3, Intel X540-AT2 DPDK | eth4 | a0:36:9f:4e:88:38 | 10G | Storage|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth5 | a0:36:9f:4e:88:3a | 10G | VM |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Node4 | 1, Broadcom NetXtreme BCM5719 | eth0 | 38:63:bb:3f:2d:a8 | 10G | Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth1 | 38:63:bb:3f:2d:a9 | 10G | Public|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 1, Broadcom NetXtreme BCM5719 | eth2 | 38:63:bb:3f:2d:aa | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth3 | 38:63:bb:3f:2d:ab | 10G | N/A |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 3, Intel X540-AT2 DPDK | eth4 | a0:36:9f:4e:8b:18 | 10G | Storage|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth5 | a0:36:9f:4e:8b:1a | 10G | VM |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Node5 | 1, Broadcom NetXtreme BCM5719 | eth0 | 94:57:a5:52:c9:48 | 10G | Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth1 | 94:57:a5:52:c9:49 | 10G | Public|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 1, Broadcom NetXtreme BCM5719 | eth2 | 94:57:a5:52:c9:4a | 10G | Storage|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth3 | 94:57:a5:52:c9:4b | 10G | VM |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Node6 | 1, Broadcom NetXtreme BCM5719 | eth0 | 94:57:a5:52:63:b0 | 10G | Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth1 | 94:57:a5:52:63:b1 | 10G | Public|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 1, Broadcom NetXtreme BCM5719 | eth2 | 94:57:a5:52:63:b2 | 10G | Storage|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth3 | 94:57:a5:52:63:b3 | 10G | VM |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| Node7 | 1, Broadcom NetXtreme BCM5719 | eth0 | 94:57:a5:52:f1:80 | 10G | Admin |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth1 | 94:57:a5:52:f1:81 | 10G | Public|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | 1, Broadcom NetXtreme BCM5719 | eth2 | 94:57:a5:52:f1:82 | 10G | Storage|
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+| | | eth3 | 94:57:a5:52:f1:83 | 10G | VM |
++--------------+--------------------------------------+-------+-------------------+-----+--------+
+
+
+
+
+
+
+Subnet allocations Pod2
+^^^^^^^^^^^^^^^^^^^^^^^^
+
++-----------+--------------+------------------+--------------+----------+
+| Network | Address | Mask | Gateway | VLAN id |
++-----------+--------------+------------------+--------------+----------+
+|Admin | 192.168.2.0 | 255.255.255.0 | 192.168.2.1 | 200 |
++-----------+--------------+------------------+--------------+----------+
+|Public | 161.105.231.0| 255.255.255.192 | 161.105.231.1| 135 |
++-----------+--------------+------------------+--------------+----------+
+|Storage | 192.168.12.0 | 255.255.255.0 | 192.168.2.1 | 210 |
++-----------+--------------+------------------+--------------+----------+
+|VM | 192.168.22.0 | 255.255.255.0 | 192.168.22.1 | 230 |
++-----------+--------------+------------------+--------------+----------+
+
+
+ILO Pod2
+^^^^^^^^
+
+**POD2**
+
++-----------+--------------------+-------------------+-------------+-------------+
+| Hostname | Lights-out address | MAC | Username | Password |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node1 | 192.168.2.11 | 38:63:bb:39:b2:2e |Administrator| pod2Admin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node2 | 192.168.2.12 | 14:58:d0:48:7b:7a |Administrator| pod2Admin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node3 | 192.168.2.13 | 38:63:bb:39:b2:86 |Administrator| pod2Admin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node4 | 192.168.2.14 | 38:63:bb:39:b2:40 |Administrator| pod2Admin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node5 | 192.168.2.15 | 94:57:a5:62:73:c2 |Administrator| pod2Admin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node6 | 192.168.2.16 | 94:57:a5:62:72:90 |Administrator| pod2Admin |
++-----------+--------------------+-------------------+-------------+-------------+
+| Node7 | 192.168.2.17 | 94:57:a5:62:f4:c6 |Administrator| pod2Admin |
++-----------+--------------------+-------------------+-------------+-------------+
+
+
+Remote access infrastructure
+-----------------------------
+
+The Orange OPNFV testlab is free to use for the OPNFV community.
+
+To access the Testlab, please contact bertrand.lelamer AT orange.com with
+the following details:
+
+* Name
+* Email
+* Designation
+* Organization
+* Purpose of using the lab
+* SSH public key
+
+
+
+*Accessing the Orange Lannion Testlab*
+--------------------------------------
+
+POD2 JumpServer
+^^^^^^^^^^^^^^^
+
+
diff --git a/docs/labs/orange-paris-lab/index.rst b/docs/labs/orange-paris-lab/index.rst
new file mode 100644
index 0000000..113d581
--- /dev/null
+++ b/docs/labs/orange-paris-lab/index.rst
@@ -0,0 +1,15 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos templates and configuration files
+
+***********************************************
+Orange Paris Pharos Lab and Configuration Files
+***********************************************
+
+
+.. toctree::
+
+ ./orange_paris_lab_description.rst
+ ./orange_paris_pod1_description.rst
diff --git a/docs/labs/orange-paris-lab/orange_paris_lab_description.rst b/docs/labs/orange-paris-lab/orange_paris_lab_description.rst
new file mode 100644
index 0000000..5a25731
--- /dev/null
+++ b/docs/labs/orange-paris-lab/orange_paris_lab_description.rst
@@ -0,0 +1,75 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+********************************
+Orange Paris Lab Specification
+********************************
+
+Introduction
+------------
+
+Orange is hosting an OPNFV test lab at its Chatillon (near Paris) facility. The test lab hosts
+baremetal servers for the use of the OPNFV community as part of the OPNFV Pharos Project.
+
+The Orange Paris lab consists of 1 POD:
+ * POD for Fuel
+
+
+Lab Resources
+-------------
+
++-------------+------------+-----------------+----------+-----------+---------+-------+
+| POD Name | Project(s) | Project Lead(s) | Email(s) | POD Role | Status | Notes |
++-------------+------------+-----------------+----------+-----------+---------+-------+
+| opnfv-integ | | | | Dev/test | Active | |
++-------------+------------+-----------------+----------+-----------+---------+-------+
+
+* **POD Name:** Use consistent naming / numbering to avoid confusion. Hyperlinked to POD description.
+* **POD Role:** CI stable, CI latest, Dev/test, Stand-alone, Virtual, ...
+* **Status:** Assigned, Configuring, Active, Troubleshooting, Available, ...
+
+
+Acceptable Usage Policy
+-----------------------
+
+Define lab user policies and expectations
+
+
+Remote Access Infrastructure
+----------------------------
+
+The Orange Paris OPNFV test lab is free to use for the OPNFV community.
+
+A VPN is used to provide access to the Orange Paris Testlab.
+
+To access the Testlab, please contact Auboin Cyril (cyril.auboin@orange.com) with the following
+details:
+
+* Name
+* Organization
+* Purpose of using the labs
+* Dates start / end
+
+Processing the request can take 3-4 business days.
+
+
+Remote Access Procedure
+-----------------------
+
+Define lab process for requesting access to the lab (e.g. VPN guide, how to modify BIOS settings,
+etc.)
+
+
+Lab Documentation
+-----------------
+
+List lab specific documents here
+
+
+Lab Topology
+------------
+
+Provide a diagram showing the network topology of the lab, including the lights-out network. Any
+security-sensitive details should not be exposed publicly. The following diagram is an example only.
+
+.. image:: ./images/orange_paris_pod1.jpg
+ :alt: Lab diagram not found
diff --git a/docs/labs/orange-paris-lab/orange_paris_pod1_description.rst b/docs/labs/orange-paris-lab/orange_paris_pod1_description.rst
new file mode 100644
index 0000000..75dcfdd
--- /dev/null
+++ b/docs/labs/orange-paris-lab/orange_paris_pod1_description.rst
@@ -0,0 +1,129 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+
+********************************
+Orange Paris POD1 Specification
+********************************
+
+Introduction
+------------
+
+Orange is hosting an OPNFV test lab at its Chatillon (near Paris) facility. The test lab hosts 4
+baremetal servers (1 controller and 3 computes) for the use of the OPNFV community as part of the
+OPNFV Pharos Project.
+
+* Version: Brahmaputra
+* Installer: Fuel (with Ceph)
+
+Additional Requirements
+-----------------------
+
+Server Specifications
+---------------------
+
+**Switch**
+
++-----------+----------+---------+---------------+-------+--------+---------+---------------------+------------------+------------------+----------+
+| | | | | | | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++-----------+----------+---------+---------------+-------+--------+---------+---------------------+------------------+------------------+----------+
+| pod1- | JUNIPER | EX-4550 | 750-045407 | | | | 172.31.2.254 | | | 32 ports |
+| switch | | | | | | | CC:E1:7F:86:38:80 | | | |
+| | | | | | | | | | | |
++-----------+----------+---------+---------------+-------+--------+---------+---------------------+------------------+------------------+----------+
+
+**Jump Host**
+
++-----------+---------+----------+---------------+----------------+--------+-----------+---------------------+------------------+------------------+-------+
+| | | | | | | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++-----------+---------+----------+---------------+----------------+--------+-----------+---------------------+------------------+------------------+-------+
+| pod1- | DELL | Proliant | CZJ40901PV | Intel Xeon | 16 GB | 300GB SAS | | IF0: 172.31.13.5 | | |
+| jump-host | | DL 360e | | E5-2430 v2.2 | | 300GB SAS | | | | |
+| | | Gen8 | | 2,5Ghz 24 core | | | | | | |
++-----------+---------+----------+---------------+----------------+--------+-----------+---------------------+------------------+------------------+-------+
+
+**Firewall**
+
++-----------+---------+------------+---------------+-------------+--------+-----------+---------------------+--------------------+------------------+-------+
+| | | | | | | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++-----------+---------+------------+---------------+-------------+--------+-----------+---------------------+--------------------+------------------+-------+
+| pod1- | IBM | @Server | | Intel Xeon | 4 GB | 36GB SATA | | IF0: 161.105.211.2 | | |
+| firewall | | xSerie 336 | KKTVY4M | | | 36GB SATA | | | | |
+| | | | | | | | | | | |
++-----------+---------+------------+---------------+-------------+--------+-----------+---------------------+--------------------+------------------+-------+
+
+**Controller Node**
+
++------------+---------+-----------+---------------+---------------+--------+-----------+---------------------+------------------------+------------------+-------+
+| | | | | | | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++------------+---------+-----------+---------------+---------------+--------+-----------+---------------------+------------------------+------------------+-------+
+| pod1-ctrl1 | HP | Proliant | CZJ40901PT | Intel Xeon | 16GB | 300GB SAS | | IF0: 9C:B6:54:95:E4:74 | | |
+| | | DL 360e | | E5-2430 v2.2 | | 300GB SAS | | Admin | | |
+| | | Gen8 | | 2,5Ghz | | | | IF1: 9C:B6:54:95:E4:75 | | |
+| | | | | 24 core | | | | 18: Public | | |
+| | | | | | | | | 1500: Storage | | |
+| | | | | | | | | 17: Management | | |
+| | | | | | | | | 1502: Private | | |
++------------+---------+-----------+---------------+---------------+--------+-----------+---------------------+------------------------+------------------+-------+
+
+**Compute Nodes**
+
++------------+---------+-----------+---------------+---------------+----------+------------+---------------------+------------------------+------------------+-------+
+| | | | | | | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | Memory | storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++------------+---------+-----------+---------------+---------------+----------+------------+---------------------+------------------------+------------------+-------+
+| pod1-node1 | DELL | R730 | 8F3J642 | Intel Xeon | 128GB | 250GB SATA | | IF0: EC:F4:BB:CB:62:9C | | |
+| | | | | E5-2603 v3 | (8x16GB) | 480GB SSD | | Admin | | |
+| | | | | 1,6Ghz | 1600Mhz | 480GB SSD | | IF1: EC:F4:BB:CB:62:9A | | |
+| | | | | 12 core | | | | 18: Public | | |
+| | | | | | | | | 1500: Storage | | |
+| | | | | | | | | 17: Management | | |
+| | | | | | | | | 1502: Private | | |
++------------+---------+-----------+---------------+---------------+----------+------------+---------------------+------------------------+------------------+-------+
+| pod1-node2 | HP | Proliant | CZJ40901PS | Intel Xeon | 16GB | 300GB SAS | | IF0: 9C:B6:54:95:D4:F0 | | |
+| | | DL 360e | | E5-2430 v2.2 | | 300GB SAS | | Admin | | |
+| | | Gen8 | | 2,5Ghz | | | | IF1: 9C:B6:54:95:D4:F1 | | |
+| | | | | 24 core | | | | 18: Public | | |
+| | | | | | | | | 1500: Storage | | |
+| | | | | | | | | 17: Management | | |
+| | | | | | | | | 1502: Private | | |
++------------+---------+-----------+---------------+---------------+----------+------------+---------------------+------------------------+------------------+-------+
+| pod1-node3 | DELL | R730 | FG3J642 | Intel Xeon | 128GB | 256GB SATA | | IF0: EC:F4:BB:CB:62:E4 | | |
+| | | | | E5-2603 v3 | (8x16GB) | 480GB SSD | | Admin | | |
+| | | | | 1,6Ghz | 1600Mhz | 480GB SSD | | IF1: EC:F4:BB:CB:62:E2 | | |
+| | | | | 12 core | | | | 18: Public | | |
+| | | | | | | | | 1500: Storage | | |
+| | | | | | | | | 17: Management | | |
+| | | | | | | | | 1502: Private | | |
++------------+---------+-----------+---------------+---------------+----------+------------+---------------------+------------------------+------------------+-------+
+
+Users
+-----
+
++------+-------+---------+------+-------+
+| Name | Email | Company | Role | Notes |
++------+-------+---------+------+-------+
+| | | | | |
++------+-------+---------+------+-------+
+
+Firewall Rules
+--------------
+
++------------+------------+------+
+| Port(s) | Service | Note |
++------------+------------+------+
+| 22, 43, 80 | Jenkins CI | |
++------------+------------+------+
+
+POD Topology
+------------
+
+Provide a diagram showing the network topology of the POD. Any security sensitive details should not
+be exposed publicly and can be stored in the secure Pharos repo. The following diagram is an
+example only.
+
+.. image:: ./images/orange_paris_pod1.jpg
+ :alt: POD diagram not found
diff --git a/docs/labs/spirent.rst b/docs/labs/spirent.rst
new file mode 100644
index 0000000..2c6a0cf
--- /dev/null
+++ b/docs/labs/spirent.rst
@@ -0,0 +1,44 @@
+Spirent Virtual Cloud Test Lab
+===============================
+
+A community-provided bare-metal resource hosted at Nephoscale, leveraged for public SDN/NFV testing
+and for OpenDaylight, OpenStack and OPNFV projects.
+
+**Spirent VCT Lab** is currently working on several different **OpenStack** environments, each of
+them deployed on a different hardware configuration:
+
+ * **OpenStack Juno - 2014.2.2 release** (CentOS 7, 20 Cores, 64 GB RAM, 1 TB SATA, 40 Gbps)
+ * **OpenStack Juno - 2014.2.2 release** (Ubuntu 14.04, 8 cores, 32 GB RAM, 500 GB SATA, 10 Gbps)
+ * **OpenStack Icehouse - 2014.1.3 release**
+ * **OpenStack Icehouse - 2014.1.3 release**
+
+----
+
+There are a number of different networks referenced in the VPTC Design Blueprint.
+
+ * Public Internet – 1 g
+ * Private Management – 1g
+ * Mission Clients – 10g
+ * Mission Servers – 10g
+
+These can be added or removed as specified by the test methodology. There are 8 x 10 gige SFP+
+ports available on a typical C100MP used for Avalanche Layer 4-7 testing. The N4U offers 2 x 40
+gige QSFP+ ports used with the MX-2 Spirent Test Center for Layer 2-3 testing. There are 2 x
+Cumulus switches, each with 32 x 40 gige QSFP+ ports, for a total capacity of 256 ports of 10 gige.
+We use QSFP+ to SFP+ break-out cables to convert a single 40 gige port into 4 x 10 gige ports.
+Together these offer a flexible solution that allows up to 8 simultaneous tests with physical
+traffic generators at the same time. Assuming a 10 to 1 oversubscription ratio, we could handle 80
+community users with the current environment.
+
+For example:
+
+ * An 80 Gbps test would need 4 port pairs of 10 gige each and require 8 mission networks.
+ * Multiple clients sharing common test hardware might have dedicated management networks for their
+ DUTs yet communicate with the APIs and Management services via a shared DMZ network protected by
+ a firewall.
+ * SSL and IPSec VPN will typically be leveraged to connect networks across the
+ untrusted Internet or other third party networks.
+ * Stand-alone DUT servers using STCv and AVv traffic generators could easily scale to hundreds of
+ servers as needed.
+
+.. image:: images/spirent_vptc-public-drawing.png
diff --git a/docs/labs/zte-sh-lab/images/zte_sh_lab_topology.png b/docs/labs/zte-sh-lab/images/zte_sh_lab_topology.png
new file mode 100644
index 0000000..6c147db
--- /dev/null
+++ b/docs/labs/zte-sh-lab/images/zte_sh_lab_topology.png
Binary files differ
diff --git a/docs/labs/zte-sh-lab/images/zte_sh_pod_topology.png b/docs/labs/zte-sh-lab/images/zte_sh_pod_topology.png
new file mode 100644
index 0000000..592b607
--- /dev/null
+++ b/docs/labs/zte-sh-lab/images/zte_sh_pod_topology.png
Binary files differ
diff --git a/docs/labs/zte-sh-lab/index.rst b/docs/labs/zte-sh-lab/index.rst
new file mode 100644
index 0000000..f767d8d
--- /dev/null
+++ b/docs/labs/zte-sh-lab/index.rst
@@ -0,0 +1,18 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos templates and configuration files
+
+*************************************
+ZTE SH Pharos Lab Configuration Files
+*************************************
+
+
+.. toctree::
+ :maxdepth: 2
+
+ ./lab_description.rst
+ ./pod-1.rst
+ ./pod-2.rst
+ ./pod-3.rst
diff --git a/docs/labs/zte-sh-lab/lab_description.rst b/docs/labs/zte-sh-lab/lab_description.rst
new file mode 100644
index 0000000..0d9d2dd
--- /dev/null
+++ b/docs/labs/zte-sh-lab/lab_description.rst
@@ -0,0 +1,127 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. _pharos_lab:
+
+************************
+ZTE SH Lab Specification
+************************
+
+
+Introduction
+------------
+
+ZTE SH Pharos lab currently has three PODs available in Shanghai. Each POD has 5 servers: 3
+controller nodes and 2 compute nodes. These PODs are dedicated for use by Production/CI. These PODs
+focus on scenarios related to **test** projects, **installer** projects and performance enhancement
+projects, such as KVM, OVS, FDS, etc.
+
+The planned scenarios are listed here:
+
+- os-nosdn-kvm-ha
+- os-nosdn-kvm_ovs-ha
+
+Scenarios are defined in
+`Colorado Scenario Status <https://wiki.opnfv.org/display/SWREL/Colorado+Scenario+Status>`_
+
+
+Lab Resources
+-------------
+
++----------+------------+-----------+-------------------------+------------+--------+-----------+
+| POD Name | Project(s) | PTL(s) | Email(s) | POD Role | Status | Notes |
++----------+------------+-----------+-------------------------+------------+--------+-----------+
+| POD1 | FUEL | Gregory | gelkinbard@mirantis.com | CI: latest | Active | Yardstick |
+| | | Elkinbard | | | | Funtest |
+| | | | | | | Doctor |
+| | | | | | | Parser |
++----------+------------+-----------+-------------------------+------------+--------+-----------+
+| POD2 | FUEL | Gregory | gelkinbard@mirantis.com | CI: latest | Active | Qtip |
+| | | Elkinbard | | | | |
++----------+------------+-----------+-------------------------+------------+--------+-----------+
+| POD3 | FUEL | Gregory | gelkinbard@mirantis.com | CI: latest | Active | NFV-KVM |
+| | | Elkinbard | | | | OVSNFV |
++----------+------------+-----------+-------------------------+------------+--------+-----------+
+
+- `POD1-3 wiki page <https://wiki.opnfv.org/display/pharos/ZTE+SH+Testlab>`_
+- `POD1 jenkins slave <https://build.opnfv.org/ci/computer/zte-pod1/>`_
+- `POD2 jenkins slave <https://build.opnfv.org/ci/computer/zte-pod2/>`_
+- `POD3 jenkins slave <https://build.opnfv.org/ci/computer/zte-pod3/>`_
+
+
+Acceptable Usage Policy
+-----------------------
+
+Resources located in OPNFV ZTE SH lab shall only be used for CI, infra setup/configuration and
+troubleshooting purposes. No development work is allowed in these PODs.
+
+
+Remote Access Infrastructure
+----------------------------
+
+ZTE SH lab provides OpenVPN access to this testlab.
+
+
+Remote Access Procedure
+-----------------------
+
+Access to this environment can be granted by sending an e-mail to: **yangyang1@zte.com.cn**.
+
+Subject: opnfv zte-pod[1-3] access.
+
+The following information should be provided in the request:
+
+::
+
+ Full name:
+ E-mail:
+ Organization:
+ Why is access needed:
+ How long is access needed:
+ What specific Host will be accessed:
+ What support is needed from zte admin:
+
+Once the access request is approved, the instructions for setting up VPN access will be sent to
+you by e-mail.
+
+
+Lab Documentation
+-----------------
+
+
+Lab Topology
+------------
+
+.. image:: ./images/zte_sh_lab_topology.png
+ :alt: Lab diagram not found
+
+All the PODs share the same **Jump Host**, because only one public IP address is allocated to the
+ZTE Pharos Lab. Deploy servers are separate from the Jump Host; each POD has its own
+**Deploy Server**.
+
+**Jump Host**
+
++----------+--------+-------+---------------+---------+--------+-----------+--------------------+------------------+-------+
+| | | | | | Memory | Local | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | (GB) | Storage | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++----------+--------+-------+---------------+---------+--------+-----------+--------------------+------------------+-------+
+| Rabbit | HP | 5500 | - | X5647x2 | 24 | 250GB SAS | IF0: | | |
+| | | | | | | 2 TB HDD | a0:36:9f:00:11:34/ | | |
+| | | | | | | | 192.168.1.1/ | | |
+| | | | | | | | native vlan/OA | | |
+| | | | | | | | IF1: | | |
+| | | | | | | | a0:36:9f:00:11:35/ | | |
+| | | | | | | | 172.10.0.1/ | | |
+| | | | | | | | vlan 103/Public | | |
+| | | | | | | | 172.20.0.1/ | | |
+| | | | | | | | vlan 113/Public | | |
+| | | | | | | | 172.60.0.1/ | | |
+| | | | | | | | vlan 163/Public | | |
+| | | | | | | | 172.70.0.1/ | | |
+| | | | | | | | vlan 173/Public | | |
+| | | | | | | | IF2: | | |
+| | | | | | | | a0.36:9:00:11:37/ | | |
+| | | | | | | | 116.228.53.183/ | | |
+| | | | | | | | native vlan/ | | |
+| | | | | | | | Internet | | |
++----------+--------+-------+---------------+---------+--------+-----------+--------------------+------------------+-------+
+
diff --git a/docs/labs/zte-sh-lab/pod-1.rst b/docs/labs/zte-sh-lab/pod-1.rst
new file mode 100644
index 0000000..7ec53a8
--- /dev/null
+++ b/docs/labs/zte-sh-lab/pod-1.rst
@@ -0,0 +1,160 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. _pharos_pod:
+
+**********************
+ZTE POD1 Specification
+**********************
+
+
+Introduction
+------------
+
+POD1 (i.e. ZTE-POD1) uses Fuel as the installer and performs os-odl_l2-nofeature-ha CI latest
+verification. Currently, test projects such as Yardstick and Functest are performing daily CI
+tasks. Feature projects such as Doctor and Parser will perform daily and verify CI tasks.
+
+
+Additional Requirements
+-----------------------
+
+
+Server Specifications
+---------------------
+
+
+**Jump Host**
+
+POD1 shares the same **Jump Host** with the other PODs in the lab.
+
+**Deploy server**
+
++-----------+--------+-------+---------------+-----------+--------+-----------+--------------------+------------------+-------+
+| | | | | | Memory | Local | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | (GB) | Storage | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++-----------+--------+-------+---------------+-----------+--------+-----------+--------------------+------------------+-------+
+| Jellyfish | ZTE | R5300 | 277662500093 | E5-2620x2 | 128 | 600GB SAS | IF0: | | |
+| | | | | | | 4 TB HDD | 74:4a:a4:00:91:b3/ | | |
+| | | | | | | | 10.20.6.1/ | | |
+| | | | | | | | native vlan/PXE | | |
+| | | | | | | | IF1: | | |
+| | | | | | | | 74:4a:a4:00:91:b4/ | | |
+| | | | | | | | 10.20.7.1/ | | |
+| | | | | | | | native vlan/PXE | | |
++-----------+--------+-------+---------------+-----------+--------+-----------+--------------------+------------------+-------+
+
+
+**Nodes/Servers**
+
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| | | | | | Memory | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | (GB) | Storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node1 | ZTE | E9000 | 701763100025 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.101 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:cf:d9 | 74:4a:a4:00:cf:dc | 74:4a:a4:00:b0:e1 | |
+| | | | | | | | zteroot/superuser | native vlan 160/PXE | vlan 161/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:b0:e2 | |
+| | | | | | | | | | vlan 162/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:b0:dd | |
+| | | | | | | | | | vlan 1120/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:b0:de | |
+| | | | | | | | | | vlan 163/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node2 | ZTE | E9000 | 701763100224 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.102 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:ce:cb | 74:4a:a4:00:ce:ce | 74:4a:a4:00:d6:ad | |
+| | | | | | | | zteroot/superuser | native vlan 160/PXE | vlan 161/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:ae | |
+| | | | | | | | | | vlan 162/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:d6:a9 | |
+| | | | | | | | | | vlan 1120/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:aa | |
+| | | | | | | | | | vlan 163/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node3 | ZTE | E9000 | 701763100064 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.103 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:cf:55 | 74:4a:a4:00:cf:58 | 74:4a:a4:00:d6:ab | |
+| | | | | | | | zteroot/superuser | native vlan 160/PXE | vlan 161/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:ac | |
+| | | | | | | | | | vlan 162/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:d6:af | |
+| | | | | | | | | | vlan 1120/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:b0 | |
+| | | | | | | | | | vlan 163/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node4 | ZTE | E9000 | 289842100103 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.104 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:49:81 | 74:4a:a4:00:49:84 | 74:4a:a4:00:b1:a5 | |
+| | | | | | | | zteroot/superuser | native vlan 160/PXE | vlan 161/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:b1:a6 | |
+| | | | | | | | | | vlan 162/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:b1:b1 | |
+| | | | | | | | | | vlan 1120/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:b1:b2 | |
+| | | | | | | | | | vlan 163/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node5 | ZTE | E9000 | 701763100220 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.105 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:ce:bf | 74:4a:a4:00:ce:c2 | 74:4a:a4:00:d6:8d | |
+| | | | | | | | zteroot/superuser | native vlan 160/PXE | vlan 161/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:8e | |
+| | | | | | | | | | vlan 162/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:d6:9b | |
+| | | | | | | | | | vlan 1120/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:9c | |
+| | | | | | | | | | vlan 163/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+
+**Subnet allocations**
+
++----------------+--------------+----------------+------------+-----------------+
+| Network name | Address | Mask | Gateway | VLAN id |
++----------------+--------------+----------------+------------+-----------------+
+| Public | 172.60.0.0 | 255.255.255.0 | 172.60.0.1 | 163 |
++----------------+--------------+----------------+------------+-----------------+
+| Fuel Admin/PXE | 10.20.6.0 | 255.255.255.0 | 10.20.6.2 | native vlan 160 |
++----------------+--------------+----------------+------------+-----------------+
+| Fuel Mangement | 192.168.61.0 | 255.255.255.0 | | 161 |
++----------------+--------------+----------------+------------+-----------------+
+| Fuel Storage | 192.168.62.0 | 255.255.255.0 | | 162 |
++----------------+--------------+----------------+------------+-----------------+
+
+
+VPN Users
+---------
+
++--------------+--------------+--------------+--------------+--------------+
+| Name | Email | Project | Role | Notes |
++--------------+--------------+--------------+--------------+--------------+
+| | | | | |
++--------------+--------------+--------------+--------------+--------------+
+
+
+Firewall Rules
+--------------
+
++---------------+---------+------+
+| Port(s) | Service | Note |
++---------------+---------+------+
+| 1194(OpenVPN) | Jenkins | |
++---------------+---------+------+
+
+
+POD Topology
+------------
+
+.. image:: ./images/zte_sh_pod_topology.png
+ :alt: POD diagram not found
diff --git a/docs/labs/zte-sh-lab/pod-1.yaml b/docs/labs/zte-sh-lab/pod-1.yaml
new file mode 100644
index 0000000..747461a
--- /dev/null
+++ b/docs/labs/zte-sh-lab/pod-1.yaml
@@ -0,0 +1,55 @@
+##############################################################################
+## Copyright (c) 2015 ZTE Corp. and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+nodes:
+ - name: node1
+ tags: control #optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CF:DC" #pxe boot interface mac
+ power:
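+    # Lights-out (IPMI) management access; address and credentials match the
+    # "Nodes/Servers" table in pod-1.rst.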
+ type: ipmi
+ address: 192.168.1.101
+ user: zteroot
+ pass: superuser
+ - name: node2
+ tags: control
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CE:CE"
+ power:
+ type: ipmi
+ address: 192.168.1.102
+ user: zteroot
+ pass: superuser
+ - name: node3
+ tags: control
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CF:58"
+ power:
+ type: ipmi
+ address: 192.168.1.103
+ user: zteroot
+ pass: superuser
+ - name: node4
+ tags: compute
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:49:84"
+ power:
+ type: ipmi
+ address: 192.168.1.104
+ user: zteroot
+ pass: superuser
+ - name: node5
+ tags: compute
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CE:C2"
+ power:
+ type: ipmi
+ address: 192.168.1.105
+ user: zteroot
+ pass: superuser
diff --git a/docs/labs/zte-sh-lab/pod-2.rst b/docs/labs/zte-sh-lab/pod-2.rst
new file mode 100644
index 0000000..961aa2a
--- /dev/null
+++ b/docs/labs/zte-sh-lab/pod-2.rst
@@ -0,0 +1,162 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. _pharos_pod:
+
+**********************
+ZTE POD2 Specification
+**********************
+
+
+Introduction
+------------
+
+POD2 (ZTE-POD2) uses Fuel as the installer and performs the latest os-odl_l2-nofeature-ha
+CI verification. The Qtip daily CI task will be migrated from POD1 to POD2. Qtip is also
+working on integration with the Yardstick umbrella project.
+
+
+Additional Requirements
+-----------------------
+
+
+Server Specifications
+---------------------
+
+**Jump Host**
+
+POD2 shares the same **Jump Host** in the lab.
+
+**Deploy Server**
+
+POD2 shares the same **Deploy Server** with POD1.
+
++-----------+--------+-------+---------------+-----------+--------+-----------+--------------------+------------------+-------+
+| | | | | | Memory | Local | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | (GB) | Storage | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++-----------+--------+-------+---------------+-----------+--------+-----------+--------------------+------------------+-------+
+| Jellyfish | ZTE | R5300 | 277662500093 | E5-2620x2 | 128 | 600GB SAS | IF0: | | |
+| | | | | | | 4 TB HDD | 74:4a:a4:00:91:b3/ | | |
+| | | | | | | | 10.20.6.1/ | | |
+| | | | | | | | native vlan/PXE | | |
+| | | | | | | | IF1: | | |
+| | | | | | | | 74:4a:a4:00:91:b4/ | | |
+| | | | | | | | 10.20.7.1/ | | |
+| | | | | | | | native vlan/PXE | | |
++-----------+--------+-------+---------------+-----------+--------+-----------+--------------------+------------------+-------+
+
+
+**Nodes/Servers**
+
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| | | | | | Memory | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | (GB) | Storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node1 | ZTE | E9000 | 701763100114 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.106 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:cd:6f | 74:4a:a4:00:cd:72 | 74:4a:a4:00:b0:e9 | |
+| | | | | | | | zteroot/superuser | native vlan 170/PXE | vlan 171/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:b0:ea | |
+| | | | | | | | | | vlan 172/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:b0:eb | |
+| | | | | | | | | | vlan 1130/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:b0:ec | |
+| | | | | | | | | | vlan 173/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node2 | ZTE | E9000 | 701360500105 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.107 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:ca:c9 | 74:4a:a4:00:ca:cc | 74:4a:a4:00:d6:a3 | |
+| | | | | | | | zteroot/superuser | native vlan 170/PXE | vlan 171/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:a4 | |
+| | | | | | | | | | vlan 172/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:d6:99 | |
+| | | | | | | | | | vlan 1130/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:9a | |
+| | | | | | | | | | vlan 173/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node3 | ZTE | E9000 | 701360500026 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.108 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:cd:0f | 74:4a:a4:00:cd:12 | 74:4a:a4:00:d6:9d | |
+| | | | | | | | zteroot/superuser | native vlan 170/PXE | vlan 171/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:9e | |
+| | | | | | | | | | vlan 172/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:d3:15 | |
+| | | | | | | | | | vlan 1130/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:d3:16 | |
+| | | | | | | | | | vlan 173/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node4 | ZTE | E9000 | 701763100099 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.109 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:cf:3d | 74:4a:a4:00:cf:40 | 74:4a:a4:00:d6:a5 | |
+| | | | | | | | zteroot/superuser | native vlan 170/PXE | vlan 171/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:a6 | |
+| | | | | | | | | | vlan 172/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:d6:a7 | |
+| | | | | | | | | | vlan 1130/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:d6:a8 | |
+| | | | | | | | | | vlan 173/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+| node5 | ZTE | E9000 | 701763100018 | E5-2650x2 | 128 | 600GB*2 HDD | 192.168.1.110 | ens4f0: | ens12f0: | |
+| | | | | | | | 74:4a:a4:00:ce:d1 | 74:4a:a4:00:ce:d4 | 74:4a:a4:00:d2:c3 | |
+| | | | | | | | zteroot/superuser | native vlan 170/PXE | vlan 171/ management | |
+| | | | | | | | | | ens12f1: | |
+| | | | | | | | | | 74:4a:a4:00:d2:c4 | |
+| | | | | | | | | | vlan 172/ storage | |
+| | | | | | | | | | ens44f0: | |
+| | | | | | | | | | 74:4a:a4:00:d2:c1 | |
+| | | | | | | | | | vlan 1130/ private | |
+| | | | | | | | | | ens44f1: | |
+| | | | | | | | | | 74:4a:a4:00:d2:c2 | |
+| | | | | | | | | | vlan 173/ public | |
++----------+--------+-------+---------------+-----------+--------+-------------+---------------------+---------------------+----------------------+-------+
+
+
+**Subnet allocations**
+
++-----------------+--------------+---------------+------------+-----------------+
+| Network name    | Address      | Mask          | Gateway    | VLAN id         |
++-----------------+--------------+---------------+------------+-----------------+
+| Public          | 172.70.0.0   | 255.255.255.0 | 172.70.0.1 | 173             |
++-----------------+--------------+---------------+------------+-----------------+
+| Fuel Admin      | 10.20.7.0    | 255.255.255.0 | 10.20.7.1  | native vlan 170 |
++-----------------+--------------+---------------+------------+-----------------+
+| Fuel Management | 192.168.71.0 | 255.255.255.0 |            | 171             |
++-----------------+--------------+---------------+------------+-----------------+
+| Fuel Storage    | 192.168.72.0 | 255.255.255.0 |            | 172             |
++-----------------+--------------+---------------+------------+-----------------+
+
+
+VPN Users
+---------
+
++--------------+--------------+--------------+--------------+--------------+
+| Name | Email | Project | Role | Notes |
++--------------+--------------+--------------+--------------+--------------+
+| | | | | |
++--------------+--------------+--------------+--------------+--------------+
+
+
+Firewall Rules
+--------------
+
++---------------+---------+------+
+| Port(s) | Service | Note |
++---------------+---------+------+
+| 1194(OpenVPN) | Jenkins | |
++---------------+---------+------+
+
+
+POD Topology
+------------
+
+.. image:: ./images/zte_sh_pod_topology.png
+ :alt: POD diagram not found
diff --git a/docs/labs/zte-sh-lab/pod-2.yaml b/docs/labs/zte-sh-lab/pod-2.yaml
new file mode 100644
index 0000000..55d478f
--- /dev/null
+++ b/docs/labs/zte-sh-lab/pod-2.yaml
@@ -0,0 +1,55 @@
+##############################################################################
+## Copyright (c) 2015 ZTE Corp. and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+nodes:
+ - name: node1
+ tags: control #optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CD:72" #pxe boot interface mac
+ power:
+ type: ipmi
+ address: 192.168.1.106
+ user: zteroot
+ pass: superuser
+ - name: node2
+ tags: control
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CA:CC"
+ power:
+ type: ipmi
+ address: 192.168.1.107
+ user: zteroot
+ pass: superuser
+ - name: node3
+ tags: control
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CD:12"
+ power:
+ type: ipmi
+ address: 192.168.1.108
+ user: zteroot
+ pass: superuser
+ - name: node4
+ tags: compute
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CF:40"
+ power:
+ type: ipmi
+ address: 192.168.1.109
+ user: zteroot
+ pass: superuser
+ - name: node5
+ tags: compute
+ arch: "x86_64"
+ mac_address: "74:4A:A4:00:CE:D4"
+ power:
+ type: ipmi
+ address: 192.168.1.110
+ user: zteroot
+ pass: superuser
diff --git a/docs/labs/zte-sh-lab/pod-3.rst b/docs/labs/zte-sh-lab/pod-3.rst
new file mode 100644
index 0000000..f0cfae7
--- /dev/null
+++ b/docs/labs/zte-sh-lab/pod-3.rst
@@ -0,0 +1,163 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. _pharos_pod:
+
+*************************
+ZTE SH POD3 Specification
+*************************
+
+
+Introduction
+------------
+
+POD3 (ZTE-POD3) uses Fuel as the installer and performs the latest os-nosdn-kvm-ha CI verification.
+Feature projects such as NFV-KVMV and OVSNFV will run in this POD.
+
+
+Additional Requirements
+-----------------------
+
+
+Server Specifications
+---------------------
+
+**Jump Host**
+
+POD3 shares the same **Jump Host** in the lab.
+
+**Deploy Server**
+
++----------+--------+-------+---------------+-----------+--------+------------+--------------------+------------------+-------+
+| | | | | | Memory | Local | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | (GB) | Storage | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++----------+--------+-------+---------------+-----------+--------+------------+--------------------+------------------+-------+
+| Spider | ZTE | R5300 | 210077307607 | E5-2609x1 | 32 | 600GB SAS | IF0: | | |
+| | | | | | | 1.2TB SCSI | 74:4a:a4:00:21:0b/ | | |
+| | | | | | | | 10.20.0.1/ | | |
+| | | | | | | | native vlan/PXE | | |
+| | | | | | | | IF1: | | |
+| | | | | | | | 74:4a:a4:00:21:0c/ | | |
+| | | | | | | | 10.20.1.1/ | | |
+| | | | | | | | native vlan/PXE | | |
++----------+--------+-------+---------------+-----------+--------+------------+--------------------+------------------+-------+
+
+
+**Compute Nodes**
+
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+------------------+----------------------+-------+
+| | | | | | Memory | Local | Lights-out network | 1GbE: NIC#/IP | 10GbE: NIC#/IP | |
+| Hostname | Vendor | Model | Serial Number | CPUs | (GB) | Storage | (IPMI): IP/MAC, U/P | MAC/VLAN/Network | MAC/VLAN/Network | Notes |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+------------------+----------------------+-------+
+| node1 | ZTE | E9000 | 289016500203 | E5-2670x2 | 64 | 600GB HDD | 192.168.1.32 | | enp2s0f0: | |
+| | | | | | | | 0c:12:62:e4:bf:de | | 74:4a:a4:00:0b:85 | |
+| | | | | | | | zteroot/superuser | | vlan 100/ Admin(PXE) | |
+| | | | | | | | | | enp2s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:0b:86 | |
+| | | | | | | | | | vlan 101/ mgmt | |
+| | | | | | | | | | enp132s0f0: | |
+| | | | | | | | | | 74:4a:a4:00:0b:87 | |
+| | | | | | | | | | vlan 102/ storage | |
+| | | | | | | | | | enp132s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:0b:88 | |
+| | | | | | | | | | vlan 103/ public | |
+| | | | | | | | | | vlan 1020/ private | |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+------------------+----------------------+-------+
+| node2 | ZTE | E9000 | 289016500197 | E5-2670x2 | 64 | 600GB HDD | 192.168.1.33 | | enp2s0f0: | |
+| | | | | | | | 0C:12:62:E4:C0:33 | | 74:4a:a4:00:5c:5d | |
+| | | | | | | | zteroot/superuser | | vlan 100/ Admin(PXE) | |
+| | | | | | | | | | enp2s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:5e | |
+| | | | | | | | | | vlan 101/ mgmt | |
+| | | | | | | | | | enp132s0f0: | |
+| | | | | | | | | | 74:4a:a4:00:5c:5f | |
+| | | | | | | | | | vlan 102/ storage | |
+| | | | | | | | | | enp132s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:60 | |
+| | | | | | | | | | vlan 103/ public | |
+| | | | | | | | | | vlan 1020/ private | |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+------------------+----------------------+-------+
+| node3 | ZTE | E9000 | 289016500003 | E5-2670x2 | 64 | 600GB HDD | 192.168.1.34 | | enp2s0f0: | |
+| | | | | | | | 74:4A:A4:00:30:93 | | 74:4a:a4:00:5c:35 | |
+| | | | | | | | zteroot/superuser | | vlan 100/ Admin(PXE) | |
+| | | | | | | | | | enp2s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:36 | |
+| | | | | | | | | | vlan 101/ mgmt | |
+| | | | | | | | | | enp132s0f0: | |
+| | | | | | | | | | 74:4a:a4:00:5c:37 | |
+| | | | | | | | | | vlan 102/ storage | |
+| | | | | | | | | | enp132s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:38 | |
+| | | | | | | | | | vlan 103/ public | |
+| | | | | | | | | | vlan 1020/ private | |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+------------------+----------------------+-------+
+| node4 | ZTE | E9000 | 289016500105 | E5-2670x2 | 64 | 600GB HDD | 192.168.1.35 | | enp2s0f0: | |
+| | | | | | | | 0C:12:62:E4:C0:42 | | 74:4a:a4:00:5c:69 | |
+| | | | | | | | zteroot/superuser | | vlan 100/ Admin(PXE) | |
+| | | | | | | | | | enp2s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:6a | |
+| | | | | | | | | | vlan 101/ mgmt | |
+| | | | | | | | | | enp132s0f0: | |
+| | | | | | | | | | 74:4a:a4:00:5c:6b | |
+| | | | | | | | | | vlan 102/ storage | |
+| | | | | | | | | | enp132s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:6c | |
+| | | | | | | | | | vlan 103/ public | |
+| | | | | | | | | | vlan 1020/ private | |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+------------------+----------------------+-------+
+| node5 | ZTE | E9000 | 289016500195 | E5-2670x2 | 64 | 600GB HDD | 192.168.1.36 | | enp2s0f0: | |
+| | | | | | | | 74:4A:A4:00:30:43 | | 74:4a:a4:00:5c:6d | |
+| | | | | | | | zteroot/superuser | | vlan 100/ Admin(PXE) | |
+| | | | | | | | | | enp2s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:6e | |
+| | | | | | | | | | vlan 101/ mgmt | |
+| | | | | | | | | | enp132s0f0: | |
+| | | | | | | | | | 74:4a:a4:00:5c:6f | |
+| | | | | | | | | | vlan 102/ storage | |
+| | | | | | | | | | enp132s0f1: | |
+| | | | | | | | | | 74:4a:a4:00:5c:70 | |
+| | | | | | | | | | vlan 103/ public | |
+| | | | | | | | | | vlan 1020/ private | |
++----------+--------+-------+---------------+-----------+--------+-----------+---------------------+------------------+----------------------+-------+
+
+**Subnet allocations**
+
++-----------------+--------------+---------------+------------+-----------------+
+| Network name    | Address      | Mask          | Gateway    | VLAN id         |
++-----------------+--------------+---------------+------------+-----------------+
+| Public          | 172.10.0.0   | 255.255.255.0 | 172.10.0.1 | 103             |
++-----------------+--------------+---------------+------------+-----------------+
+| Fuel Admin/PXE  | 10.20.0.0    | 255.255.255.0 | 10.20.0.1  | native vlan 100 |
++-----------------+--------------+---------------+------------+-----------------+
+| Fuel Management | 192.168.11.0 | 255.255.255.0 |            | 101             |
++-----------------+--------------+---------------+------------+-----------------+
+| Fuel Storage    | 192.168.12.0 | 255.255.255.0 |            | 102             |
++-----------------+--------------+---------------+------------+-----------------+
+
+
+VPN Users
+---------
+
++--------------+--------------+--------------+--------------+--------------+
+| Name | Email | Project | Role | Notes |
++--------------+--------------+--------------+--------------+--------------+
+| | | | | |
++--------------+--------------+--------------+--------------+--------------+
+
+
+Firewall Rules
+--------------
+
++---------------+---------+------+
+| Port(s) | Service | Note |
++---------------+---------+------+
+| 5000(OpenVPN) | Jenkins | |
++---------------+---------+------+
+
+
+POD Topology
+------------
+
+.. image:: ./images/zte_sh_pod_topology.png
+ :alt: POD diagram not found
diff --git a/docs/labs/zte-sh-lab/pod-3.yaml b/docs/labs/zte-sh-lab/pod-3.yaml
new file mode 100644
index 0000000..e3557fc
--- /dev/null
+++ b/docs/labs/zte-sh-lab/pod-3.yaml
@@ -0,0 +1,56 @@
+##############################################################################
+## Copyright (c) 2015 ZTE Corp. and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+nodes:
+ - name: node1
+ tags: control
+ arch: "x86_64"
+ mac_address: "74:4a:a4:00:0b:85"
+ power:
+ type: ipmi
+ address: 192.168.1.32
+ user: zteroot
+ pass: superuser
+ - name: node2
+ tags: control
+ arch: "x86_64"
+ mac_address: "74:4a:a4:00:5c:5d"
+ power:
+ type: ipmi
+ address: 192.168.1.33
+ user: zteroot
+ pass: superuser
+ - name: node3
+ tags: control
+ arch: "x86_64"
+ mac_address: "74:4a:a4:00:5c:35"
+ power:
+ type: ipmi
+ address: 192.168.1.34
+ user: zteroot
+ pass: superuser
+ - name: node4
+ tags: compute
+ arch: "x86_64"
+ mac_address: "74:4a:a4:00:5c:69"
+ power:
+ type: ipmi
+ address: 192.168.1.35
+ user: zteroot
+ pass: superuser
+ - name: node5
+ tags: compute
+ arch: "x86_64"
+ mac_address: "74:4a:a4:00:5c:6d"
+ power:
+ type: ipmi
+ address: 192.168.1.36
+ user: zteroot
+ pass: superuser
+
diff --git a/docs/octopus_docs/images/ci_infra.png b/docs/octopus_docs/images/ci_infra.png
new file mode 100644
index 0000000..53c329c
--- /dev/null
+++ b/docs/octopus_docs/images/ci_infra.png
Binary files differ
diff --git a/docs/octopus_docs/images/daily_job.png b/docs/octopus_docs/images/daily_job.png
new file mode 100644
index 0000000..7386ec6
--- /dev/null
+++ b/docs/octopus_docs/images/daily_job.png
Binary files differ
diff --git a/docs/octopus_docs/images/merge_job.png b/docs/octopus_docs/images/merge_job.png
new file mode 100644
index 0000000..e48ecde
--- /dev/null
+++ b/docs/octopus_docs/images/merge_job.png
Binary files differ
diff --git a/docs/octopus_docs/images/pipeline_overview.png b/docs/octopus_docs/images/pipeline_overview.png
new file mode 100644
index 0000000..de1d4bf
--- /dev/null
+++ b/docs/octopus_docs/images/pipeline_overview.png
Binary files differ
diff --git a/docs/octopus_docs/images/stability_screenshot10.png b/docs/octopus_docs/images/stability_screenshot10.png
new file mode 100644
index 0000000..da246d8
--- /dev/null
+++ b/docs/octopus_docs/images/stability_screenshot10.png
Binary files differ
diff --git a/docs/octopus_docs/images/stability_screenshot11.png b/docs/octopus_docs/images/stability_screenshot11.png
new file mode 100644
index 0000000..9778b58
--- /dev/null
+++ b/docs/octopus_docs/images/stability_screenshot11.png
Binary files differ
diff --git a/docs/octopus_docs/images/stability_screenshot9.png b/docs/octopus_docs/images/stability_screenshot9.png
new file mode 100644
index 0000000..b82b38a
--- /dev/null
+++ b/docs/octopus_docs/images/stability_screenshot9.png
Binary files differ
diff --git a/docs/octopus_docs/images/verify_job.png b/docs/octopus_docs/images/verify_job.png
new file mode 100644
index 0000000..e5ab9db
--- /dev/null
+++ b/docs/octopus_docs/images/verify_job.png
Binary files differ
diff --git a/docs/octopus_docs/index.rst b/docs/octopus_docs/index.rst
new file mode 100644
index 0000000..ce09c73
--- /dev/null
+++ b/docs/octopus_docs/index.rst
@@ -0,0 +1,22 @@
+**********************
+Octopus Project
+**********************
+
+Contents:
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ octopus_info.rst
+ opnfv-ci-infrastructure.rst
+ opnfv-ci-pipelines.rst
+ opnfv-jenkins-slave-connection.rst
+ opnfv-jjb-usage.rst
+ opnfv-artifact-repository.rst
+ opnfv-stablebranch.rst
+ images/ci_infra.png
+ images/daily_job.png
+ images/merge_job.png
+ images/pipeline_overview.png
+ images/verify_job.png
diff --git a/docs/octopus_docs/octopus_info.rst b/docs/octopus_docs/octopus_info.rst
new file mode 100644
index 0000000..a02f455
--- /dev/null
+++ b/docs/octopus_docs/octopus_info.rst
@@ -0,0 +1,38 @@
+Project: Continuous Integration (Octopus)
+==========================================
+
+Introduction
+-------------
+
+Problem Statement:
+^^^^^^^^^^^^^^^^^^^
+
+OPNFV will use many upstream open source projects to create the reference platform.
+All these projects are developed and tested independently and, in many cases,
+do not have the use cases of other projects in mind.
+It is therefore to be expected that integrating these projects will unveil some gaps in functionality,
+since testing the OPNFV use cases requires the interworking of many upstream projects.
+Thus this integration work will bring major benefit to the community.
+
+Therefore the goal of the CI project – Octopus – is to quickly provide a
+prototype integration of a first set of upstream projects.
+Step by step this will later evolve into a full-blown development environment with
+automated test and verification as a continuous integration environment, supporting both
+the parallel evolutionary work in the upstream projects and the improvement of NFV support in this reference platform.
+
+Summary
+^^^^^^^^
+
+The CI project provides the starting point for all OPNFV development activities.
+It starts by integrating stable versions of basic upstream projects,
+and from there creates a full development environment for OPNFV including automatic builds and basic verification.
+This is a very complex task and therefore needs a step by step approach.
+At the same time it is urgent to have a basic environment in place very soon.
+
+* **Create a hierarchical build environment** for the same integrated upstream projects as "getstarted",
+  that uses the build tools as defined by each of the upstream projects and combines them.
+  This allows development and verification in OPNFV collaborative projects.
+
+* **Implement automatic build process on central servers** - Provide automation and periodic builds
+
+* **Execute the continuous automated builds and basic verification**
diff --git a/docs/octopus_docs/opnfv-artifact-repository.rst b/docs/octopus_docs/opnfv-artifact-repository.rst
new file mode 100644
index 0000000..29ec5dd
--- /dev/null
+++ b/docs/octopus_docs/opnfv-artifact-repository.rst
@@ -0,0 +1,193 @@
+===========================
+ OPNFV Artifact Repository
+===========================
+
+Artifact Repository
+===================
+
+What is Artifact Repository
+---------------------------
+
+An Artifact Repository is akin to what Subversion is to source code, i.e.
+it is a way of versioning artifacts produced by build systems, CI, and so on. [1]
+
+Why Artifact Repository is Needed
+---------------------------------
+
+Since many developers check their source code into the Git repository, it may seem natural
+to just place the files you've built into the repo too.
+This can work for a single developer working on a project over the weekend,
+but with a team working on many components that need to be tested and integrated, this won't scale.
+
+The way git works, no revision of any file is ever lost.
+So if you ever check in a big file, the repository will always contain it,
+and a git clone will be that much slower for every clone from that point onward.
+
+The golden rule of revision control systems applies:
+*check in your build scripts, not your build products*.
+
+Unfortunately, it only takes one person to start doing this and we end up with huge repositories.
+Please don't do this. It will make your computers sad.
+Thankfully, Gerrit and code review systems are a massive disincentive to doing this.
+
+You definitely need to avoid storing binary images in git. This is what artifact repositories are for. [2]
+
+A “centralized image repository” is needed that can store multiple versions of various virtual machines
+and have something like /latest pointing to the newest uploaded image.
+It could be a simple nginx server that stores the output images of any successful Jenkins job, for instance.
+
+OPNFV Artifact Repository
+=========================
+
+What is used as Artifact Repository for OPNFV
+---------------------------------------------
+
+Setting up, hosting, and operating an artifact repository on OPNFV infrastructure
+in the Linux Foundation (LF) environment requires too much storage space.
+It is also not a straightforward undertaking to run a robust Artifact Repository and provide 24/7 support.
+
+The OPNFV Project decided to use **Google Cloud Storage** as the OPNFV Artifact Repository for the reasons summarized above. [3]
+
+Usage of Artifact Repository in OPNFV CI
+----------------------------------------
+
+Binaries/packages that are produced by OPNFV Continuous Integration (CI) are deployed/uploaded
+to the Artifact Repository, making it possible to reuse artifacts during later stages of OPNFV CI.
+Stored artifacts can be consumed by individual developers/organizations as well.
+
+In OPNFV, we generally produce PDF and ISO artifacts and store them in the OPNFV Artifact Repository.
+
+OPNFV Artifact Repository Web Interface
+----------------------------------------
+
+The OPNFV Artifact Repository is accessible at http://artifacts.opnfv.org/.
+
+A proxy has been set up by LF at http://build.opnfv.org/artifacts/ for community members located in countries with access restrictions to Google.
+
+Access Rights to OPNFV Artifact Repository
+==========================================
+
+As summarized in previous sections, OPNFV uses Google Cloud Storage as its Artifact Repository.
+By default, everyone has read access to it and artifacts can be fetched/downloaded using a browser,
+a curl-like command-line HTTP client, or gsutil.
+
+Write access to the Artifact Repository is granted on a per-request basis and all requests
+must go through the `LF Helpdesk <opnfv-helpdesk@rt.linuxfoundation.org>`_ with an explanation
+of the purpose of the write access.
+Once you are given write access, you can read the corresponding section to learn how to store artifacts in the OPNFV Artifact Repository.
+
+How to Use OPNFV Artifact Repository
+====================================
+
+There are three basic ways to use the OPNFV Artifact Repository:
+
+* browsing artifacts
+* downloading artifacts
+* uploading artifacts
+
+Please see the corresponding sections below for how to do each of these.
+
+How to Browse Artifacts Stored on OPNFV Artifact Repository
+-----------------------------------------------------------
+
+You can browse stored artifacts using
+
+* **Web Browser**: By navigating to the address `OPNFV Artifact Storage <http://artifacts.opnfv.org/>`_.
+
+* **Command Line HTTP-client**
+ ``curl -o <output_filename> http://artifacts.opnfv.org``
+
+ Example:
+
+ ``curl -o opnfv-artifact-repo.html http://artifacts.opnfv.org``
+
+* **Google Storage Util (gsutil)**
+ ``gsutil ls gs://artifacts.opnfv.org/<path_to_bucket>``
+
+ Example:
+
+ ``gsutil ls gs://artifacts.opnfv.org/octopus``
+
+How to Download Artifacts from OPNFV Artifact Repository
+--------------------------------------------------------
+
+You can download stored artifacts using
+
+* **Web Browser**: By navigating to the address `OPNFV Artifact Storage <http://artifacts.opnfv.org/>`_ and clicking the link of the artifact you want to download.
+
+* **Command Line HTTP-client**
+ ``curl -o <output_filename> http://artifacts.opnfv.org/<path/to/artifact>``
+
+ Example:
+
+ ``curl -o main.pdf http://artifacts.opnfv.org/octopus/docs/release/main.pdf``
+
+* **Google Storage Util (gsutil)**
+ ``gsutil cp gs://artifacts.opnfv.org/<path/to/artifact> <output_filename>``
+
+ Example:
+
+ ``gsutil cp gs://artifacts.opnfv.org/octopus/docs/release/main.pdf main.pdf``
+
+How to Upload Artifacts to OPNFV Artifact Repository
+----------------------------------------------------
+
+As explained in previous sections, you need to get write access to the OPNFV Artifact Repository
+in order to upload artifacts.
+
+Apart from write access, you also need to have a Google account and have the
+Google Cloud Storage utility, **gsutil**, installed on your computer.
+
+Install and Configure gsutil
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Please follow the steps listed below.
+
+1. Install gsutil
+
+   Please follow the steps listed at `this link <https://cloud.google.com/storage/docs/gsutil_install>`_ to install gsutil on your computer.
+
+2. Configure gsutil
+
+   Issue the command below and follow the instructions. You will be asked for the project-id;
+   the project-id is **linux-foundation-collab**. A combined sketch of the whole flow follows this list.
+
+   ``gsutil config``
+
+3. Request write access for the OPNFV Artifact Repository
+
+   Send an email to the `LF Helpdesk <opnfv-helpdesk@rt.linuxfoundation.org>`_ and list the reasons for the request. Do not forget to include your Gmail address.
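+
+A minimal sketch of the whole install-and-configure flow, assuming a machine
+with Python and pip available (the pip-based install is only one possible route;
+the official installer linked above works equally well)::
+
+    # install the standalone gsutil client (one possible route)
+    pip install gsutil
+
+    # interactive configuration; enter linux-foundation-collab as the project-id
+    gsutil config
+
+    # confirm read access works before requesting write access
+    gsutil ls gs://artifacts.opnfv.org/octopus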
+
+Upload Artifacts
+~~~~~~~~~~~~~~~~
+
+Once you have installed and configured gsutil and obtained write access from the LF Helpdesk,
+you should be able to upload artifacts to the OPNFV Artifact Repository.
+
+The command to upload artifacts is
+
+ ``gsutil cp <file_to_upload> gs://artifacts.opnfv.org/<path/to/bucket>``
+
+ Example:
+
+ ``gsutil cp README gs://artifacts.opnfv.org/octopus``
+
+Once the upload operation has completed,
+you can list the bucket and check that the artifact is where it is expected to be.
+
+ ``gsutil ls gs://artifacts.opnfv.org/<path/to/bucket>``
+
+ Example:
+
+ ``gsutil ls gs://artifacts.opnfv.org/octopus``
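+
+Putting upload and verification together, a minimal end-to-end sketch (the file
+name and target path below are placeholders for illustration only)::
+
+    # upload a locally built document to the octopus bucket
+    gsutil cp docs/release/main.pdf gs://artifacts.opnfv.org/octopus/docs/release/main.pdf
+
+    # list the bucket and confirm the new artifact is present
+    gsutil ls gs://artifacts.opnfv.org/octopus/docs/release/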
+
+Getting Help
+============
+
+Send an email to `LF Helpdesk <opnfv-helpdesk@rt.linuxfoundation.org>`_ or join the channel **#opnfv-octopus** on IRC.
+
+References
+----------
+1. `Why you should be using an Artifact Repository <http://blogs.collab.net/subversion/why-you-should-be-using-an-artifact-repository-part-1>`_
+2. `Regarding VM image and Git repo <http://lists.opnfv.org/pipermail/opnfv-tech-discuss/2015-January/000591.html>`_
+3. `Google Cloud Storage <https://cloud.google.com/storage/>`_
diff --git a/docs/octopus_docs/opnfv-ci-infrastructure.rst b/docs/octopus_docs/opnfv-ci-infrastructure.rst
new file mode 100644
index 0000000..b37c112
--- /dev/null
+++ b/docs/octopus_docs/opnfv-ci-infrastructure.rst
@@ -0,0 +1,85 @@
+===========================================
+OPNFV Continuous Integration Infrastructure
+===========================================
+
+This document covers the Continuous Integration (CI) infrastructure used in the day-to-day operation of OPNFV.
+It may be of interest to people who want to help develop this infrastructure or integrate their tools into it.
+
+Infra-overview
+===============
+
+The OPNFV CI infrastructure includes the hardware and tools used to check, build, deploy, and test in the pipeline.
+The diagram below shows the hardware/tool resources used in the Octopus project.
+
+.. image:: images/ci_infra.png
+ :scale: 50%
+ :alt: OPNFV CI Infrastructure
+
+The table below lists the tools/resources that are used in OPNFV CI,
+
++-----------------------------+-----------------------+
+| Tools/Resources | Name |
++=============================+=======================+
+| CI Engine | Jenkins |
++-----------------------------+-----------------------+
+| Source Code Management(SCM) | Git |
++-----------------------------+-----------------------+
+| Code Review | Gerrit |
++-----------------------------+-----------------------+
+| Artifact Repository | Google Storage |
++-----------------------------+-----------------------+
+| Hardware Resources | servers supplied by LF|
++-----------------------------+-----------------------+
+
+The hardware resources are located in the Linux Foundation (LF) lab and in community labs.
+For more details, please read the Pharos project documentation at https://wiki.opnfv.org/pharos.
+A guide describing how to connect your hosted resources to LF Jenkins is available at
+https://wiki.opnfv.org/octopus/jenkins_slave_connection.
+
+Major Infrastructures
+======================
+
+Git/Gerrit
+-----------
+
+Git is a well-known open source distributed version control system.
+It was initially designed and developed for Linux kernel development
+and has become the most widely adopted version control system for software development.
+Git puts emphasis on speed, data integrity, and support for distributed, non-linear workflows.
+
+The Jenkins Git plugin is used as the Source Code Manager (SCM).
+Git's features make it stand apart from nearly every other SCM; for more details,
+please refer to http://git-scm.com/about/. Developers can refer to http://git-scm.com/docs for its usage.
+
+Gerrit is used to facilitate online code reviews on top of our Git version control system.
+As a reviewer, you can see changes presented in a side-by-side view and add inline comments.
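+
+As a minimal sketch of the day-to-day Git/Gerrit workflow, assuming the
+git-review tool is installed and using the releng repository purely as an
+example::
+
+    # clone the repository and set up the Gerrit remote for reviews
+    git clone https://gerrit.opnfv.org/gerrit/p/releng.git
+    cd releng
+    git review -s
+
+    # commit a change with a Signed-off-by line and send it to Gerrit for review
+    git commit -sv
+    git review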
+
+Jenkins and JJB
+----------------
+
+Jenkins is a Continuous Integration system that runs tests and automates some parts of project operations.
+Jenkins mainly focuses on building/testing software projects continuously and monitoring
+executions of externally-run jobs. Upstream documentation is available at https://jenkins-ci.org/.
+
+Jenkins supports plugins, which allow it to be extended to meet specific requirements.
+A number of plugins have been installed, and new ones can be installed when requirements arise.
+
+The Jenkins jobs are defined with Jenkins Job Builder (JJB) in human-readable YAML format.
+The defined jobs are the key elements of the CI pipeline.
+To understand how the jobs in the pipeline run to complete the build, deploy, and test work,
+you can refer to https://wiki.opnfv.org/octopus/pipelines.
+Moreover, to start your own job in Jenkins,
+you can write a JJB definition following the guide at https://wiki.opnfv.org/octopus/jenkins_wow.
+
+Artifact Repository
+-------------------
+
+An artifact repository is a collection of binary software artifacts
+and metadata stored in a defined directory structure.
+It is akin to what Subversion is to source code, i.e.
+it is a way of versioning artifacts produced by build systems, CI, and so on.
+At this moment, since there is not enough storage space in the LF environment,
+Google Cloud Storage is used as the OPNFV artifact repository temporarily.
+
+If you want to find out more about what the artifact repository is and how to use the OPNFV artifact repository,
+the wiki page https://wiki.opnfv.org/octopus/artifact_repository provides a good reference.
diff --git a/docs/octopus_docs/opnfv-ci-pipelines.rst b/docs/octopus_docs/opnfv-ci-pipelines.rst
new file mode 100644
index 0000000..a3e5eb9
--- /dev/null
+++ b/docs/octopus_docs/opnfv-ci-pipelines.rst
@@ -0,0 +1,109 @@
+=================
+OPNFV CI PIPELINE
+=================
+
+OPNFV CI
+========
+
+The OPNFV Continuous Integration (CI) project provides the starting point for all OPNFV development activities.
+It creates a full development environment for OPNFV including automatic build, deployment, and testing.
+
+In order to provide fast and continuous feedback to the OPNFV community, the CI pipeline utilizes different tools
+and runs different types of verification activities in different phases,
+depending on the needs of the different OPNFV projects and of the OPNFV community.
+
+This document aims to provide information regarding the OPNFV CI Pipeline,
+which is currently being enabled for the projects.
+
+CI Pipeline Overview
+====================
+
+The OPNFV CI Pipeline starts with a change (commit), and stages in the pipeline
+are triggered based on events that happen while the change travels through the pipeline,
+producing feedback from different verification activities.
+
+The diagram below shows an overview of the OPNFV CI pipeline.
+
+.. image:: images/pipeline_overview.png
+ :scale: 50%
+ :alt: OPNFV CI Pipeline Overview
+
+Please note that the daily job is omitted from the diagram above, as the daily job
+is currently triggered once during the night based on a timer, not on Gerrit events.
+
+
+Jenkins Jobs in CI Pipeline
+===========================
+
+Verify Jenkins Job
+------------------
+
+OPNFV CI Pipeline has **verify** jobs for all OPNFV Projects in order to
+run quick verification activities for each and every patchset sent to Gerrit for review.
+
+The main purpose of this job is to keep the quality of the codebase at a certain level
+so that whoever clones the repo at any given time gets a *stable* version of the software.
+It also provides feedback regarding the quality of the patchset to the developer who submitted it for review,
+to the requested reviewer(s), and to the rest of the OPNFV community, as early as possible.
+
+This job is triggered automatically when developers issue the **git review** command to publish their changes to Gerrit.
+Gerrit then publishes a **patchset created** event under normal circumstances, triggering the job.
+If the job fails to trigger, or fails during execution for some reason not related to the patchset itself,
+developers can retrigger it by adding a new comment to the change on Gerrit
+that includes either of the keywords **recheck** or **reverify**.
+
+The result of this job will be a verified/failed vote (+1 or -1) on Gerrit.
+Depending on reviews, the commit can later be submitted to master and merged.
+
+This job does not produce any artifact (document, ISO, etc.).
+
+Please check the diagram below to see how the flow looks.
+
+.. image:: images/verify_job.png
+ :scale: 50%
+ :alt: Verify Jenkins Job
+
+Merge Jenkins Job
+-----------------
+
+OPNFV CI Pipeline has **merge** jobs for all OPNFV Projects in order to
+run verification activities for each and every change that gets merged to master.
+
+The main purpose of this job is to give feedback regarding the quality of the master branch
+once a certain change gets merged to master; the current scope of this job is the same as the verify job.
+
+This job is triggered automatically by the Gerrit **change merged** event under normal circumstances.
+If the job fails to trigger, or fails during execution for some reason not related to the patchset itself,
+developers can retrigger it by adding a new comment to the change on Gerrit that includes the keyword **remerge**.
+
+This job currently produces documents and publishes them on `OPNFV Artifact Repository <http://artifacts.opnfv.org/>`_.
+
+Please check the diagram below to see how the flow looks.
+
+
+.. image:: images/merge_job.png
+ :scale: 50%
+ :alt: Merge Jenkins Job
+
+Daily Jenkins Job
+-----------------
+
+OPNFV CI Pipeline has **daily** jobs for all OPNFV Projects in order to
+run more extensive verification activities that can take a long time to finish.
+
+The main purpose of this job is to run full build, produce artifacts (installer ISOs, etc.),
+store artifacts in OPNFV Artifact Repository so they can be downloaded to target environment,
+do the deployment using build artifacts, run tests (Tempest, Robot, etc.)
+and give feedback regarding the quality of the master branch.
+
+This job is triggered automatically every night at 00:00 UTC based on a **timer** under normal circumstances.
+If the job fails to trigger, or fails during execution for some reason not related to the software itself,
+it can only be retriggered by LF or Octopus team members.
+
+This job produces build artifacts and documents and publishes them on `OPNFV Artifact Repository <http://artifacts.opnfv.org/>`_.
+
+Please check the diagram below to see how the flow looks.
+
+.. image:: images/daily_job.png
+ :scale: 50%
+ :alt: Daily Jenkins Job
diff --git a/docs/octopus_docs/opnfv-jenkins-slave-connection.rst b/docs/octopus_docs/opnfv-jenkins-slave-connection.rst
new file mode 100644
index 0000000..1c6eb82
--- /dev/null
+++ b/docs/octopus_docs/opnfv-jenkins-slave-connection.rst
@@ -0,0 +1,181 @@
+================================================
+Connecting OPNFV Community Labs to OPNFV Jenkins
+================================================
+
+.. contents:: Table of Contents
+ :backlinks: none
+
+Abstract
+========
+
+This document describes how to connect resources (servers) located in Linux Foundation (LF) lab
+and labs provided by the OPNFV Community to OPNFV Jenkins.
+
+License
+=======
+Connecting OPNFV Community Labs to OPNFV Jenkins (c) by Fatih Degirmenci (Ericsson AB) and others.
+
+Connecting OPNFV Labs to OPNFV Jenkins document is licensed under a Creative Commons
+Attribution 4.0 International License.
+
+You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+
+Version History
+===============
+
++------------+-------------+------------------+---------------------------------------+
+| **Date** | **Version** | **Author** | **Comment** |
+| | | | |
++------------+-------------+------------------+---------------------------------------+
+| 2015-05-05 | 0.1.0 | Fatih Degirmenci | First draft |
+| | | | |
++------------+-------------+------------------+---------------------------------------+
+| 2015-09-25 | 1.0.0 | Fatih Degirmenci | Instructions for the |
+| | | | Arno SR1 release |
++------------+-------------+------------------+---------------------------------------+
+| 2016-01-25 | 1.1.0 | Jun Li | Change the format for |
+| | | | new doc toolchain |
++------------+-------------+------------------+---------------------------------------+
+| 2016-01-27 | 1.2.0 | Fatih Degirmenci | Instructions for the |
+| | | | Brahmaputra release |
++------------+-------------+------------------+---------------------------------------+
+| 2016-05-25 | 1.3.0 | Julien | Add an additional step after step9 to |
+| | | | output the correct monit config file |
++------------+-------------+------------------+---------------------------------------+
+
+Jenkins
+=======
+
+Jenkins is an extensible open source Continuous Integration (CI) server. [1]
+
+Linux Foundation (LF) hosts and operates `OPNFV Jenkins <https://build.opnfv.org/ci/>`_.
+
+Jenkins Slaves
+==============
+
+**Slaves** are computers that are set up to build projects for a **Jenkins Master**. [2]
+
+Jenkins runs a separate program called "**slave agent**" on slaves.
+When slaves are registered to a master, the master starts distributing load to slaves by
+scheduling jobs to run on slaves if the jobs are set to run on them. [2]
+
+The term **Node** is used to refer to all machines that are part of the Jenkins grid, slaves and
+master. [2]
+
+Two types of slaves are currently connected to OPNFV Jenkins, handling
+different tasks depending on the purpose of connecting the slave.
+
+* Slaves hosted in `LF Lab <https://wiki.opnfv.org/get_started/lflab_hosting#hardware_setup>`_
+* Slaves hosted in `Community Test Labs <https://wiki.opnfv.org/pharos#community_test_labs>`_
+
+The slaves connected to OPNFV Jenkins can be seen using this link:
+https://build.opnfv.org/ci/computer/
+
+Slaves without a red cross next to the computer icon are fully functional.
+
+Connecting Slaves to OPNFV Jenkins
+==================================
+
+The method that is normally used for connecting slaves to Jenkins requires direct SSH access to
+the servers. [3]
+This is the method that is used for connecting slaves hosted in LF Lab.
+
+Connecting slaves using direct SSH access can become a challenge given that the OPNFV Project
+has a number of different labs provided by the community, as mentioned in the previous section.
+All these labs have different security requirements, which can increase the effort
+and the time needed for connecting slaves to Jenkins.
+In order to reduce the effort and time needed for connecting slaves and to streamline the
+process, it has been decided to connect slaves using
+`Java Network Launch Protocol (JNLP) <https://docs.oracle.com/javase/tutorial/deployment/deploymentInDepth/jnlp.html>`_.
+
+Connecting Slaves from LF Lab to OPNFV Jenkins
+----------------------------------------------
+
+Slaves hosted in LF Lab are handled by LF. All the requests and questions regarding
+these slaves should be submitted to `OPNFV LF Helpdesk <opnfv-helpdesk@rt.linuxfoundation.org>`_.
+
+Connecting Slaves from Community Labs to OPNFV Jenkins
+------------------------------------------------------
+
+As noted in the corresponding section, slaves from Community Labs are connected using JNLP. Via JNLP,
+slaves open a connection towards the Jenkins Master instead of the Jenkins Master accessing them directly.
+
+Servers connecting to OPNFV Jenkins using this method must have access to the internet.
+
+Please follow the steps below to connect a slave to OPNFV Jenkins.
+
+ 1. Create a user named **jenkins** on the machine you want to connect to OPNFV Jenkins and give the user sudo rights.
+ 2. Install the needed software on the machine you want to connect to OPNFV Jenkins as a slave.
+ - openjdk 7
+ - monit
+ 3. If the slave will be used for running virtual deployments, Functest, and Yardstick, install the software below and make the jenkins user a member of the corresponding groups.
+ - docker
+ - libvirt
+ 4. Create slave root in Jenkins user home directory.
+ ``mkdir -p /home/jenkins/opnfv/slave_root``
+ 5. Clone OPNFV Releng Git repository.
+ ``mkdir -p /home/jenkins/opnfv/repos``
+
+ ``cd /home/jenkins/opnfv/repos``
+
+ ``git clone https://gerrit.opnfv.org/gerrit/p/releng.git``
+ 6. Contact LF by sending mail to `OPNFV LF Helpdesk <opnfv-helpdesk@rt.linuxfoundation.org>`_ and request creation of a slave on OPNFV Jenkins. Include the following information in your mail.
+ - Slave root (/home/jenkins/opnfv/slave_root)
+ - Public IP of the slave (You can get the IP by executing ``curl http://icanhazip.com/``)
+ - PGP Key (attached to the mail or exported to a key server)
+ 7. Once you get confirmation from LF stating that your slave is created on OPNFV Jenkins, check if the firewall on LF is open for the server you are trying to connect to Jenkins.
+ ``cp /home/jenkins/opnfv/repos/releng/utils/jenkins-jnlp-connect.sh /home/jenkins/``
+ ``cd /home/jenkins/``
+ ``sudo ./jenkins-jnlp-connect.sh -j /home/jenkins -u jenkins -n <slave name on OPNFV Jenkins> -s <the token you received from LF> -f``
+
+ - If you receive an error, follow the steps listed on the command output.
+ 8. Run the same script with the test flag (-t) in the foreground in order to make sure there is no problem with the connection. You should see **INFO: Connected** in the console log.
+ ``sudo ./jenkins-jnlp-connect.sh -j /home/jenkins -u jenkins -n <slave name on OPNFV Jenkins> -s <the token you received from LF> -t``
+
+ - If you receive an error similar to the one shown `on this link <http://hastebin.com/ozadagirax.avrasm>`_, you need to check your firewall and allow outgoing connections for the port.
+ 9. Kill the Java slave.jar process.
+ 10. Run the same script normally, without the test flag (-t), in order to get the monit script created.
+ ``sudo ./jenkins-jnlp-connect.sh -j /home/jenkins -u jenkins -n <slave name on OPNFV Jenkins> -s <the token you received from LF>``
+ 11. Edit the monit configuration and enable the http interface. The file to edit is /etc/monit/monitrc on Ubuntu systems. Uncomment the lines below.
+ ``set httpd port 2812 and``
+ ``use address localhost # only accept connection from localhost``
+ ``allow localhost # allow localhost to connect to the server and``
+ 12. Restart the monit service (the sketch after this list gathers the commands for steps 12 to 14).
+ - Without systemd:
+
+ ``sudo service monit restart``
+ - With systemd: you have to enable the monit service first and then restart it.
+
+ ``sudo systemctl enable monit``
+
+ ``sudo systemctl restart monit``
+ 13. Check to see if jenkins comes up as a managed service in monit.
+ ``sudo monit status``
+ 14. Connect slave to OPNFV Jenkins using monit.
+ ``sudo monit start jenkins``
+ 15. Check the slave on OPNFV Jenkins to verify that it is reported as connected.
+ - The slave on OPNFV Jenkins should have some executors in the “Idle” state if the connection is successful.
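+
+For reference, a sketch of how steps 12 to 14 fit together on a system without systemd
+(the commands are the ones listed above, only gathered into one place)::
+
+    # restart monit after enabling its http interface in /etc/monit/monitrc
+    sudo service monit restart
+
+    # jenkins should now show up as a managed service
+    sudo monit status
+
+    # connect the slave to OPNFV Jenkins and let monit keep it running
+    sudo monit start jenkins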
+
+Notes
+==========
+
+PGP Key Instructions
+--------------------
+
+The public PGP key can be uploaded to a public key server so it can be fetched from
+there using your mail address. An example command to upload the key to a key server is
+
+ ``gpg --keyserver hkp://keys.gnupg.net:80 --send-keys XXXXXXX``
+
+Alternatively, the public PGP key can be stored in a file and attached to the email.
+
+ ``gpg --export -a '<your email address>' > pgp.pubkey``
+
+References
+==========
+
+* `What is Jenkins <https://wiki.jenkins-ci.org/display/JENKINS/Meet+Jenkins>`_
+* `Jenkins Terminology <https://wiki.jenkins-ci.org/display/JENKINS/Terminology>`_
+* `Jenkins SSH Slaves Plugin <https://wiki.jenkins-ci.org/display/JENKINS/SSH+Slaves+plugin>`_
diff --git a/docs/octopus_docs/opnfv-jjb-usage.rst b/docs/octopus_docs/opnfv-jjb-usage.rst
new file mode 100644
index 0000000..4c800a1
--- /dev/null
+++ b/docs/octopus_docs/opnfv-jjb-usage.rst
@@ -0,0 +1,179 @@
+How to write and use JJB?
+============================================
+
+What is Jenkins Job Builder
+----------------------------
+
+Jenkins Job Builder (JJB) takes simple descriptions of Jenkins jobs in YAML format
+and uses them to configure Jenkins jobs.
+JJB keeps your job descriptions in a human-readable format, and its job template system
+simplifies the configuration of Jenkins jobs.
+Upstream documentation is available at http://ci.openstack.org/jenkins-job-builder/.
+
+How to Write/Use Jenkins Job Builder
+------------------------------------
+
+Job templates are widely used in JJB and make the configuration of Jenkins jobs simple.
+If you need to define several jobs which are nearly identical, except perhaps in their names, SCP targets, etc.,
+then you may use a job template to specify the particulars of the job,
+and then use a project to realize the job with appropriate variable substitution.
+
+To illustrate how to configure Jenkins jobs by using a job template,
+we can start with a simple example used in the octopus directory of the OPNFV releng project,
+which just prints "Hello world from octopus", as shown below::
+
+ - job-template:
+ name: octopus-test
+
+ node: master
+
+ project-type: freestyle
+
+ logrotate:
+ daysToKeep: 30
+ numToKeep: 10
+ artifactDaysToKeep: -1
+ artifactNumToKeep: -1
+
+ builders:
+ - shell: |
+ echo "Hello world from octopus"
+
+The value "-1" here means keep forever. You can add this job template to a project's jobs to run it in Jenkins::
+
+ - project:
+ name: octopus
+ jobs:
+ - 'octopus-test'
+
+Then this job works!
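+
+Before pushing a job definition for review you can render it locally; a minimal
+sketch, assuming the jenkins-job-builder package is installed and the YAML above
+is saved under jjb/octopus/ (the paths here are illustrative only)::
+
+    # install the Jenkins Job Builder tooling
+    pip install jenkins-job-builder
+
+    # render the job definitions to XML without touching any Jenkins instance
+    jenkins-jobs test jjb/octopus/ -o output/
+
+    # inspect the generated configuration for the octopus-test job
+    cat output/octopus-test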
+
+Further, if you are a developer who wants to set up a job template to be used in Jenkins,
+we need to dive in deeper. Let us take one job template from the releng project, used for the octopus project,
+'octopus-daily-{stream}', as an example::
+
+ - job-template:
+ name: 'octopus-daily-{stream}'
+
+ node: master
+
+ # Job template for daily builders
+ #
+ # Required Variables:
+ # stream: branch with - in place of / (eg. stable)
+ # branch: branch (eg. stable)
+
+ project-type: freestyle
+ varsetabove: '{somevar}'
+
+ logrotate:
+ daysToKeep: '{build-days-to-keep}'
+ numToKeep: '{build-num-to-keep}'
+ artifactDaysToKeep: '{build-artifact-days-to-keep}'
+ artifactNumToKeep: '{build-artifact-num-to-keep}'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+
+ scm:
+ - git-scm:
+ credentials-id: '{ssh-credentials}'
+ refspec: ''
+ branch: '{branch}'
+
+ wrappers:
+ - ssh-agent-credentials:
+ user: '{ssh-credentials}'
+
+ triggers:
+ - timed: 'H H * * *'
+
+ prebuilders:
+ - test-macro
+
+ builders:
+ - shell:
+ !include-raw build-upload-docu.sh
+
+ postbuilders:
+ - test-macro
+
+The {stream} here means that when you add this template to your jobs, you can replace {stream} with whatever you want, such as::
+
+ stream:
+ - master:
+ branch: 'master'
+
+The line::
+
+ node: master
+
+restricts this job to run on the Jenkins master node. Next, several important modules are described here:
+
+- scm: this module allows you to specify the source code location for the project,
+  and it allows referencing multiple repositories in a Jenkins job.
+- triggers: this module defines what causes a Jenkins job to start building.
+- prebuilders and postbuilders: these define work that needs to be done before and after the builders.
+- builders: this module defines the actions that the Jenkins job should execute;
+  usually shell scripts or Maven targets go there, e.g. build-upload-docu.sh used in our example.
+
+Generally, the modules used in a job template are sequenced as follows:
+
+1. parameters, properties
+2. scm
+3. triggers
+4. wrappers
+5. prebuilders
+6. builders
+7. postbuilders
+8. publishers, reporters, notifications
+
+Working with OPNFV Jenkins Jobs
+-------------------------------
+
+Currently, the OPNFV releng project is the release engineering project that holds the JJB definitions; you can clone the repo::
+
+ git clone ssh://YOU@gerrit.opnfv.org:29418/releng
+
+make changes::
+
+ git commit -sv
+ git review
+ remote: Resolving deltas: 100% (3/3)
+ remote: Processing changes: new: 1, refs: 1, done
+ remote:
+ remote: New Changes:
+ remote: https://gerrit.opnfv.org/gerrit/51
+ remote:
+ To ssh://agardner@gerrit.opnfv.org:29418/releng.git
+ "* [new branch] HEAD -> refs/publish/master
+
+Follow the link to Gerrit (https://gerrit.opnfv.org/gerrit/51); in a few moments
+the verify job will have completed and you will see Verified +1 jenkins-ci in the Gerrit UI.
+
+If the changes pass the verify job (https://build.opnfv.org/ci/view/builder/job/builder-verify-jjb/), the patch can be submitted by a committer.
+
+The verify and merge jobs can be retriggered in Gerrit by simply leaving a comment with one of the keywords listed below.
+This is useful if you need to re-run one of those jobs because of build issues or
+because something changed in the environment.
+
+* Verify Job: Trigger: **recheck** or **reverify**
+
+* Merge Job: Trigger: **remerge**
+
+You can add the persons below as reviewers to your patch in order to get it reviewed and submitted.
+
+* Ulrich Kleber (Ulrich.Kleber@huawei.com)
+* Fatih Degirmenci (fatih.degirmenci@ericsson.com)
+* Xinyu Zhao(Jerry) (zhaoxinyu@huawei.com)
+* Aric Gardner (agardner@linuxfoundation.org)
+
+The current merge and verify jobs for Jenkins Job Builder in the releng project can be seen at https://git.opnfv.org/cgit/releng/tree/jjb.
+
+Assuming that you have set up some job templates and put them into a project,
+the question is then: how do they work? Take the jobs 'builder-verify-jjb' and
+'builder-merge' used in the releng project as examples: 'builder-verify-jjb' verifies the jobs you committed,
+and you will see Verified '+1' jenkins-ci in Gerrit if it succeeds;
+'builder-merge' sets up a merge job and updates all the JJB jobs.
+If you have some new jobs that need to be run, you can set up your own job templates and add them to the project.
diff --git a/docs/octopus_docs/opnfv-stablebranch.rst b/docs/octopus_docs/opnfv-stablebranch.rst
new file mode 100644
index 0000000..1dce7e0
--- /dev/null
+++ b/docs/octopus_docs/opnfv-stablebranch.rst
@@ -0,0 +1,187 @@
+=============
+Stable Branch
+=============
+
+Overview of Stable Branch Handling
+----------------------------------
+
+The stable branch is intended to be a safe source of fixes for high impact bugs and security issues
+which have been fixed on master since a given release.
+It allows users of release (stable) versions to benefit from the ongoing bugfix work after the release.
+
+Official point releases for each project are published from the branch on a per need basis, as decided by the TSC.
+In later stages, a regular cadence for point releases may be introduced.
+
+It is possible to check the currently maintained versions on the releases page.
+
+OPNFV's stable branch policy borrows much from prior art, in particular from OpenStack
+and OpenDaylight.
+
+In general all fixes should be made on the main branch and cherry-picked to stable.
+Only if there is a case where the fix cannot be merged backwards
+would we need to do any work directly on stable.
+The documented method for getting a fix into stable is the **cherry-pick process**.
+
+Stable branch policy
+--------------------
+
+Appropriate fixes
+~~~~~~~~~~~~~~~~~
+
+Only a limited class of changes are appropriate for inclusion on the stable branch.
+
+A number of factors must be weighed when considering a change:
+
+- **The risk of regression** - even the tiniest changes carry some risk of
+ breaking something and we really want to avoid regressions on the stable branch
+- **The user visible benefit** - are we fixing something that users might actually
+ notice and, if so, how important is it?
+- **How self-contained the fix is** - if it fixes a significant issue but also
+ refactors a lot of code, it's probably worth thinking about what a less risky
+ fix might look like
+- Whether the fix is **already on master** - a change must be a **backport** of a change
+ already merged onto master, unless the change simply does not make sense on master
+ (e.g. because of a change of architecture).
+- If there is a suitable **work-around** for a bug, normally there won't be a fix on stable.
+- Since OPNFV is a midstream integration effort, **test cases** might also be suitable backports
+  if they are related to critical bugs found in stable.
+
+Some types of changes are completely forbidden:
+
+- New features
+- Changes to the external APIs
+- Changes to the notification definitions
+- DB schema changes
+- Incompatible config file changes
+- Changes including a version upgrade of an upstream component of OPNFV
+ (since this will typically violate the above points)
+
+Support phases
+~~~~~~~~~~~~~~
+
+Support phases will be introduced at a later time.
+
+Review of fixes
+~~~~~~~~~~~~~~~
+
+Each backported commit proposed to gerrit should be reviewed and +2ed by committer(s) of the
+corresponding projects.
+
+If unsure about the technical details of a given fix, contributors should consult with
+the committers from the affected projects for a more detailed technical review.
+
+If it is unclear whether a fix is appropriate for the stable branch, the TSC currently makes the final decision.
+
+Security fixes
+~~~~~~~~~~~~~~
+
+Fixes for embargoed security issues receive special treatment.
+These should be reviewed in advance of disclosure by committers.
+At the time of coordinated public disclosure,
+the fix is proposed simultaneously to master and the stable branches and immediately approved.
+
+Processes
+---------
+
+Proposing fixes
+~~~~~~~~~~~~~~~
+
+Anyone can propose a cherry-pick to the project committers.
+
+One way is that if a bugfix on master looks like a good candidate for backporting
+- e.g. if it's a significant bug in the previous release - then simply nominating the bug
+for maintenance will bring it to the attention of the maintainers.
+
+If you don't have the appropriate permissions to nominate the bug, then send an email via the user list.
+
+The best way to get a patch merged in a timely manner is to backport it yourself.
+To do so, you can use the "Cherry Pick To" button in the Gerrit UI on the original patch in master.
+Gerrit will take care of creating a new review and modifying the commit message to include the 'cherry picked from ...' line, etc.
+
+If the patch you're proposing will not cherry-pick cleanly,
+you can help by resolving the conflicts yourself and proposing the resulting patch.
+Please keep Conflicts lines in the commit message to help reviewers!
+You can use git-review to propose a change to the stable branch with:
+
+**Cherry Pick Patches from Master to a Stability Branch From Command Line**::
+
+ $> git checkout stable/arno
+ $> git cherry-pick -x $master_commit_id
+ $> git review stable/arno
+
+Note: the cherry-pick -x option includes the 'cherry picked from ...' line
+in the commit message, which is required to avoid a Gerrit bug.
+
+**Cherry Pick Patches from Master to a Stability Branch via Gerrit UI**
+
+1. Select the patch that you want to cherry pick from Master to Stable Branch
+2. Locate the Cherry Pick To button
+
+.. image:: images/stability_screenshot9.png
+ :scale: 100%
+
+3. Start typing "re" in the branch text box to make the suggestions appear, then select "refs/heads/stable/<release-name>"
+
+.. image:: images/stability_screenshot10.png
+ :scale: 100%
+
+4. Click Cherry Pick Change
+
+.. image:: images/stability_screenshot11.png
+ :scale: 100%
+
+Failing all that, just ping one of the team and mention that you think the bug/commit is a good candidate.
+
+Change-Ids
+~~~~~~~~~~
+
+When cherry-picking a commit, keep the original Change-Id and gerrit will
+show a separate review for the stable branch
+while still allowing you to use the Change-Id to see all the reviews associated with it.
+
+Hint: the Change-Id line must be in the last paragraph of the commit message.
+Conflicts noted in the backport may add a new paragraph, which would create a new Change-Id.
+You can avoid that by moving the Conflicts lines above the paragraph containing the
+Change-Id line, or by removing empty lines so the message ends in a single paragraph.
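+
+For illustration only, a hypothetical backport commit message following this hint
+(all identifiers below are made up) could look like::
+
+    Fix connection timeout in deploy script
+
+    Backport of the master fix to stable/arno.
+
+    Conflicts:
+        ci/deploy.sh
+    Change-Id: I0123456789abcdef0123456789abcdef01234567
+    (cherry picked from commit 1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b)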
+
+Email Notifications
+~~~~~~~~~~~~~~~~~~~
+
+If you want to be notified of these patches you can create a watch on this screen:
+https://gerrit.opnfv.org/gerrit/#/settings/projects
+click "Watched Projects"
+
+Project Name: All-Projects
+
+Only If: branch:<stable branch of your choice>
+
+Then check the "Email Notifications - New Changes" checkbox.
+That will cause gerrit to send an email whenever a matching change is proposed,
+and better yet, the change shows up in your 'watched changes' list in gerrit.
+
+Bug Tags
+~~~~~~~~
+
+Bug tags will be introduced when we see the need.
+
+CI Pipeline
+~~~~~~~~~~~
+
+The Octopus team will set up a separate pipeline for stable branches.
+
+
+Stable Branch Maintenance Team organization
+-------------------------------------------
+
+TBD.
+
+References
+----------
+
+
+
+https://wiki.openstack.org/wiki/StableBranch
+
+https://wiki.opendaylight.org/view/Simultaneous_Release:Cutting_Stability_Branches
+
+
+
diff --git a/docs/platformoverview/labinfrastructure.rst b/docs/platformoverview/labinfrastructure.rst
new file mode 100644
index 0000000..09c253d
--- /dev/null
+++ b/docs/platformoverview/labinfrastructure.rst
@@ -0,0 +1,21 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Overview of OPNFV lab infrastructure (Pharos).
+
+************************
+OPNFV Lab Infrastructure
+************************
+
+The `Pharos Project <https://www.opnfv.org/developers/pharos>`_ provides a lab infrastructure that
+is geographically and technically diverse. Labs instantiate **bare-metal** and **virtual**
+environments that are accessed remotely by the community and used for OPNFV platform and feature
+development, builds, deploys and testing. This will greatly assist in developing a highly robust and
+stable OPNFV platform with well understood performance characteristics. Community labs are hosted by
+OPNFV member companies on a voluntary basis. The Linux Foundation also hosts an OPNFV lab that
+provides centralised CI and other production resources which are linked to community labs.
+
+Future lab capabilities will include the ability to easily automate deployment and testing of any
+OPNFV install scenario in any lab environment, as well as a *Virtual Lab* for developer on-boarding
+with minimal effort. :ref:`pharos_master` documents are included with this release.
diff --git a/docs/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.png b/docs/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.png
new file mode 100644
index 0000000..8ac9950
--- /dev/null
+++ b/docs/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.png
Binary files differ
diff --git a/docs/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.png b/docs/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.png
new file mode 100644
index 0000000..15f3893
--- /dev/null
+++ b/docs/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.png
Binary files differ
diff --git a/docs/scenario-lifecycle/ODL Generic Scenarios Evolution.png b/docs/scenario-lifecycle/ODL Generic Scenarios Evolution.png
new file mode 100644
index 0000000..6f2e0a4
--- /dev/null
+++ b/docs/scenario-lifecycle/ODL Generic Scenarios Evolution.png
Binary files differ
diff --git a/docs/scenario-lifecycle/creating-scenarios.rst b/docs/scenario-lifecycle/creating-scenarios.rst
new file mode 100644
index 0000000..f445a00
--- /dev/null
+++ b/docs/scenario-lifecycle/creating-scenarios.rst
@@ -0,0 +1,96 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Creating Scenarios
+--------------------
+
+Purpose
+^^^^^^^^^^^
+
+A new scenario needs to be created when a new combination of upstream
+components or features shall be supported that cannot be provided by the
+existing scenarios in parallel to their existing features.
+
+Typically new scenarios are created as children of existing scenarios.
+
+* In some cases an upstream implementation can be replaced by a different solution.
+ The most obvious example here is the SDN controller. In the first OPNFV release,
+ only ODL was supported. Later ONOS and OpenContrail were added, thus creating
+ new scenarios.
+
+  In most cases, only one of the SDN controllers is needed, thus OPNFV will support
+  the different SDN controllers through different scenarios. This support will be
+  long-term, so there will be multiple generic scenarios for these options.
+
+* Another use case is feature incompatibilities. For instance, OVS and FD.io
+  cannot be combined today. Therefore we need different scenarios for them.
+  If such an incompatibility is not expected to be resolved for a longer time,
+  there can even be separate generic scenarios for these options.
+
+The overlap between scenarios should only be allowed where they add components
+that cannot be integrated in a single deployment.
+
+If scenario A completely covers scenario B, support of scenario B will only be
+provided as long as isolation of development risks is necessary.
+However, there might be cases where somebody still wants to use scenario B
+as a parent for specific scenarios.
+
+This applies especially to generic scenarios, since they need more CI and testing
+resources. Therefore a gating process will be introduced for generic scenarios.
+
+
+Creating Generic Scenarios
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Generic scenarios provide stable and mature deployments of an OPNFV release. Therefore
+it is important to have generic scenarios in place that provide the main capabilities
+needed for NFV environments. On the other hand the number of generic scenarios needs
+to be limited because of resources.
+
+* Creation of a new generic scenario needs TSC consensus.
+* Typically the generic scenario is created by promoting an existing specific
+  scenario. Thus only the additional information needs to be provided.
+* The scenario owner needs to verify that the scenario fulfills the above requirements.
+* Since specific scenarios are typically owned by the project that initiated them,
+  and generic scenarios provide a much broader set of features, in many cases a
+  change of owner is appropriate. In most cases it will be appropriate to assign
+  a testing expert as scenario owner.
+
+Creating Specific Scenarios
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As already stated, typically specific scenarios are created as children of existing
+scenarios. The parent can be a generic or a specific scenario.
+
+Creation of specific scenarios shall be very easy and can be done any time. However,
+support might be low priority during a final release preparation, e.g. after a MS6.
+
+* The PTL of the project developing the feature(s) or integrating a component etc. can
+  request the scenario (tbd from whom: CI or release manager, no need for TSC).
+* The PTL shall provide some justification why a new scenario is needed.
+  It will be appropriate to discuss that justification in the weekly technical
+  discussion meeting.
+* The PTL should have prepared that by finding support from one of the installers.
+* The PTL should explain from which "parent scenario" (see below) the work will start,
+  and what the planned additions are.
+* The PTL shall assign a unique name. Naming rules will be set by TSC.
+* The PTL shall provide a rough time schedule stating when the scenario is expected
+  to join a release, when it is expected to merge into other scenarios, and when its
+  features are expected to become available in generic scenarios.
+  A scenario can join a release at the MS0 after its creation.
+  It should join a release at the latest at the next MS0 six months after its
+  creation (that is, it should skip at most one release) and merge to its parent
+  at most 2 releases later.
+ .. Editors note: "2 releases" is rather strict maybe refine?
+* The PTL should explain the infrastructure requirements and clarify that sufficient
+ resources are available for the scenario.
+* The PTL shall assign a scenario owner.
+* The scenario owner shall maintain the scenario descriptor file according to the
+ template.
+* The scenario owner shall initiate the integration of the scenario into CI and releases.
+* When the scenario joins a release this needs to be done in time for the relevant
+ milestones.
+
+
diff --git a/docs/scenario-lifecycle/current-status.rst b/docs/scenario-lifecycle/current-status.rst
new file mode 100644
index 0000000..08a3776
--- /dev/null
+++ b/docs/scenario-lifecycle/current-status.rst
@@ -0,0 +1,50 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Current Status
+---------------
+
+tbd: this chapter will summarize the scenario analysis.
+
+Arno
+^^^^^^^^
+
+In Arno release, the scenario concept was not created yet.
+Looking back, we can say we had one scenario with OpenStack, ODL and KVM,
+that could be deployed in two ways, by the two installers available in Arno.
+
+Brahmaputra
+^^^^^^^^^^^^^^^^
+
+tbd
+
+Colorado
+^^^^^^^^^^^^
+
+tbd
+
+Danube
+^^^^^^^^^^
+
+tbd: Analysis of the 58 scenarios.
+The analysis can be found in the slides at
+https://wiki.opnfv.org/display/INF/Scenario+Consolidation
+and will be explained with some text here.
+The text will also use the diagrams from the slides, e.g.
+showing a scenario tree:
+
+.. figure:: scenario-tree.png
+
+and an idea about possible generic scenarios:
+
+.. figure:: scenario-tree+idea.png
+
+as well as possible ways to reach this.
+
+
+
+
+
+
diff --git a/docs/scenario-lifecycle/deployment-options.rst b/docs/scenario-lifecycle/deployment-options.rst
new file mode 100644
index 0000000..2c0a342
--- /dev/null
+++ b/docs/scenario-lifecycle/deployment-options.rst
@@ -0,0 +1,128 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Deployment Options
+-------------------
+
+What are deployment options?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. Editors note: Some installers call it settings. Prefer options, because it allows
+.. cases with multiple options.
+
+During the analysis of scenario definitions in the Colorado and Danube releases, it became
+clear that HA and NOHA deployments of otherwise identical scenarios shouldn't be
+called different scenarios.
+
+This understanding leads to the definition of another kind of attribute
+in scenario definitions. Many scenarios can be deployed in different ways:
+
+* **HA** configuration of OpenStack modules (that is redundancy using multiple
+ controllers running OpenStack services) versus NOHA with only a single controller
+ running a single instance of each OpenStack service
+* Some scenarios can be deployed on Intel and on ARM **hardware**.
+* We can see the **installation tools** in the same way. Independent of the installer
+ that was used for the deployment of a scenario, the same functionality will be
+ provided and we can run the same testcases.
+
+Please note that a scenario can support multiple deployment options. And a scenario
+definition must specify at least one option of each type.
+
+In future there will be more deployment options, e.g. redundancy models or other
+clustering options of SDN controllers, or upscaling compute or control nodes.
+
+CI Pipeline needs to test all configuration options of a scenario.
+
+* Development cycles (verify-jobs, daily, weekly) don't need to run all
+  options each time
+* Release testing must cover all those combinations of configuration options that
+  will be part of the release. Typically the HA configurations are released on
+  bare metal with the allowed hardware options and all installers that can deploy
+  those. Release of a NOHA option should be an exception, e.g. for scenarios
+  that are not mature yet.
+* Virtual deployments are not mentioned here. All scenarios should allow virtual
+ deployment where applicable.
+ But in release testing, bare metal deployment will be necessary.
+ CI will use virtual deployments as much as appropriate for resource reasons.
+
+
+Deployment options or new scenarios
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general we can say that a different scenario is needed when the set of components
+is changed (or in some cases a general deploy-time configuration of a component). If
+we deploy the same components in a different way, we can define this via deployment
+options.
+
+**Examples**
+
+* Deploying a different SDN controller or data plane (OVS/FD.IO) requires a different
+  scenario.
+* HA/NOHA will deploy the same components on different number of nodes, so it is a
+ deployment option.
+* Different hardware types should not lead to new scenarios. Typically the same
+  scenario can be deployed on multiple hardware types.
+
+
+HA and NOHA
+^^^^^^^^^^^^^
+
+Both HA and NOHA options of a scenario are important.
+
+* HA deployment is important to be released in major OPNFV releases, because
+ telco deployments typically have strong requirements on availability.
+* NOHA deployments require fewer resources and are sufficient for many use cases.
+  For instance, sandbox testing is easier, and automatic verification
+  in the CI pipeline can make use of it.
+* Generic scenarios shall support the HA and NOHA option.
+* Specific scenarios can focus on the NOHA option if their features are independent
+ from the controller redundancy. But before merging with generic scenarios, they
+ should provide both options.
+
+
+Hardware types
+^^^^^^^^^^^^^^^^^
+
+In its first releases, OPNFV could be deployed on Intel hardware only. Later, support
+for ARM hardware was added and now 5 scenarios can already be deployed on both.
+
+
+Virtual deployment
+^^^^^^^^^^^^^^^^^^^^^^
+
+Many, but not all scenarios can be deployed on virtual PODs. Therefore the scenario
+definition shall specify whether virtual deployment is possible.
+
+Typically a virtual HA deployment shall look very much the same as a bare-metal HA
+deployment, that is the distribution of modules on nodes/VMs is similar. But there
+might be cases where there are differences. Thus, the scenario specification needs
+to provide the data for each separately.
+
+
+Deployment tools
+^^^^^^^^^^^^^^^^^^^
+
+Deployment tools (installers) are in a very similar relation to the scenarios.
+Each scenario can be deployed by one or more installer. Thus we can specify the
+installers for a scenario as a deployment option.
+
+However, the installers need additional detailed information for the deployment.
+Not every installer supports the same HA, hardware and virtualization options,
+or the same distribution of modules. Each deployment may look slightly different
+per installer.
+
+The scenario definition needs to provide such information in a way it can be easily
+consumed by the installers.
+
+
+
+Other deployment options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This set of deployment options is based on what is required by Danube scenarios.
+Future releases will most likely introduce additional deployment options.
+
+
+
diff --git a/docs/scenario-lifecycle/feature-compatibility-nosdn.png b/docs/scenario-lifecycle/feature-compatibility-nosdn.png
new file mode 100644
index 0000000..09520aa
--- /dev/null
+++ b/docs/scenario-lifecycle/feature-compatibility-nosdn.png
Binary files differ
diff --git a/docs/scenario-lifecycle/feature-compatibility-odl.png b/docs/scenario-lifecycle/feature-compatibility-odl.png
new file mode 100644
index 0000000..600082a
--- /dev/null
+++ b/docs/scenario-lifecycle/feature-compatibility-odl.png
Binary files differ
diff --git a/docs/scenario-lifecycle/generic-scenarios.rst b/docs/scenario-lifecycle/generic-scenarios.rst
new file mode 100644
index 0000000..f159c0c
--- /dev/null
+++ b/docs/scenario-lifecycle/generic-scenarios.rst
@@ -0,0 +1,53 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Generic Scenarios
+------------------
+
+Generic scenarios provide a stable environment for users who want to base their
+products on them.
+
+* Generic scenarios provide a basic combination of upstream components together
+ with the superset of possible mature features that can be deployed on them.
+* Generic scenarios should be supported by all installers.
+* All generic scenarios in a release should have the same common major versions
+ of the included upstream components.
+ These upstream versions can then be seen as the upstream versions for the
+  release. E.g. that way we can say: "OPNFV xxx contains OpenStack abc,
+  ODL def, ONOS ghi, OVS jkl".
+ But most installers cannot directly reference any
+ upstream version. This may lead to minor differences.
+ Nevertheless features and test cases require all installers using the same
+ major versions.
+* Generic scenarios should use stable sources
+ and lock the versions before the release by either pointing to a tag or sha1.
+ According to the LF badging program it should be possible to reproduce
+ the release from source again.
+ Thus the upstream repos should be in safe locations.
+  Also only tagged source versions should be used for the release, so the
+  release can be reproduced identically for different purposes, such as
+  reproducing a bug reported by users and issuing the fix appropriately,
+  even after the upstream project has applied patches.
+ .. Editors note: There is discussion ongoing in INFRA and SEC working groups how
+ .. to realize this. Thus the description is still a bit vague. Details will be
+ .. added later either here or in some INFRA document.
+* Generic scenarios should be stable and mature. Therefore they will be tested more
+ thoroughly and run special release testing so a high level of stability can be
+ provided.
+* Generic scenarios will live through many OPNFV releases.
+* More resources will be allocated to maintaining generic scenarios and they will
+ have priority for CI resources.
+ .. Editors note: Discussion ongoing in INFRA about toolchain issues.
+
+Note: in some cases it might be difficult for an installer to support all generic
+scenarios immediately. In this case an exception can be defined, but the installer
+has to provide a plan for how to achieve support for all generic scenarios.
+
+Note: in some cases, upstream projects don't have a proper CI process with
+tagged stable versions. Also some installers' way of working doesn't allow
+selecting the repo and tag. Thus a stepwise approach will be necessary to
+fulfill this requirement.
+
+
diff --git a/docs/scenario-lifecycle/index.rst b/docs/scenario-lifecycle/index.rst
new file mode 100644
index 0000000..36dd92a
--- /dev/null
+++ b/docs/scenario-lifecycle/index.rst
@@ -0,0 +1,24 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+**********************
+Scenario Lifecycle
+**********************
+
+Contents:
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ scenario-overview.rst
+ generic-scenarios.rst
+ specific-scenarios.rst
+ parent-child-relations.rst
+ creating-scenarios.rst
+ deployment-options.rst
+ mano-scenarios.rst
+ current-status.rst
+ scenario-descriptor-files.rst
+
diff --git a/docs/scenario-lifecycle/mano-scenarios.rst b/docs/scenario-lifecycle/mano-scenarios.rst
new file mode 100644
index 0000000..0eee143
--- /dev/null
+++ b/docs/scenario-lifecycle/mano-scenarios.rst
@@ -0,0 +1,31 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+MANO Scenarios
+---------------
+
+Since OPNFV organizes all deployments using scenarios, MANO components also need
+to be covered by scenarios.
+
+On the other hand, all NFVI/VIM level scenarios need to be orchestrated using a
+set of components from the NFVO and VNFM layers.
+
+The idea here is therefore to specify for a MANO scenario:
+
+* The MANO components to deploy
+* A list of supported NFVI/VIM level scenarios that can be orchestrated
+ using this MANO scenario.
+
+The MANO test cases will define the VNFs to use.
+
+MANO scenarios will have more work to do if they require new nodes to be deployed on.
+They should include this aspect in their resource planning/requests and contact
+Infra/Pharos in case that a change of the Pharos spec is needed and new PODs need
+to be made available based on the amended spec.
+
+More details need to be investigated as we gain experience with the MANO scenarios.
+
+
+
diff --git a/docs/scenario-lifecycle/parent-child-relations.rst b/docs/scenario-lifecycle/parent-child-relations.rst
new file mode 100644
index 0000000..ca15619
--- /dev/null
+++ b/docs/scenario-lifecycle/parent-child-relations.rst
@@ -0,0 +1,62 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Parent - Child Relations
+-------------------------
+
+In many cases, development adds a feature to an existing scenario by adding additional
+components. This is called creating a child scenario from a parent.
+
+* Parent scenarios typically are more stable than children.
+* Children should plan to merge their feature back to the parent.
+* Merge back will often add components to the parent.
+
+.. figure:: parent-child.png
+
+* Child scenarios can be part of releases.
+* Child scenarios should merge back to their parent after 2 releases.
+* If a child scenario lives through several releases, it might be desirable
+  to "rebase/cherry-pick" a child scenario to follow changes in the parent scenario.
+* Child scenarios typically support a smaller number of deployment options than
+ their parent
+
+Child scenarios are specific scenarios. Parent scenarios can be generic or specific
+scenarios.
+
+Child scenarios can be created any time. If they want to join a release, they have
+to be created before MS0 of that release.
+
+
+Siblings
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In some cases it could make more sense to create a sibling rather than a child
+(e.g. if it is expected that merging back to the parent will be difficult).
+In other words, the content of the child scenario would be incompatible with the content
+of the parent scenario.
+In that case, the child scenario should rather become a new branch instead of
+merging back to the parent.
+
+.. figure:: sibling.png
+
+Typically the sibling uses alternative components/solutions to the parent; in the
+long term it might evolve into a new generic scenario, that is, a new branch
+in the scenario tree.
+
+Creation of the sibling shall not be gated. It should be covered in the scope of
+an approved project, so there should be no big surprises.
+
+But at a certain time the new scenario will want to change its status from a
+specific scenario to a generic scenario. This move will need TSC approval.
+For the application, the scenario owner shall demonstrate that the scenario
+fulfills the requirements of a generic scenario (see later).
+
+Examples: SDN controller options, Container technologies, data plane solutions,
+MANO solutions.
+
+Please note that from time to time, the TSC will need to review the
+set of generic scenarios and "branches" in the scenario tree.
+
+
diff --git a/docs/scenario-lifecycle/parent-child.png b/docs/scenario-lifecycle/parent-child.png
new file mode 100644
index 0000000..2f71149
--- /dev/null
+++ b/docs/scenario-lifecycle/parent-child.png
Binary files differ
diff --git a/docs/scenario-lifecycle/scenario-descriptor-files.rst b/docs/scenario-lifecycle/scenario-descriptor-files.rst
new file mode 100644
index 0000000..b6c44f7
--- /dev/null
+++ b/docs/scenario-lifecycle/scenario-descriptor-files.rst
@@ -0,0 +1,228 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Scenario Descriptor Files
+----------------------------
+
+What are Scenario Descriptor Files?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Every scenario is described in its own descriptor file.
+The scenario descriptor file will be used by several parties:
+
+* Installer tools will read from it the list of components to be installed
+ and the configuration (e.g. deployment options and necessary details) to use.
+* The dynamic CI process will read from it the prerequisites of the scenario
+ to select the resource that has the needed capabilities for the deployment.
+  It will also select the installer
+  from the list of supported installers and the other deployment options,
+  in a combination that is supported.
+
+ The dynamic CI process will provide the installers with the deployment option
+ to use for a particular deployment.
+
+* The scenario owner needs to provide the descriptor file.
+
+ When compiling it the scenario owner typically needs to work together with
+ the installers, so the installers will support the required components and
+ options.
+* The testing framework can read from the scenario descriptor file necessary
+ information to know which features can be tested on the scenario.
+* The scenario descriptor file will also contain some maintenance information
+
+
+Structure of the file
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The scenario descriptor file is a YAML file. The syntax will allow specifying
+additional descriptor files, to improve readability or to share common
+configurations across multiple scenarios.
+
+The file has following main sections:
+
+* metadata (owner, history, description)
+* list of components (names, versions, submodules)
+* deployment options (HA/NOHA, hardware&virtualization, installers, including
+ possible combinations and necessary details)
+* other prerequisites (e.g. memory requirement more than pharos spec)
+* list of features to be tested
+
+More information will be provided in the next version of this document. The file will
+be defined based on the installer-specific files for scenario specification
+used by the 4 installers in the Danube release. This will make sure that the
+information needed by the installers is covered.
+
+All scenario files will be stored in a central repo, e.g. Octopus. There will
+also be a commented template to help create scenario descriptor files.
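+
+As an illustration only (the exact schema is still to be defined, and the keys below are
+assumptions derived from the examples later in this chapter), the top-level structure of
+a descriptor file could look like::
+
+    metadata:
+      # owner, history, description (see below)
+    components:
+      # upstream components with versions and submodules
+    availability:
+      # HA / NOHA node layouts
+    deployment-tools:
+      # supported installers and option combinations
+    prerequisites:
+      # requirements beyond the Pharos spec, if any
+    testcases:
+      # information for the test frameworks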
+
+
+Metadata
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the Danube timeframe, only the Fuel installer has some metadata in its descriptor file.
+The new template contains the following fields (an illustrative example follows the list):
+
+* Unique name
+
+  This is a free-form name; the recommendation is to use fish names, matching
+  the OPNFV release naming based on rivers.
+
+* A free text title
+
+  This should be a short descriptive text stating the main purpose
+
+* A version number for the descriptor file
+
+ Three digits, separated with dots, as used by Fuel in Danube
+
+* Creation date
+* Comment
+
+ The file should contain a clear description of the purpose of the scenario,
+ including the main benefits and major features.
+ If applicable, the parent scenario should be mentioned.
+
+* First OPNFV version to use the scenario
+* Author/Owner
+
+* A list of additional contact persons, e.g. from installers or major components
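+
+For illustration, the metadata section of a descriptor file might then look like the
+following sketch (all key names and values are made up)::
+
+    metadata:
+      name: guppy
+      title: ODL scenario with an example feature
+      version: 1.0.0
+      created: 2017-04-01
+      comment: >
+        Child of the basic ODL scenario, adding the example feature.
+      first-release: Euphrates
+      owner: Jane Doe
+      contacts:
+        - John Smith (installer support)
+        - Alice Example (feature project)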
+
+Components
+^^^^^^^^^^^^^^^^
+
+In this section all components are listed together with their versions.
+For some components, submodules can be listed in addition.
+
+More details will be added.
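+
+As a rough sketch only (the component names, versions and keys below are assumptions),
+this section could look like::
+
+    components:
+      - name: openstack
+        version: ocata
+        submodules:
+          - nova
+          - neutron
+          - cinder
+      - name: opendaylight
+        version: carbon
+      - name: ovs
+        version: 2.7.0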
+
+
+Deployment options
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This section will list the supported deployment options. In each category at least
+one option must be supported.
+
+* hardware (cpu) types (intel or ARM)
+* Virtualization (bare-metal or vPOD)
+* availability (HA or NOHA)
+
+  This subsection also needs to specify what an HA deployment needs, e.g.:
+
+::
+
+    availability:
+
+      - type: HA
+        nodes:
+          - name: host1
+            roles:
+              - openstack-controller
+              - odl
+              - ceph-adm
+              - ceph-mon
+          - name: host2
+            roles:
+              - openstack-controller
+              - odl
+              - ceph-adm
+              - ceph-mon
+          - name: host3
+            roles:
+              - openstack-controller
+              - odl
+              - ceph-adm
+              - ceph-mon
+          - name: host4
+            roles:
+              - openstack-compute
+              - ceph-osd
+          - name: host5
+            roles:
+              - openstack-compute
+              - ceph-osd
+      - type: NOHA
+        nodes:
+          - name: host1
+            roles:
+              - openstack-controller
+              - odl
+              - ceph-adm
+              - ceph-mon
+          - name: host2
+            roles:
+              - openstack-compute
+              - ceph-osd
+          - name: host3
+            roles:
+              - openstack-compute
+              - ceph-osd
+
+
+
+* deployment tool (apex, compass, fuel, daisy, joid)
+
+ In the section for each deployment tool, the combinations of the first three
+ options have to be listed, e.g.:
+
+::
+
+ deployment-tools:
+
+ - type: fuel
+ cpu: intel
+ pod: baremetal
+ availability: HA
+ - type: fuel
+ cpu: intel
+ pod: virtual
+ availability: HA
+ - type: fuel
+ cpu: intel
+ pod: virtual
+ availability: NOHA
+
+Please note that this allows easy definition of other availability options
+including scaling and redundant configuration of SDN controllers.
+
+
+Prerequisites
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This section will list additional prerequisites. Currently there is only
+one case where a scenario has additional prerequisites beyond the Pharos spec.
+E.g. a component could require more RAM on the nodes than defined in the
+Pharos spec.
+In general it is preferable to raise such requirements with Pharos
+using the Pharos change request process, but in some cases it might be
+better to specify additional prerequisites.
+
+Another use case for these prerequisites will be the usage of specialized
+hardware, e.g. for acceleration. This needs further study.
+
+The section can be empty or omitted.
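+
+If the section is present, it could be a short list of requirements beyond the Pharos
+spec, for example (the keys and values below are assumptions)::
+
+    prerequisites:
+      - type: memory
+        description: compute nodes need 64G RAM instead of the 32G in the Pharos spec
+      - type: hardware
+        description: one acceleration NIC per compute node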
+
+
+Testcases
+^^^^^^^^^^^^^^^^
+
+This section will provide information for functest and yardstick to decide
+on the proper test cases for the scenario.
+
+More details will be added.
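+
+A minimal sketch, assuming the section simply lists the test suites relevant for the
+scenario (the names below are illustrative only)::
+
+    testcases:
+      functest:
+        - healthcheck
+        - smoke
+        - example-feature-suite
+      yardstick:
+        - opnfv_yardstick_tc002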
+
+
+Shared settings
+^^^^^^^^^^^^^^^^
+
+This descriptor file might get quite long and complex. Also some of the settings
+will be shared between several scenarios, e.g. a long OpenStack module list.
+
+Therefore it shall be possible to reference another file like a macro.
+In that case the whole content of that file is included at that place, e.g.:
+
+::
+
+ availability:
+
+ - type: HA
+ file: odl-ha-configuration.yaml
+
+
diff --git a/docs/scenario-lifecycle/scenario-overview.rst b/docs/scenario-lifecycle/scenario-overview.rst
new file mode 100644
index 0000000..9c9c508
--- /dev/null
+++ b/docs/scenario-lifecycle/scenario-overview.rst
@@ -0,0 +1,97 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+.. Scenario Lifecycle
+.. ==========================================
+
+Note: This document is still work in progress.
+
+Overview
+-------------
+
+Problem Statement:
+^^^^^^^^^^^^^^^^^^^
+
+OPNFV provides the NFV reference platform in different variants, using different
+upstream open source projects.
+In many cases this includes also different upstream projects providing similar or
+overlapping functionality.
+
+OPNFV introduces scenarios to define various combinations of components from upstream
+projects or configuration options for these components.
+
+The number of such scenarios has increased over time, so it is necessary to clearly
+define how to handle the scenarios.
+
+Introduction:
+^^^^^^^^^^^^^^^^^^^
+Some OPNFV scenarios have an experimental nature, since they introduce
+new technologies that are not yet mature enough to provide a stable release.
+Nevertheless there also needs to be a way to provide the user with the
+opportunity to try these new features in an OPNFV release context.
+
+Other scenarios are used to provide stable environments for users
+that wish to build products or live deployments on them.
+
+OPNFV scenario lifecycle process will support this by defining two types of scenarios:
+
+* **Generic scenarios** cover a stable set of common features provided
+ by different components and target long-term usage.
+* **Specific scenarios** are needed during development to introduce new upstream
+ components or new features.
+ They are intended to merge with other specific scenarios
+ and bring their features into at least one generic scenario.
+
+OPNFV scenarios are deployed using one of the installer tools.
+A scenario can be deployed by multiple installers; the results will look
+very similar, but not identical. The capabilities provided by the deployments
+should be identical. Results of functional tests should be the same,
+independent of the installer that was used. Performance or other
+behavioral aspects could differ.
+The scenario lifecycle process will also define how to document which installer
+can be used for a scenario and how the CI process can trigger automatic deployment
+for a scenario via one of the supported installers.
+
+When a developer decides to define a new scenario, he typically will take one
+of the existing scenarios and make some changes, such as:
+
+* add additional components
+* change a deploy-time configuration
+* use a component in a more experimental version
+
+In this case the already existing scenario is called a "parent" and the new
+scenario a "child".
+
+Typically parent scenarios are generic scenarios, but this is not mandated.
+Most of the time the child scenario will develop the new functionality over some
+time and then try to merge its configuration back to the parent.
+But in other cases, the child will introduce a technology that cannot easily
+be combined with the parent.
+For this case this document will define how a new generic scenario can be created.
+
+Many OPNFV scenarios can be deployed in a HA (high availability) or non-HA
+configuration.
+HA configurations deploy some components according to a redundancy model,
+as supported by the components.
+In these cases multiple deployment options are defined for the same scenario.
+
+Deployment options will also be used if the same scenario can be deployed
+on multiple types of hardware, i.e. Intel and ARM.
+
+Every scenario will be described in a scenario descriptor yaml file.
+This file shall contain all the necessary information for different users, such
+as the installers (which components to deploy etc.),
+the CI process (to find the right resources),
+the test projects (to select correct test cases), etc.
+
+In early OPNFV releases, scenarios covered components of the infrastructure,
+that is NFVI and VIM.
+With the introduction of MANO, an additional dimension for scenarios is needed.
+The same MANO components need to be used together with each of the infrastructure
+scenarios. Thus MANO scenarios will define the MANO components and a list of
+infrastructure scenarios to work with. Please note that MANO scenarios follow
+the same lifecycle and rules for generic and specific scenarios like the
+infrastructure scenarios.
+
diff --git a/docs/scenario-lifecycle/scenario-tree+idea.png b/docs/scenario-lifecycle/scenario-tree+idea.png
new file mode 100644
index 0000000..b6d4d8a
--- /dev/null
+++ b/docs/scenario-lifecycle/scenario-tree+idea.png
Binary files differ
diff --git a/docs/scenario-lifecycle/scenario-tree.png b/docs/scenario-lifecycle/scenario-tree.png
new file mode 100644
index 0000000..619b5a3
--- /dev/null
+++ b/docs/scenario-lifecycle/scenario-tree.png
Binary files differ
diff --git a/docs/scenario-lifecycle/sibling.png b/docs/scenario-lifecycle/sibling.png
new file mode 100644
index 0000000..82d4805
--- /dev/null
+++ b/docs/scenario-lifecycle/sibling.png
Binary files differ
diff --git a/docs/scenario-lifecycle/specific-scenarios.rst b/docs/scenario-lifecycle/specific-scenarios.rst
new file mode 100644
index 0000000..5f426e7
--- /dev/null
+++ b/docs/scenario-lifecycle/specific-scenarios.rst
@@ -0,0 +1,34 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Specific Scenarios
+------------------
+
+Specific scenarios are used for OPNFV development and help to isolate a path of development.
+
+* Specific scenarios typically focus on a feature or topic.
+* Specific scenarios allow development of their main feature to advance without
+  destabilizing other features.
+* Specific scenarios provide additional flexibility in their handling to allow the
+  development to be agile.
+* Specific scenarios can use a newer version of their main upstream component or even
+  apply midstream patches during OPNFV deployment, i.e. the deployable artifact
+  is created via cross-community CI or even only in OPNFV and not upstream.
+* Specific scenarios should have a limited life time. After a few releases, the feature
+  development should have matured and the feature should be made available in different
+  configurations if possible. Typically the scenario should then be merged with other
+  scenarios, preferably with generic scenarios.
+* Normally specific scenarios will be released within the major OPNFV releases. But
+ they don't need to fulfill maturity requirements (stable upstream versions and repos,
+ stability testing), and can deviate in the used upstream versions.
+* In exceptional cases we might release a specific scenario independently, in case there
+ is a need. Thus specific scenarios provide a way to a more DevOps-like process.
+* Specific scenarios will likely have a shorter support period after release as they are of
+  interest to a smaller user community than generic scenarios.
+* They will be granted less CI resources than generic scenarios, e.g. for periodic
+ CI jobs.
+* We may need to prioritize resources post-release for maintenance / regression testing.
+
+
diff --git a/docs/specification/hardwarespec.rst b/docs/specification/hardwarespec.rst
new file mode 100644
index 0000000..8086aa9
--- /dev/null
+++ b/docs/specification/hardwarespec.rst
@@ -0,0 +1,52 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+Hardware
+--------
+
+A Pharos compliant OPNFV test-bed provides:
+
+- One CentOS 7 jump server on which the virtualized OpenStack/OPNFV installer runs
+- In the Brahmaputra release you may select a variety of deployment toolchains to deploy from the
+ jump server.
+- 5 compute / controller nodes (`BGS
+ <https://wiki.opnfv.org/get_started/get_started_work_environment>`_ requires 5 nodes)
+- A configured network topology allowing for LOM, Admin, Public, Private, and Storage Networks
+- Remote access as defined by the Jenkins slave configuration guide
+
+http://artifacts.opnfv.org/brahmaputra.1.0/docs/opnfv-jenkins-slave-connection.brahmaputra.1.0.html
+
+**Servers**
+
+**CPU:**
+
+* Intel Xeon E5-2600v2 Series or newer
+* AArch64 (64bit ARM architecture) compatible (ARMv8 or newer)
+
+**Firmware:**
+
+* BIOS/EFI compatible for x86-family blades
+* EFI compatible for AArch64 blades
+
+**Local Storage:**
+
+The following describes the minimum for the Pharos spec, which is designed to provide enough capacity for
+a reasonably functional environment. Additional and/or faster disks are nice to have and may produce
+a better result.
+
+* Disks: 2 x 1TB HDD + 1 x 100GB SSD (or greater capacity)
+* The first HDD should be used for OS & additional software/tool installation
+* The second HDD is configured for CEPH object storage
+* The SSD should be used as the CEPH journal
+* Performance testing requires a mix of compute nodes with CEPH (Swift+Cinder) and without CEPH storage
+* Virtual ISO boot capabilities or a separate PXE boot server (DHCP/tftp or Cobbler)
+
+**Memory:**
+
+* 32G RAM Minimum
+
+**Power Supply**
+
+* Single power supply acceptable (redundant power not required/nice to have)
diff --git a/docs/specification/index.rst b/docs/specification/index.rst
new file mode 100644
index 0000000..c3eb45a
--- /dev/null
+++ b/docs/specification/index.rst
@@ -0,0 +1,18 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+.. Top level of Pharos specification documents.
+
+********************
+Pharos Specification
+********************
+
+The Pharos Specification provides information on Pharos hardware and network requirements
+
+.. toctree::
+
+ ./objectives.rst
+ ./hardwarespec.rst
+ ./networkconfig.rst
+ ./remoteaccess.rst
diff --git a/docs/specification/networkconfig.rst b/docs/specification/networkconfig.rst
new file mode 100644
index 0000000..fa95faa
--- /dev/null
+++ b/docs/specification/networkconfig.rst
@@ -0,0 +1,62 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+Networking
+----------
+
+**Network Hardware**
+
+ * 24 or 48 Port TOR Switch
+ * NICs - Combination of 1GE and 10GE based on network topology options (per server can be on-board
+ or use PCI-e)
+ * Connectivity for each data/control network is through a separate NIC. This
+   simplifies switch management; however, it requires more NICs on the server and also more switch ports
+ * BMC (Baseboard Management Controller) for lights-out management network using IPMI (Intelligent
+ Platform Management Interface)
+
+**Network Options**
+
+ * Option I: 4x1G Control, 2x10G Data, 48 Port Switch
+
+ * 1 x 1G for lights-out Management
+ * 1 x 1G for Admin/PXE boot
+ * 1 x 1G for control-plane connectivity
+ * 1 x 1G for storage
+ * 2 x 10G for data network (redundancy, NIC bonding, High bandwidth testing)
+
+ * Option II: 1x1G Control, 2x 10G Data, 24 Port Switch
+
+ * Connectivity to networks is through VLANs on the Control NIC
+ * Data NIC used for VNF traffic and storage traffic segmented through VLANs
+
+ * Option III: 2x1G Control, 2x10G Data, 2x10G Storage, 24 Port Switch
+
+ * Data NIC used for VNF traffic
+ * Storage NIC used for control plane and Storage segmented through VLANs (separate host traffic
+ from VNF)
+ * 1 x 1G for lights-out mangement
+ * 1 x 1G for Admin/PXE boot
+ * 2 x 10G for control-plane connectivity/storage
+ * 2 x 10G for data network
+
+Documented configuration to include:
+
+ - Subnet, VLANs (may be constrained by existing lab setups or rules)
+ - IPs
+ - Types of NW - lights-out, public, private, admin, storage
+ - May be special NW requirements for performance related projects
+ - Default gateways
+
+**Sample Network Drawings**
+
+.. image:: ../images/bridge2.png
+
+.. image:: ../images/opnfv-pharos-diagram-v01.jpg
+
+.. image:: ../images/opnfv-example-lab-diagram.png
+
+Download the visio zip file here:
+`opnfv-example-lab-diagram.vsdx.zip
+<https://wiki.opnfv.org/_media/opnfv-example-lab-diagram.vsdx.zip>`_
diff --git a/docs/specification/objectives.rst b/docs/specification/objectives.rst
new file mode 100644
index 0000000..0a0ad6a
--- /dev/null
+++ b/docs/specification/objectives.rst
@@ -0,0 +1,29 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+Pharos Compliance
+-----------------
+
+The **Pharos Specification** defines a hardware environment for deployment and testing of the
+**Brahmaputra** platform release. The **Pharos Project** is also responsible for defining lab
+capabilities, developing management/usage policies and process; and a support plan for reliable
+access to project and release resources. Community labs are provided as a service by companies and
+are not controlled by Pharos; however, our objective is to provide easy visibility of all lab
+capabilities and their usage at all times.
+
+Pharos lab infrastructure has the following objectives:
+
+- Provides secure, scalable, standard and HA environments for feature development
+- Supports the full Brahmaputra deployment lifecycle (this requires a **bare-metal** environment)
+- Supports functional and performance testing of the Brahmaputra release
+- Provides mechanisms and procedures for secure remote access to Pharos compliant environments for
+  the OPNFV community
+
+Deploying Brahmaputra in a virtualized environment is possible and useful; however, it does
+not provide a fully featured deployment and realistic test environment for the Brahmaputra release
+of OPNFV.
+
+The high level architecture is outlined in the following diagram:
+
+.. image:: ../images/pharos-archi1.jpg
diff --git a/docs/specification/remoteaccess.rst b/docs/specification/remoteaccess.rst
new file mode 100644
index 0000000..4b8160b
--- /dev/null
+++ b/docs/specification/remoteaccess.rst
@@ -0,0 +1,63 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2016 OPNFV.
+
+
+Remote Management
+------------------
+
+Remote access is required for …
+
+ * Developers to access deploy/test environments (credentials to be issued per POD / user)
+ * Connection of each environment to Jenkins master hosted by Linux Foundation for automated
+ deployment and test
+
+OpenVPN is generally used for remote access; however, community hosted labs may vary due to company security
+rules. For POD access rules / restrictions refer to the individual lab documentation, as each company may
+have different access rules and acceptable usage policies.
+
+Basic requirements:
+
+ * SSH sessions to be established (initially on the jump server)
+ * Packages to be installed on a system (tools or applications) by pulling from an external repo.
+
+Firewall rules accommodate:
+
+ * SSH sessions
+ * Jenkins sessions
+
+Lights-out management network requirements:
+
+ * Out-of-band management for power on/off/reset and bare-metal provisioning
+ * Access to server is through a lights-out-management tool and/or a serial console
+ * Refer to applicable lights-out management information from the server manufacturer, such as ...
+
+ * Intel lights-out
+ `RMM <http://www.intel.com/content/www/us/en/server-management/intel-remote-management-module.html>`_
+ * HP lights-out `ILO <http://www8.hp.com/us/en/products/servers/ilo/index.html>`_
+ * CISCO lights-out `UCS <https://developer.cisco.com/site/ucs-dev-center/index.gsp>`_
+
+Linux Foundation Lab is a UCS-M hardware environment with controlled access *as needed*
+
+ * `Access rules and procedure <https://wiki.opnfv.org/display/pharos/Lflab+Hosting>`_ are
+ maintained on the Wiki
+ * `A list of people <https://wiki.opnfv.org/display/pharos/Lf+Support>`_ with access is
+ maintained on the Wiki
+ * Send access requests to infra-steering@lists.opnfv.org with the
+ following information ...
+
+ * Name:
+ * Company:
+ * Approved Project:
+ * Project role:
+ * Why is access needed:
+ * How long is access needed (either a specified time period or define "done"):
+ * What specific POD/machines will be accessed:
+ * What support is needed from LF admins and LF community support team:
+
+ * Once access is approved please follow instructions for setting up VPN access ...
+ https://wiki.opnfv.org/get_started/lflab_hosting
+ * The people who require VPN access must have a valid PGP key bearing a valid signature from LF
+ * When issuing OpenVPN credentials, LF will be sending TLS certificates and 2-factor
+ authentication tokens, encrypted to each recipient's PGP key
+