Diffstat (limited to 'validator/src/validation_tool/doc')
 validator/src/validation_tool/doc/config.yaml            |  37 +
 validator/src/validation_tool/doc/example/config.yaml    |  37 +
 validator/src/validation_tool/doc/example/inventory.yaml |  38 +
 validator/src/validation_tool/doc/example/network.yaml   | 207 +
 validator/src/validation_tool/doc/inventory.yaml         |  38 +
 validator/src/validation_tool/doc/network.yaml            | 221 +
 validator/src/validation_tool/doc/pharos-validator.1     |  54 +
 7 files changed, 632 insertions(+), 0 deletions(-)
diff --git a/validator/src/validation_tool/doc/config.yaml b/validator/src/validation_tool/doc/config.yaml
new file mode 100644
index 0000000..f2146ff
--- /dev/null
+++ b/validator/src/validation_tool/doc/config.yaml
@@ -0,0 +1,37 @@
+---
+###################
+# Program arguments
+###################
+#
+# Output format for the results file
+output: "yaml" # Valid values: ("yaml", )
+
+#################
+# Server settings
+#################
+
+# Address that the client should attempt to connect to. Has no effect on the server
+server-addr: "0.0.0.0"
+# Server port; should not normally need to be changed
+server-port: 12121
+
+#############################
+# Filepaths for program files
+#############################
+
+# Directory the program monitors for files pushed back by clients
+tmppath: "/tmp/pharosqa"
+# Configuration output directory
+confpath: "/etc/pharosqa/"
+# Path to shared example files
+sharepath: "/usr/share/pharosqa/"
+# Path to inventory file
+inventoryfile: "/etc/pharosvalidator/inventory.yaml"
+# Path to network file
+networkfile: "/etc/pharosvalidator/network.yaml"
+# Path to logfile
+logfile: "/tmp/runtime_logs.txt"
+# Path to test logs
+test_log: "/tmp/results.yaml"
+# Path to dhcpd configuration file
+dhcpdfile: "/etc/dhcp/dhcpd.conf"
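
The keys above are flat top-level YAML, so a consumer can read them with any YAML parser. Below is a minimal sketch, assuming PyYAML is available and using the key names documented above; the load_config helper and the local path passed to it are illustrative, not part of the tool:

```python
# Minimal sketch of consuming config.yaml above; assumes PyYAML.
import yaml

def load_config(path="/etc/pharosvalidator/config.yaml"):  # assumed install location
    """Load the validator configuration from a YAML file."""
    with open(path) as f:
        return yaml.safe_load(f)

cfg = load_config("doc/config.yaml")
# Note the hyphenated key names used by the file above.
print("client connects to %s:%s" % (cfg["server-addr"], cfg["server-port"]))
print("inventory: %s, network: %s" % (cfg["inventoryfile"], cfg["networkfile"]))
```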
diff --git a/validator/src/validation_tool/doc/example/config.yaml b/validator/src/validation_tool/doc/example/config.yaml
new file mode 100644
index 0000000..f2146ff
--- /dev/null
+++ b/validator/src/validation_tool/doc/example/config.yaml
@@ -0,0 +1,37 @@
+---
+###################
+# Program arguments
+###################
+#
+# Output format for the results file
+output: "yaml" # Valid values: ("yaml", )
+
+#################
+# Server settings
+#################
+
+# Address that the client should attempt to connect to. Has no effect on the server
+server-addr: "0.0.0.0"
+# Server port; should not normally need to be changed
+server-port: 12121
+
+#############################
+# Filepaths for program files
+#############################
+
+# Directory the program monitors for files pushed back by clients
+tmppath: "/tmp/pharosqa"
+# Configuration output directory
+confpath: "/etc/pharosqa/"
+# Path to shared example files
+sharepath: "/usr/share/pharosqa/"
+# Path to inventory file
+inventoryfile: "/etc/pharosvalidator/inventory.yaml"
+# Path to network file
+networkfile: "/etc/pharosvalidator/network.yaml"
+# Path to logfile
+logfile: "/tmp/runtime_logs.txt"
+# Path to test logs
+test_log: "/tmp/results.yaml"
+# Path to dhcpd configuration file
+dhcpdfile: "/etc/dhcp/dhcpd.conf"
diff --git a/validator/src/validation_tool/doc/example/inventory.yaml b/validator/src/validation_tool/doc/example/inventory.yaml
new file mode 100644
index 0000000..2ba768a
--- /dev/null
+++ b/validator/src/validation_tool/doc/example/inventory.yaml
@@ -0,0 +1,38 @@
+---
+nodes:
+ - name: node0
+ tags: control # optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "de:ad:be:ef:11:11" # pxe boot interface mac
+ cpus: 2 # required only for virtual deployments
+ memory: 2048 # required only for virtual deployments
+ disk: 40 # required only for virtual deployments
+ power:
+ type: ipmi
+ address: 10.4.7.2
+ user: root
+ pass: root
+ - name: node1
+ tags: control # optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "de:ad:be:ef:22:22" # pxe boot interface mac
+ cpus: 2 # required only for virtual deployments
+ memory: 2048 # required only for virtual deployments
+ disk: 40 # required only for virtual deployments
+ power:
+ type: ipmi
+ address: 10.4.7.3
+ user: root
+ pass: root
+ - name: node2
+ tags: control # optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "de:ad:be:ef:33:33" # pxe boot interface mac
+ cpus: 2 # required only for virtual deployments
+ memory: 2048 # required only for virtual deployments
+ disk: 40 # required only for virtual deployments
+ power:
+ type: ipmi
+ address: 10.4.7.4
+ user: root
+ pass: root
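
Each node entry carries the same fields, so the list lends itself to a generic sanity check. A hedged sketch follows, assuming PyYAML; the field names come from the file above, while the checks themselves are illustrative rather than the validator's actual logic:

```python
# Illustrative inventory sanity check; assumes PyYAML.
import re
import yaml

MAC_RE = re.compile(r"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$")

def check_inventory(path):
    with open(path) as f:
        nodes = yaml.safe_load(f)["nodes"]
    for node in nodes:
        assert MAC_RE.match(node["mac_address"]), node["name"]
        # "tags" is optional; only "control" and "compute" are valid.
        assert node.get("tags", "control") in ("control", "compute")
        assert node["power"]["type"] == "ipmi" and node["power"]["address"]
    return nodes

print("validated %d nodes" % len(check_inventory("doc/example/inventory.yaml")))
```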
diff --git a/validator/src/validation_tool/doc/example/network.yaml b/validator/src/validation_tool/doc/example/network.yaml
new file mode 100644
index 0000000..91c1be9
--- /dev/null
+++ b/validator/src/validation_tool/doc/example/network.yaml
@@ -0,0 +1,207 @@
+---
+# This configuration file defines the network environment for a
+# baremetal deployment of OPNFV. It contains default values
+# for the following 5 networks:
+#
+# - admin
+# - tenant*
+# - external*
+# - storage*
+# - api*
+# *) optional networks
+#
+# Optional networks will be consolidated with the admin network
+# if not explicitly configured.
+#
+# See the short descriptions of the networks in the comments below.
+#
+# "admin" is the short name for the Control Plane Network.
+# During OPNFV deployment it is used for node provisioning, which requires
+# PXE booting as well as a DHCP server running on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
+#
+# "tenant" is the network used for tenant traffic.
+#
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress via
+# this network. There can be multiple external networks, but only one may be
+# designated "public", which the OpenStack public APIs will register on.
+#
+# "storage" is the network for storage I/O.
+#
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
+
+network-config-metadata: # Metadata for the network configuration
+ title: LF-POD-1 Network config #
+ version: 0.1 #
+ created: Mon Dec 28 2015 #
+ comment: None #
+
+# yamllint disable rule:line-length
+networks: # Network configurations
+ admin: # Admin network configuration (PXE and jumpstart)
+ enabled: true
+ vlan: native # VLAN tag to use for Overcloud hosts on this network
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: 29 # VLAN tag to use for this network on Installer VM, native means none
+ ip: 10.10.25.100 # IP to assign to Installer VM on this network
+ usable_ip_range: 10.10.25.0,10.10.25.100 # Usable IP range; if empty the entire range is usable, e.g. 192.168.1.10,192.168.1.20
+ gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
+ cidr: 10.10.25.0/24 # Subnet in CIDR format, e.g. 192.168.1.0/24
+ dhcp_range: 10.10.25.50,10.10.25.99 # DHCP range for the admin network, if empty it will be automatically provisioned
+ dns-domain: opnfvlf.org # Admin network dns domain
+ dns-search: opnfvlf.org # Admin network dns-search domain
+ dns-upstream: # Admin network upstream dns servers
+ - 8.8.8.8 #
+ - 8.8.4.4 #
+ ntp-upstream: # Admin upstream ntp servers
+ - 0.se.pool.ntp.org #
+ - 1.se.pool.ntp.org #
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - eth1
+ - eth2
+
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format, e.g. 192.168.1.0/24
+ vlan: 10 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range: VNI, VLAN-ID, etc.
+
+ segmentation_type: vxlan # Tenant network segmentation type: vlan, vxlan, gre
+
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 10 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 10
+ members:
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ vlan: native
+ mtu: 64000 # Public network MTU
+ installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.37.12 # IP to assign to Installer VM on this network
+ cidr: 192.168.37.0/24
+ gateway: 192.168.37.1
+ floating_ip_range: 192.168.37.200,192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range: 192.168.37.10,192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
+ dns_nameservers: # External dns servers
+ - 8.8.8.8 #
+ - 8.8.4.4 #
+ ntp: # External upstream NTP servers
+ - 0.se.pool.ntp.org #
+ - 1.se.pool.ntp.org #
+ syslog: # External Syslog server
+ server: 10.128.1.24 #
+ transport: 'tcp' #
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 10 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 10
+ members:
+ - eth1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ - private_cloud: # another external network
+ enabled: false
+ vlan: 101
+ mtu: 64000
+ cidr: 192.168.38.0/24
+ gateway: 192.168.38.1
+ floating_ip_range: 192.168.38.200,192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range: 192.168.38.10,192.168.38.199 # Usable IP range on the public network, usually this is a shared subnet
+ dns_nameservers: # External dns servers
+ - 8.8.8.8 #
+ - 8.8.4.4 #
+ ntp: # External upstream NTP servers
+ - 0.se.pool.ntp.org #
+ - 1.se.pool.ntp.org #
+ syslog: # External Syslog server
+ server: 10.128.1.24 #
+ transport: 'tcp' #
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 101 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 101
+ members:
+ - eth1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+
+ storage: # Storage network configuration
+ enabled: true
+ cidr: 12.0.0.0/24 # Subnet in CIDR format
+ vlan: 12 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # Tenant network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 12 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ - eth4
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 12
+ members:
+ - eth3
+ - eth4
+
+ api: # API network configuration
+ enabled: false
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+ vlan: 13 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # Tenant network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 13 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ - eth4
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 13
+ members:
+ - eth3
+ - eth4
+# yamllint enable rule:line-length
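
The networks above express addressing as a cidr plus comma-separated start,end ranges (dhcp_range, usable_ip_range), so one natural consistency check is that each range actually lies inside its subnet. An illustrative sketch using Python's standard ipaddress module (not the tool's own code):

```python
# Check that a "start,end" range from the file above falls inside a CIDR.
import ipaddress

def range_in_cidr(ip_range, cidr):
    """ip_range is a 'start,end' string as used in the admin network above."""
    net = ipaddress.ip_network(cidr, strict=False)
    start, end = (ipaddress.ip_address(p.strip()) for p in ip_range.split(","))
    return start in net and end in net and start <= end

# Values taken from the admin network in this example file.
assert range_in_cidr("10.10.25.50,10.10.25.99", "10.10.25.0/24")   # dhcp_range
assert range_in_cidr("10.10.25.0,10.10.25.100", "10.10.25.0/24")   # usable_ip_range
```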
diff --git a/validator/src/validation_tool/doc/inventory.yaml b/validator/src/validation_tool/doc/inventory.yaml
new file mode 100644
index 0000000..2ba768a
--- /dev/null
+++ b/validator/src/validation_tool/doc/inventory.yaml
@@ -0,0 +1,38 @@
+---
+nodes:
+ - name: node0
+ tags: control # optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "de:ad:be:ef:11:11" # pxe boot interface mac
+ cpus: 2 # required only for virtual deployments
+ memory: 2048 # required only for virtual deployments
+ disk: 40 # required only for virtual deployments
+ power:
+ type: ipmi
+ address: 10.4.7.2
+ user: root
+ pass: root
+ - name: node1
+ tags: control # optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "de:ad:be:ef:22:22" # pxe boot interface mac
+ cpus: 2 # required only for virtual deployments
+ memory: 2048 # required only for virtual deployments
+ disk: 40 # required only for virtual deployments
+ power:
+ type: ipmi
+ address: 10.4.7.3
+ user: root
+ pass: root
+ - name: node2
+ tags: control # optional param, other valid value "compute"
+ arch: "x86_64"
+ mac_address: "de:ad:be:ef:33:33" # pxe boot interface mac
+ cpus: 2 # required only for virtual deployments
+ memory: 2048 # required only for virtual deployments
+ disk: 40 # required only for virtual deployments
+ power:
+ type: ipmi
+ address: 10.4.7.4
+ user: root
+ pass: root
diff --git a/validator/src/validation_tool/doc/network.yaml b/validator/src/validation_tool/doc/network.yaml
new file mode 100644
index 0000000..705e848
--- /dev/null
+++ b/validator/src/validation_tool/doc/network.yaml
@@ -0,0 +1,221 @@
+---
+# This configuration file defines the network environment for a
+# baremetal deployment of OPNFV. It contains default values
+# for the following 5 networks:
+#
+# - admin
+# - tenant*
+# - external*
+# - storage*
+# - api*
+# *) optional networks
+#
+# Optional networks will be consolidated with the admin network
+# if not explicitly configured.
+#
+# See the short descriptions of the networks in the comments below.
+#
+# "admin" is the short name for the Control Plane Network.
+# During OPNFV deployment it is used for node provisioning, which requires
+# PXE booting as well as a DHCP server running on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
+#
+# "tenant" is the network used for tenant traffic.
+#
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress via
+# this network. There can be multiple external networks, but only one may be
+# designated "public", which the OpenStack public APIs will register on.
+#
+# "storage" is the network for storage I/O.
+#
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
+
+# yamllint disable rule:line-length
+network-config-metadata: # Metadata for the network configuration
+ title: LF-POD-1 Network config #
+ version: 0.1 #
+ created: Mon Dec 28 2015 #
+ comment: None #
+
+
+networks: # Network configurations
+ admin: # Admin network configuration (PXE and jumpstart)
+ enabled: true
+ vlan: native # VLAN tag to use for Overcloud hosts on this network (Admin network is required to be native / untagged for PXE booting)
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: native # VLAN tag to use for this network on Installer VM, native means none
+ ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ usable_ip_range: 192.0.2.11,192.0.2.99 # Usable IP range; if empty the entire range is usable, e.g. 192.168.1.10,192.168.1.20
+ gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
+ cidr: 192.0.2.0/24 # Subnet in CIDR format, e.g. 192.168.1.0/24
+ dhcp_range: 192.0.2.2,192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
+ dns-domain: opnfvlf.org # Admin network dns domain
+ dns-search: opnfvlf.org # Admin network dns-search domain
+ dns-upstream: # Admin network upstream dns servers
+ - 8.8.8.8 #
+ - 8.8.4.4 #
+ ntp-upstream: # Admin upstream ntp servers
+ - 0.se.pool.ntp.org #
+ - 1.se.pool.ntp.org #
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - eth1
+ - eth2
+
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format, e.g. 192.168.1.0/24
+ vlan: 10 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range: VNI, VLAN-ID, etc.
+
+ segmentation_type: vxlan # Tenant network segmentation type: vlan, vxlan, gre
+
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 10 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 10
+ members:
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ vlan: native
+ mtu: 64000 # Public network MTU
+ installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.37.12 # IP to assign to Installer VM on this network
+ cidr: 192.168.37.0/24
+ gateway: 192.168.37.1
+ floating_ip_range: 192.168.37.200,192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range: 192.168.37.10,192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
+ dns_nameservers: # External dns servers
+ - 8.8.8.8 #
+ - 8.8.4.4 #
+ ntp: # External upstream NTP servers
+ - 0.se.pool.ntp.org #
+ - 1.se.pool.ntp.org #
+ syslog: # External Syslog server
+ server: 10.128.1.24 #
+ transport: 'tcp' #
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 10 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 10
+ members:
+ - eth1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ - private_cloud: # another external network
+ enabled: false
+ vlan: 101
+ mtu: 64000
+ cidr: 192.168.38.0/24
+ gateway: 192.168.38.1
+ floating_ip_range: 192.168.38.200,192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range: 192.168.38.10,192.168.38.199 # Usable IP range on the public network, usually this is a shared subnet
+ dns_nameservers: # External dns servers
+ - 8.8.8.8 #
+ - 8.8.4.4 #
+ ntp: # External upstream NTP servers
+ - 0.se.pool.ntp.org #
+ - 1.se.pool.ntp.org #
+ syslog: # External Syslog server
+ server: 10.128.1.24 #
+ transport: 'tcp' #
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 101 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 101
+ members:
+ - eth1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+
+ storage: # Storage network configuration
+ enabled: true
+ cidr: 12.0.0.0/24 # Subnet in CIDR format
+ vlan: 12 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # Tenant network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 12 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ - eth4
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 12
+ members:
+ - eth3
+ - eth4
+
+ api: # API network configuration
+ enabled: false
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+ vlan: 13 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # Tenant network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 13 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ - eth4
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 13
+ members:
+ - eth3
+ - eth4
+
+# JOID specific settings
+joid:
+
+# Compass specific settings
+compass:
+
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ introspection_range: 192.0.2.100,192.0.2.120 # Range used for introspection phase (examining nodes)
+# Fuel specific settings
+fuel:
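
Per the header comment, optional networks that are not explicitly configured are consolidated with the admin network. A sketch of that fallback rule as described; the function is illustrative, and "external" (a list of networks) would need separate handling:

```python
def effective_networks(networks):
    """Resolve optional networks per the header comment above:
    a disabled or missing optional network falls back to admin."""
    admin = networks["admin"]
    resolved = {"admin": admin}
    for name in ("tenant", "storage", "api"):
        net = networks.get(name)
        # Consolidate onto the admin network unless explicitly enabled.
        resolved[name] = net if net and net.get("enabled") else admin
    return resolved
```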
diff --git a/validator/src/validation_tool/doc/pharos-validator.1 b/validator/src/validation_tool/doc/pharos-validator.1
new file mode 100644
index 0000000..c76176f
--- /dev/null
+++ b/validator/src/validation_tool/doc/pharos-validator.1
@@ -0,0 +1,54 @@
+.TH pharoscmp 1 "2016-06-01" "version 0.1"
+.SH NAME
+pharoscmp \- testing tool for the Pharos specification
+
+.SH SYNOPSIS
+.B pharoscmp [ options ]
+
+.SH DESCRIPTION
+This tool inspects the Linux virtual filesystems to discover a computer's hardware configuration and evaluates it against the Pharos specification to determine whether the machine is suitable for use as a node in a Pharos pod.
+
+.SH OPTIONS
+.BR \-h ", " \-\-help
+Show a help message and exit
+.PP
+.BR \-\-version
+Show program version
+.PP
+.BR \-c ", " \-\-color
+Enable colored console output
+.PP
+.BR \-v ", " \-\-verbose
+Enable more verbose console output
+.PP
+.BR \-q ", " \-\-quiet
+Disable console output
+.PP
+.BR \-o ", " \-\-output
+Define the path of the output file for the YAML results
+.PP
+.BR \-f ", " \-\-force
+Forcibly overwrite the file defined by \-\-output
+.PP
+.BR \-P ", " \-\-path
+Path to the directory that the test polls for the arrival of the file "nodeinfo.yaml". This file is usually sent via ssh to the host where the test is running.
+.PP
+
+.SH BUGS
+Please report bugs to https://tommy.iol.unh.edu/redmine/projects/pharoscmp
+
+.SH AUTHOR
+AUTHOR INFO GOES HERE
+
+.SH LICENSE
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
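
The -P/--path option implies a polling workflow: the test waits for each client to push nodeinfo.yaml into the watched directory. A hedged sketch of such a loop; the timeout and interval values are illustrative, not documented:

```python
# Illustrative polling loop for the -P/--path directory.
import os
import time

def wait_for_nodeinfo(directory, timeout=600, interval=5):
    """Poll `directory` until nodeinfo.yaml arrives (e.g. pushed via ssh)."""
    target = os.path.join(directory, "nodeinfo.yaml")
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.exists(target):
            return target
        time.sleep(interval)
    raise TimeoutError("no nodeinfo.yaml in %s after %ss" % (directory, timeout))
```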