Diffstat (limited to 'tools/pharos-validator/src/validation_tool')
-rw-r--r--  tools/pharos-validator/src/validation_tool/.gitignore                  |    2
-rw-r--r--  tools/pharos-validator/src/validation_tool/LICENSE                     |  202
-rwxr-xr-x  tools/pharos-validator/src/validation_tool/bin/pharos-validator-node   |   92
-rwxr-xr-x  tools/pharos-validator/src/validation_tool/bin/pharos-validator-server |  183
-rw-r--r--  tools/pharos-validator/src/validation_tool/doc/config.yaml             |   37
-rw-r--r--  tools/pharos-validator/src/validation_tool/doc/example/config.yaml     |   37
-rw-r--r--  tools/pharos-validator/src/validation_tool/doc/example/inventory.yaml  |   38
-rw-r--r--  tools/pharos-validator/src/validation_tool/doc/example/network.yaml    |  207
-rw-r--r--  tools/pharos-validator/src/validation_tool/doc/inventory.yaml          |   38
-rw-r--r--  tools/pharos-validator/src/validation_tool/doc/network.yaml            |  221
-rw-r--r--  tools/pharos-validator/src/validation_tool/doc/pharos-validator.1      |   54
-rw-r--r--  tools/pharos-validator/src/validation_tool/requirements.txt            |    3
-rwxr-xr-x  tools/pharos-validator/src/validation_tool/setup.py                    |   31
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/__init__.py             |    0
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/config.py               |  176
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/const.py                |   48
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/dhcp.py                 |  102
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/ipmi.py                 |   63
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/jenkins.py              |    8
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/node.py                 |   85
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/receiver.py             |   46
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/server.py               |  111
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/test/__init__.py        |    0
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/test/evaluate.py        |  159
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/test/probe.py           |  137
-rw-r--r--  tools/pharos-validator/src/validation_tool/src/util.py                 |  107
-rw-r--r--  tools/pharos-validator/src/validation_tool/tests/test_node.py          |    0
-rw-r--r--  tools/pharos-validator/src/validation_tool/tests/test_probe.py         |    0
-rw-r--r--  tools/pharos-validator/src/validation_tool/tests/test_server.py        |    9
-rw-r--r--  tools/pharos-validator/src/validation_tool/tests/test_util.py          |   12
30 files changed, 0 insertions(+), 2208 deletions(-)
diff --git a/tools/pharos-validator/src/validation_tool/.gitignore b/tools/pharos-validator/src/validation_tool/.gitignore
deleted file mode 100644
index 5559a0a2..00000000
--- a/tools/pharos-validator/src/validation_tool/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-tests/*.xml
-build/
diff --git a/tools/pharos-validator/src/validation_tool/LICENSE b/tools/pharos-validator/src/validation_tool/LICENSE
deleted file mode 100644
index 7a7c11af..00000000
--- a/tools/pharos-validator/src/validation_tool/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2016 Todd Gaunt and others
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/tools/pharos-validator/src/validation_tool/bin/pharos-validator-node b/tools/pharos-validator/src/validation_tool/bin/pharos-validator-node
deleted file mode 100755
index e81bc1bf..00000000
--- a/tools/pharos-validator/src/validation_tool/bin/pharos-validator-node
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python3
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import argparse
-import os
-import sys
-import logging
-
-from pharosvalidator import node
-
-def main():
- """Run validation tests on machine, then send results back to server
- on jump host"""
- args = parse_args()
-
- logger = configure_root_logger(0)
-
- if args["test"] == "hardware":
- result = node.hardware_test()
- elif args["test"] == "network":
- result = node.network_test()
- else:
- logger.error("Invalid test name chosen, please choose \"hardware\" or \"network\"")
- quit()
-
- logger.debug("TEST RESULTS\n" + "#"*50 + '\n' + result + "#"*50 + '\n')
- logger.info("Sending results to host...")
- node.send_result(args["host"], args["port"], result)
-
-def configure_root_logger(loglevel):
- # Add a file handler to the default logger
- formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
-
- # Configure the root logger
- stdout_handler = logging.StreamHandler(sys.stdout)
- stdout_handler.setLevel(loglevel)
- stdout_handler.setFormatter(formatter)
-
- root_logger = logging.getLogger()
- root_logger.addHandler(stdout_handler)
- root_logger.setLevel(loglevel)
-
- return root_logger
-
-def parse_args():
- """
- parse_args: parse the commandline arguments into a dictionary that can
- be easily passed and referenced by other functions
-
- input: None
-
- output: Dictionary of all commandline arguments
- """
- logger = logging.getLogger(__name__)
-
- parser = argparse.ArgumentParser( \
- description='evaluates a system against the pharos specification')
-
- parser.add_argument('--version',
- action="store_true", default=False,
- help='display version then exit')
-
- # Address that the client should connect to
- parser.add_argument('-H', '--host',
- type=str, default="0.0.0.0",
- help='Address of the server results should be \
- uploaded to')
-
- # Port that the client should connect to
- parser.add_argument('-p', '--port',
- type=str, default=0,
- help='Port of the server results will be uploaded to')
-
- # Specify which test to run on the node
- parser.add_argument('test', metavar='test',
- type=str,
- help='Which test should be run ["hardware", "network"]')
-
- args = vars(parser.parse_args())
-
- return args
-
-if __name__ == "__main__":
- main()
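
For context, the node script above hands the upload off to pharosvalidator.node.send_result, whose implementation lives in src/node.py and is not reproduced at this point in the diff. A minimal, self-contained sketch of such an upload helper, assuming the collector simply accepts a UTF-8 report over a plain TCP connection (the framing and names here are illustrative, not the project's actual protocol):

    import socket

    def send_result_sketch(host, port, result):
        """Push a finished test report to the collector over TCP."""
        with socket.create_connection((host, int(port)), timeout=30) as sock:
            sock.sendall(result.encode("utf-8"))  # upload the whole report
            sock.shutdown(socket.SHUT_WR)         # signal end-of-report

    # Illustrative call mirroring the arguments parsed above:
    # send_result_sketch("10.10.25.100", 12121, "cpu: ok\nmemory: ok\n")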
diff --git a/tools/pharos-validator/src/validation_tool/bin/pharos-validator-server b/tools/pharos-validator/src/validation_tool/bin/pharos-validator-server
deleted file mode 100755
index ac9e4f88..00000000
--- a/tools/pharos-validator/src/validation_tool/bin/pharos-validator-server
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python3
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import threading
-import queue
-import argparse
-import logging
-import os
-import sys
-
-from pharosvalidator import receiver
-from pharosvalidator import util
-from pharosvalidator import dhcp
-from pharosvalidator import ipmi
-from pharosvalidator import config
-from pharosvalidator import server
-
-def main():
- """PXE boot each nodes, then start up server to receive results"""
- # The logger instance for this function
- logger = logging.getLogger("pharosvalidator")
- args = parse_args()
-
- # Determine the logging level
- loglevel = logging.INFO
- if args["verbose"]:
- loglevel = logging.DEBUG
- if args["quiet"]:
- loglevel = logging.CRITICAL
-
- configure_root_logger(loglevel, args["logfile"])
-
- # Create a new logger strictly for logging test results to a file
- test_logger = logging.getLogger('test_logger')
- test_logger.setLevel(logging.INFO)
- tl_handler = logging.FileHandler(args["test_log"])
- tl_handler.setFormatter(logging.Formatter("%(message)s"))
- test_logger.addHandler(tl_handler)
-
- # Open up the inventory file
- invconf = config.Inventory(args["inventoryfile"])
-
- # Open up the network configuration file
- netconf = config.Topology(args["networkfile"])
-
- # Assign yourself an ip
- #bring_up_admin_ip(netconf.networks["admin"].installer_ip)
-
- # Start dhcp server
- dhcp.gen_dhcpd_file(args["dhcpdfile"], invconf.nodes, netconf.networks["admin"])
- if dhcp.start_server() != 0:
- logger.error("Halting, cannot bring up dhcp server")
- quit()
-
-
- # Queue for holding test logs, makes program thread safe
- logs_q = queue.Queue()
-
- # Start a new thread for the server that receives results
- threading.Thread(target=receiver.start, \
- args=(invconf.nodecount(), args["server-port"], logs_q), \
- daemon=True).start()
-
- failed_nodes = ipmi.power_nodes(invconf.nodes, "on")
-
- # If the failed nodes list is not empty, then fail
- if failed_nodes:
- logger.error("Halting, the following nodes could not be powered on: {}".format(", ".join(failed_nodes)))
- quit()
-
- admin_network = netconf.networks["admin"]
-
- ip_range = util.gen_ip_range(admin_network.cidr, [admin_network.installer_ip], admin_network.usable_ip_range.minimum, \
- admin_network.usable_ip_range.maximum)
-
- logger.info(ip_range)
-
- available_ips = server.ping_network(ip_range_list=ip_range, ipcnt=len(invconf.nodes), passes=20)
-
- logger.info(available_ips)
-
- # Start a thread to run tests on each different node, and setup
- # their NICs
- for ip in available_ips:
- threading.Thread( \
- target=server.ssh_thread, \
- args=(str(ip), str(admin_network.installer_ip), str(args["port"]), 200), \
- daemon=True).start()
-
- while True:
- logger.info("Awaiting test result...")
- test_logger.info(logs_q.get())
- logger.info("Logging test result...")
- if logs_q.empty():
- break
-
- logger.info("Finished test, check {} and {}".format(args["logfile"], args["test_log"]))
-
-
-def configure_root_logger(loglevel, logfile):
- # Add a file handler to the default logger
- formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
-
- # Configure the root logger
- stdout_handler = logging.StreamHandler(sys.stdout)
- stdout_handler.setLevel(loglevel)
- stdout_handler.setFormatter(formatter)
- rl_handler = logging.FileHandler(logfile)
- rl_handler.setFormatter(formatter)
-
- root_logger = logging.getLogger()
- root_logger.addHandler(rl_handler)
- root_logger.addHandler(stdout_handler)
- root_logger.setLevel(loglevel)
-
-def parse_args():
- """
- parse_args: parse the commandline arguments and configuration file into
- a dictionary that can be easily passed and referenced by other functions
-
- input: None
-
- output: Dictionary of all commandline arguments and configuration file
- settings
- """
- logger = logging.getLogger(__name__)
-
- parser = argparse.ArgumentParser( \
- description='evaluates a system against the pharos specification')
-
- parser.add_argument('--version',
- action="store_true", default=False,
- help='display version then exit')
-
- parser.add_argument('-q', '--quiet',
- action="store_true", default=False,
- help='disable console output')
-
- parser.add_argument('-v', '--verbose',
- action="store_true", default=False,
- help='Enable debugging level output')
-
- parser.add_argument('-o', '--output',
- type=str, default="yaml",
- help='Define which machine readable format to output')
-
- # port that the client should connect to
- parser.add_argument('-c', '--config',
- type=str, default="/etc/pharosvalidator/config.yaml",
- help='Configuration file to read')
-
- # port that the server should use
- parser.add_argument('-p', '--port',
- type=str, default=12121,
- help='flag to determine if server or client behavior \
- should be used')
-
- args = vars(parser.parse_args())
-
- # Read the configuration file first to get extra information
- if os.path.isfile(args["config"]):
- with open(args["config"], 'r') as fd:
- conf = yaml.safe_load(fd.read())
- else:
- logger.error("Halting, unable to load configuration file")
- quit(1)
-
- for field in args:
- conf[field] = args[field]
- args = conf
-
- return args
-
-if __name__ == "__main__":
- main()
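
The server's result collection above hinges on a queue.Queue shared between the receiver thread and the main loop. A self-contained sketch of that handoff pattern, assuming the node count is known up front (the receiver below is a stand-in, not the project's receiver.start):

    import queue
    import threading

    def collect_reports(nodecount):
        """Drain exactly one report per expected node from a shared queue."""
        reports_q = queue.Queue()

        def fake_receiver():
            # Stand-in producer: one report per node.
            for n in range(nodecount):
                reports_q.put("node{}: ok".format(n))

        threading.Thread(target=fake_receiver, daemon=True).start()

        # Draining by count avoids racing against Queue.empty().
        return [reports_q.get() for _ in range(nodecount)]

    # print(collect_reports(3))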
diff --git a/tools/pharos-validator/src/validation_tool/doc/config.yaml b/tools/pharos-validator/src/validation_tool/doc/config.yaml
deleted file mode 100644
index f2146ff9..00000000
--- a/tools/pharos-validator/src/validation_tool/doc/config.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-###################
-# Program arguments
-###################
-#
-# Define path of output file
-output: "yaml" # Values are ("yaml", )
-
-#################
-# Server settings
-#################
-
-# Address that the client should attempt to connect to. Has no effect on server
-server-addr: "0.0.0.0"
-# Server port, shouldn't need to change
-server-port: 12121
-
-#############################
-# Filepaths for program files
-#############################
-
-# Define directory for the program to monitor files pushed back by clients
-tmppath: "/tmp/pharosqa"
-# dhcpd configuration output filepath
-confpath: "/etc/pharosqa/"
-# Examplepath
-sharepath: "/usr/share/pharosqa/"
-# Path to inventory file
-inventoryfile: "/etc/pharosvalidator/inventory.yaml"
-# Path to network file
-networkfile: "/etc/pharosvalidator/network.yaml"
-# Path to logfile
-logfile: "/tmp/runtime_logs.txt"
-# Path to test logs
-test_log: "/tmp/results.yaml"
-# Path to dhcpd configuration file
-dhcpdfile: "/etc/dhcp/dhcpd.conf"
diff --git a/tools/pharos-validator/src/validation_tool/doc/example/config.yaml b/tools/pharos-validator/src/validation_tool/doc/example/config.yaml
deleted file mode 100644
index f2146ff9..00000000
--- a/tools/pharos-validator/src/validation_tool/doc/example/config.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-###################
-# Program arguments
-###################
-#
-# Define path of output file
-output: "yaml" # Values are ("yaml", )
-
-#################
-# Server settings
-#################
-
-# Address that the client should attempt to connect to. Has no effect on server
-server-addr: "0.0.0.0"
-# Server port, shouldn't need to change
-server-port: 12121
-
-#############################
-# Filepaths for program files
-#############################
-
-# Define directory for the program to monitor files pushed back by clients
-tmppath: "/tmp/pharosqa"
-# dhcpd configuration output filepath
-confpath: "/etc/pharosqa/"
-# Examplepath
-sharepath: "/usr/share/pharosqa/"
-# Path to inventory file
-inventoryfile: "/etc/pharosvalidator/inventory.yaml"
-# Path to network file
-networkfile: "/etc/pharosvalidator/network.yaml"
-# Path to logfile
-logfile: "/tmp/runtime_logs.txt"
-# Path to test logs
-test_log: "/tmp/results.yaml"
-# Path to dhcpd configuration file
-dhcpdfile: "/etc/dhcp/dhcpd.conf"
diff --git a/tools/pharos-validator/src/validation_tool/doc/example/inventory.yaml b/tools/pharos-validator/src/validation_tool/doc/example/inventory.yaml
deleted file mode 100644
index 2ba768ab..00000000
--- a/tools/pharos-validator/src/validation_tool/doc/example/inventory.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-nodes:
- - name: node0
- tags: control # optional param, other valid value "compute"
- arch: "x86_64"
- mac_address: "de:ad:be:ef:11:11" # pxe boot interface mac
- cpus: 2 # required only for virtual deployments
- memory: 2048 # required only for virtual deployments
- disk: 40 # required only for virtual deployments
- power:
- type: ipmi
- address: 10.4.7.2
- user: root
- pass: root
- - name: node1
- tags: control # optional param, other valid value "compute"
- arch: "x86_64"
- mac_address: "de:ad:be:ef:22:22" # pxe boot interface mac
- cpus: 2 # required only for virtual deployments
- memory: 2048 # required only for virtual deployments
- disk: 40 # required only for virtual deployments
- power:
- type: ipmi
- address: 10.4.7.3
- user: root
- pass: root
- - name: node2
- tags: control # optional param, other valid value "compute"
- arch: "x86_64"
- mac_address: "de:ad:be:ef:33:33" # pxe boot interface mac
- cpus: 2 # required only for virtual deployments
- memory: 2048 # required only for virtual deployments
- disk: 40 # required only for virtual deployments
- power:
- type: ipmi
- address: 10.4.7.4
- user: root
- pass: root
diff --git a/tools/pharos-validator/src/validation_tool/doc/example/network.yaml b/tools/pharos-validator/src/validation_tool/doc/example/network.yaml
deleted file mode 100644
index 91c1be91..00000000
--- a/tools/pharos-validator/src/validation_tool/doc/example/network.yaml
+++ /dev/null
@@ -1,207 +0,0 @@
----
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 5 following networks:
-#
-# - admin
-# - tenant*
-# - external*
-# - storage*
-# - api*
-# *) optional networks
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitly configured.
-#
-# See short description of the networks in the comments below.
-#
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning which will require
-# PXE booting as well as running a DHCP server on this network. Be sure to
-# disable any other DHCP/TFTP server on this network.
-#
-# "tenant" is the network used for tenant traffic.
-#
-# "external" is the network which should have internet or external
-# connectivity. External OpenStack networks will be configured to egress this
-# network. There can be multiple external networks, but only one assigned as
-# "public" which OpenStack public API's will register.
-#
-# "storage" is the network for storage I/O.
-#
-# "api" is an optional network for splitting out OpenStack service API
-# communication. This should be used for IPv6 deployments.
-
-network-config-metadata: # Meta data for the network configuration
- title: LF-POD-1 Network config #
- version: 0.1 #
- created: Mon Dec 28 2015 #
- comment: None #
-
-# yamllint disable rule:line-length
-networks: # Network configurations
- admin: # Admin configuration (pxe and jumpstart),
- enabled: true
- vlan: native # VLAN tag to use for Overcloud hosts on this network
- installer_vm: # Network settings for the Installer VM on admin network
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
- members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- vlan: 29 # VLAN tag to use for this network on Installer VM, native means none
- ip: 10.10.25.100 # IP to assign to Installer VM on this network
- usable_ip_range: 10.10.25.0,10.10.25.100 # Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
- gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
- cidr: 10.10.25.0/24 # Subnet in CIDR format 192.168.1.0/24
- dhcp_range: 10.10.25.50,10.10.25.99 # DHCP range for the admin network, if empty it will be automatically provisioned
- dns-domain: opnfvlf.org # Admin network dns domain
- dns-search: opnfvlf.org # Admin network dns-search domain
- dns-upstream: # Admin network upstream dns servers
- - 8.8.8.8 #
- - 8.8.4.4 #
- ntp-upstream: # Admin upstream ntp servers
- - 0.se.pool.ntp.org #
- - 1.se.pool.ntp.org #
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: bond
- vlan: native
- members:
- - eth1
- - eth2
-
- tenant: # Tenant network configuration
- enabled: true
- cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
- vlan: 10 # VLAN tag to use for Overcloud hosts on this network
- mtu: 64000 # Tenant network MTU
- overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range: VNI, VLAN-ID, etc.
-
- segmentation_type: vxlan # Tenant network segmentation type:vlan, vxlan, gre
-
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 10 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: interface
- vlan: 10
- members:
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
-
- external: # Can contain 1 or more external networks
- - public: # "public" network will be the network the installer VM attaches to
- enabled: true
- vlan: native
- mtu: 64000 # Public network MTU
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
- members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.37.12 # IP to assign to Installer VM on this network
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
- usable_ip_range: 192.168.37.10,192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
- dns_nameservers: # External dns servers
- - 8.8.8.8 #
- - 8.8.4.4 #
- ntp: # External upstream NTP servers
- - 0.se.pool.ntp.org #
- - 1.se.pool.ntp.org #
- syslog: # External Syslog server
- server: 10.128.1.24 #
- transport: 'tcp' #
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 10 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: interface
- vlan: 10
- members:
- - eth1
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: Public_internet
- type: flat
- gateway: 192.168.37.1
- - private_cloud: # another external network
- enabled: false
- vlan: 101
- mtu: 64000
- cidr: 192.168.38.0/24
- gateway: 192.168.38.1
- floating_ip_range: 192.168.38.200,192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
- usable_ip_range: 192.168.38.10,192.168.38.199 # Usable IP range on the public network, usually this is a shared subnet
- dns_nameservers: # External dns servers
- - 8.8.8.8 #
- - 8.8.4.4 #
- ntp: # External upstream NTP servers
- - 0.se.pool.ntp.org #
- - 1.se.pool.ntp.org #
- syslog: # External Syslog server
- server: 10.128.1.24 #
- transport: 'tcp' #
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 101 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: interface
- vlan: 101
- members:
- - eth1
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: private_cloud
- type: vlan
- segmentation_id: 101
- gateway: 192.168.38.1
-
- storage: # Storage network configuration
- enabled: true
- cidr: 12.0.0.0/24 # Subnet in CIDR format
- vlan: 12 # VLAN tag to use for Overcloud hosts on this network
- mtu: 64000 # Tenant network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: bond # Physical interface type (interface or bond)
- vlan: 12 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- - eth4
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: bond
- vlan: 12
- members:
- - eth3
- - eth4
-
- api: # API network configuration
- enabled: false
- cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
- vlan: 13 # VLAN tag to use for Overcloud hosts on this network
- mtu: 64000 # Tenant network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: bond # Physical interface type (interface or bond)
- vlan: 13 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- - eth4
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: bond
- vlan: 13
- members:
- - eth3
- - eth4
-# yamllint enable rule:line-length
diff --git a/tools/pharos-validator/src/validation_tool/doc/inventory.yaml b/tools/pharos-validator/src/validation_tool/doc/inventory.yaml
deleted file mode 100644
index 2ba768ab..00000000
--- a/tools/pharos-validator/src/validation_tool/doc/inventory.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-nodes:
- - name: node0
- tags: control # optional param, other valid value "compute"
- arch: "x86_64"
- mac_address: "de:ad:be:ef:11:11" # pxe boot interface mac
- cpus: 2 # required only for virtual deployments
- memory: 2048 # required only for virtual deployments
- disk: 40 # required only for virtual deployments
- power:
- type: ipmi
- address: 10.4.7.2
- user: root
- pass: root
- - name: node1
- tags: control # optional param, other valid value "compute"
- arch: "x86_64"
- mac_address: "de:ad:be:ef:22:22" # pxe boot interface mac
- cpus: 2 # required only for virtual deployments
- memory: 2048 # required only for virtual deployments
- disk: 40 # required only for virtual deployments
- power:
- type: ipmi
- address: 10.4.7.3
- user: root
- pass: root
- - name: node2
- tags: control # optional param, other valid value "compute"
- arch: "x86_64"
- mac_address: "de:ad:be:ef:33:33" # pxe boot interface mac
- cpus: 2 # required only for virtual deployments
- memory: 2048 # required only for virtual deployments
- disk: 40 # required only for virtual deployments
- power:
- type: ipmi
- address: 10.4.7.4
- user: root
- pass: root
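
Each entry above becomes one Node object in src/config.py (further down in this diff). A short sketch of pulling the PXE MAC / IPMI address pairs straight out of the file, assuming yaml.safe_load:

    import yaml

    def pxe_and_ipmi_pairs(inventory_path):
        """Yield (node name, PXE MAC, IPMI address) for every inventory entry."""
        with open(inventory_path, "r") as fd:
            inventory = yaml.safe_load(fd)
        for node in inventory["nodes"]:
            yield node["name"], node["mac_address"], node["power"]["address"]

    # for name, mac, bmc in pxe_and_ipmi_pairs("doc/inventory.yaml"):
    #     print(name, mac, bmc)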
diff --git a/tools/pharos-validator/src/validation_tool/doc/network.yaml b/tools/pharos-validator/src/validation_tool/doc/network.yaml
deleted file mode 100644
index 705e8486..00000000
--- a/tools/pharos-validator/src/validation_tool/doc/network.yaml
+++ /dev/null
@@ -1,221 +0,0 @@
----
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 5 following networks:
-#
-# - admin
-# - tenant*
-# - external*
-# - storage*
-# - api*
-# *) optional networks
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitly configured.
-#
-# See short description of the networks in the comments below.
-#
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning which will require
-# PXE booting as well as running a DHCP server on this network. Be sure to
-# disable any other DHCP/TFTP server on this network.
-#
-# "tenant" is the network used for tenant traffic.
-#
-# "external" is the network which should have internet or external
-# connectivity. External OpenStack networks will be configured to egress this
-# network. There can be multiple external networks, but only one assigned as
-# "public" which OpenStack public API's will register.
-#
-# "storage" is the network for storage I/O.
-#
-# "api" is an optional network for splitting out OpenStack service API
-# communication. This should be used for IPv6 deployments.
-
-# yamllint disable rule:line-length
-network-config-metadata: # Meta data for the network configuration
- title: LF-POD-1 Network config #
- version: 0.1 #
- created: Mon Dec 28 2015 #
- comment: None #
-
-
-networks: # Network configurations
- admin: # Admin configuration (pxe and jumpstart),
- enabled: true
- vlan: native # VLAN tag to use for Overcloud hosts on this network (Admin network is required to be native / untagged for PXE booting)
- installer_vm: # Network settings for the Installer VM on admin network
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
- members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- vlan: native # VLAN tag to use for this network on Installer VM, native means none
- ip: 192.0.2.1 # IP to assign to Installer VM on this network
- usable_ip_range: 192.0.2.11,192.0.2.99 # Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
- gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
- cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
- dhcp_range: 192.0.2.2,192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
- dns-domain: opnfvlf.org # Admin network dns domain
- dns-search: opnfvlf.org # Admin network dns-search domain
- dns-upstream: # Admin network upstream dns servers
- - 8.8.8.8 #
- - 8.8.4.4 #
- ntp-upstream: # Admin upstream ntp servers
- - 0.se.pool.ntp.org #
- - 1.se.pool.ntp.org #
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: bond
- vlan: native
- members:
- - eth1
- - eth2
-
- tenant: # Tenant network configuration
- enabled: true
- cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
- vlan: 10 # VLAN tag to use for Overcloud hosts on this network
- mtu: 64000 # Tenant network MTU
- overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range: VNI, VLAN-ID, etc.
-
- segmentation_type: vxlan # Tenant network segmentation type: vlan, vxlan, gre
-
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 10 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: interface
- vlan: 10
- members:
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
-
- external: # Can contain 1 or more external networks
- - public: # "public" network will be the network the installer VM attaches to
- enabled: true
- vlan: native
- mtu: 64000 # Public network MTU
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
- members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.37.12 # IP to assign to Installer VM on this network
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
- usable_ip_range: 192.168.37.10,192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
- dns_nameservers: # External dns servers
- - 8.8.8.8 #
- - 8.8.4.4 #
- ntp: # External upstream NTP servers
- - 0.se.pool.ntp.org #
- - 1.se.pool.ntp.org #
- syslog: # External Syslog server
- server: 10.128.1.24 #
- transport: 'tcp' #
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 10 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: interface
- vlan: 10
- members:
- - eth1
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: Public_internet
- type: flat
- gateway: 192.168.37.1
- - private_cloud: # another external network
- enabled: false
- vlan: 101
- mtu: 64000
- cidr: 192.168.38.0/24
- gateway: 192.168.38.1
- floating_ip_range: 192.168.38.200,192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
- usable_ip_range: 192.168.38.10,192.168.38.199 # Usable IP range on the public network, usually this is a shared subnet
- dns_nameservers: # External dns servers
- - 8.8.8.8 #
- - 8.8.4.4 #
- ntp: # External upstream NTP servers
- - 0.se.pool.ntp.org #
- - 1.se.pool.ntp.org #
- syslog: # External Syslog server
- server: 10.128.1.24 #
- transport: 'tcp' #
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 101 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: interface
- vlan: 101
- members:
- - eth1
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: private_cloud
- type: vlan
- segmentation_id: 101
- gateway: 192.168.38.1
-
- storage: # Storage network configuration
- enabled: true
- cidr: 12.0.0.0/24 # Subnet in CIDR format
- vlan: 12 # VLAN tag to use for Overcloud hosts on this network
- mtu: 64000 # Tenant network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: bond # Physical interface type (interface or bond)
- vlan: 12 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- - eth4
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: bond
- vlan: 12
- members:
- - eth3
- - eth4
-
- api: # API network configuration
- enabled: false
- cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
- vlan: 13 # VLAN tag to use for Overcloud hosts on this network
- mtu: 64000 # Tenant network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: bond # Physical interface type (interface or bond)
- vlan: 13 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- - eth4
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: bond
- vlan: 13
- members:
- - eth3
- - eth4
-
-# JOID specific settings
-joid:
-
-# Compass specific settings
-compass:
-
-# Apex specific settings
-apex:
- networks:
- admin:
- introspection_range: 192.0.2.100,192.0.2.120 # Range used for introspection phase (examining nodes)
-# Fuel specific settings
-fuel:
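
The admin section above is what the validator's DHCP generation consumes: it needs the subnet, the usable range, and the installer IP to carve out a host pool. A sketch of that derivation using the standard ipaddress module (util.gen_ip_range in this tree plays a similar role; this is not its implementation):

    import ipaddress

    def usable_pool(cidr, usable_range, reserved):
        """List usable host addresses inside the range, minus reserved ones."""
        low, high = (ipaddress.ip_address(a.strip()) for a in usable_range.split(","))
        reserved = {ipaddress.ip_address(a) for a in reserved}
        return [str(h) for h in ipaddress.ip_network(cidr).hosts()
                if low <= h <= high and h not in reserved]

    # usable_pool("192.0.2.0/24", "192.0.2.11,192.0.2.99", ["192.0.2.1"])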
diff --git a/tools/pharos-validator/src/validation_tool/doc/pharos-validator.1 b/tools/pharos-validator/src/validation_tool/doc/pharos-validator.1
deleted file mode 100644
index c76176f1..00000000
--- a/tools/pharos-validator/src/validation_tool/doc/pharos-validator.1
+++ /dev/null
@@ -1,54 +0,0 @@
-.TH pharos-validator 1 "2016-06-01" "version 0.1"
-.SH NAME
-pharos-validator - Testing tool for the Pharos specification
-
-.SH SYNOPSIS
-.B pharos-validator [ options ]
-
-.SH DESCRIPTION
-This tool reads the Linux virtual filesystems (such as /proc and /sys) to determine a computer's hardware and evaluates it against the Pharos specification to see whether the computer is suitable for use as a node in a Pharos pod.
-
-.SH OPTIONS
-.BR \-h ", " \-\-help
-Show a help message and exit
-.PP
-.BR \-\-version
-Show program version
-.PP
-.BR \-c ", " \-\-color
-Enable colored console output
-.PP
-.BR \-v ", " \-\-verbose
-Enable more verbose console output
-.PP
-.BR \-q ", " \-\-quiet
-Disable console output
-.PP
-.BR \-o ", " \-\-output
-Define path to output file for yaml file of results
-.PP
-.BR \-f ", " \-\-force
-Forcefully override file defined by --output
-.PP
-.BR \-P ", " \-\-path
-Path to the directory the test polls for the arrival of the file "nodeinfo.yaml". This file is usually sent via ssh to the host where the test is running
-.PP
-
-.SH BUGS
-Please report bugs to https://tommy.iol.unh.edu/redmine/projects/pharoscmp
-
-.SH AUTHOR
-AUTHOR INFO GOES HERE
-
-.SH LICENSE
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/tools/pharos-validator/src/validation_tool/requirements.txt b/tools/pharos-validator/src/validation_tool/requirements.txt
deleted file mode 100644
index dd5a2e6d..00000000
--- a/tools/pharos-validator/src/validation_tool/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-netifaces
-watchdog
-pytest
diff --git a/tools/pharos-validator/src/validation_tool/setup.py b/tools/pharos-validator/src/validation_tool/setup.py
deleted file mode 100755
index 6b00b388..00000000
--- a/tools/pharos-validator/src/validation_tool/setup.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /usr/bin/env python3
-
-from distutils.core import setup
-
-import subprocess
-
-setup(
- name = 'pharos-validator',
- description = 'Testing tool for Pharos spec compliance',
- author = 'Todd Gaunt',
- url = '',
- download_url = '',
- author_email = 'singularik@iol.unh.edu',
- version = '0.1',
- license = 'TBD',
-
- packages = ['pharosvalidator',
- 'pharosvalidator.test'],
-
- package_dir = {'pharosvalidator':'src',
- 'pharosvalidator.test':'src/test'},
-
- # Change these per distribution
- data_files = [('share/man/man1/', ['doc/pharos-validator.1']),
- ('share/licenses/pharos-validator/LICENSE', ['LICENSE']),
- ('share/pharos-validator/', ['doc/config.yaml', 'doc/inventory.yaml', 'doc/network.yaml']),
- ],
-
- scripts = ['bin/pharos-validator-node',
- 'bin/pharos-validator-server']
- )
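
A note on the layout above: package_dir maps the import name pharosvalidator onto the src/ directory, so the two scripts in bin/ can simply import pharosvalidator once the package is installed (for example via an illustrative python3 setup.py install, not a workflow documented in this repository); the data_files entries place the man page, license, and documentation YAML under the usual share/ locations.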
diff --git a/tools/pharos-validator/src/validation_tool/src/__init__.py b/tools/pharos-validator/src/validation_tool/src/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tools/pharos-validator/src/validation_tool/src/__init__.py
+++ /dev/null
diff --git a/tools/pharos-validator/src/validation_tool/src/config.py b/tools/pharos-validator/src/validation_tool/src/config.py
deleted file mode 100644
index 443467ee..00000000
--- a/tools/pharos-validator/src/validation_tool/src/config.py
+++ /dev/null
@@ -1,176 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-import sys
-import os
-import yaml
-import struct
-import socket
-
-from pharosvalidator import util
-from collections import namedtuple
-
-class Topology():
- """
- Topology: Class to store any number of Network classes
- and metadata about them
- """
- def __init__(self, yaml_config):
- # Dictionary of available networks
- self.logger = logging.getLogger(__name__)
- self.networks = {}
- self.external_networks = []
-
- # Fill the above dictionaries
- self.parse_yaml(yaml_config)
-
- def parse_yaml(self, yaml_config):
- """
- parse_yaml: parses the yaml configuration file this program uses
- for all the network and node information
- """
- config = safe_yaml_read(yaml_config)
- for network in config["networks"]:
- self.logger.info("Reading network section {}".format(network))
- if network == "admin":
- self.networks[network] = Network(config["networks"][network])
- #TODO
- elif network == "external":
- for external_network in config["networks"][network]:
- self.external_networks.append(Network(external_network))
-
-class Network():
- """
- Network: Class to store all information on a given network
- """
- def __init__(self, network):
- try:
- self.logger = logging.getLogger(__name__)
-
- # Some generic settings
- self.enabled = network["enabled"]
- self.vlan = network["vlan"]
-
- # VM settings
- self.installer_nic_type = network["installer_vm"]["nic_type"]
- self.installer_members = network["installer_vm"]["members"]
- self.installer_ip = network["installer_vm"]["ip"]
-
- # Tuple containing the minimum and maximum
- self.usable_ip_range = self.parse_ip_range(network["usable_ip_range"])
- self.gateway = network["gateway"]
- self.cidr = network["cidr"]
- self.dhcp_range = network["dhcp_range"]
- self.dns_domain = network["dns-domain"]
- self.dns_search = network["dns-search"]
-
- subnet, netmask = self.split_cidr(network["cidr"])
- self.subnet = subnet
- self.netmask = netmask
-
- # List of all dns servers
- self.dns_upstream = network["dns-upstream"]
-
- self.nic_mapping = {}
- except KeyError as e:
- self.logger.error("Field {} not available in network configuration file".format(e))
-
- def split_cidr(self, cidr):
- """
- split_cidr: Split up cidr notation subnets into a subnet string and a
- netmask string
-
- input: cidr notation of a subnet
-
- output: Subnet string; Netmask string
- """
- split = cidr.split('/')
- host_bits = int(split[1])
- netmask = self.cidr_to_netmask(host_bits)
- subnet = split[0]
-
- return subnet, netmask
-
- def parse_ip_range(self, ip_range_string):
- """
- parse_ip_range: Create a named tuple object that contains the lowest
- ip address and the highest ip address from a configuration file
-
- input: String formatted like so "min, max" where min/max are ip addresses
-
- output: Named tuple object containing a minimum and maximum field
- """
- rp = ip_range_string.split(",")
- ip_range = namedtuple("ip_range", ['minimum', 'maximum'])(minimum=min(rp), maximum=max(rp))
- return ip_range
-
- def cidr_to_netmask(self, cidr):
- bits = 0xffffffff ^ (1 << 32 - cidr) - 1
- netmask = socket.inet_ntoa(struct.pack('>I', bits))
- self.logger.debug("Netmask generated from cidr '{}': '{}'".format(cidr, netmask))
- return netmask
-
-class Inventory():
- """
- Inventory: Class to hold configuration file data
- """
- def __init__(self, yaml_config):
- # Create the class logger
- self.logger = logging.getLogger(__name__)
-
- self.nodes = []
-
- # Fill the above list
- self.parse_yaml(yaml_config)
-
- def parse_yaml(self, yaml_config):
- config = safe_yaml_read(yaml_config)
- nodes = []
- for node in config["nodes"]:
- self.nodes.append(Node(node))
-
- def nodecount(self):
- return len(self.nodes)
-
-class Node():
- """
- Node: Class to hold the inventory data for a single node
- """
- def __init__(self, node):
- self.logger = logging.getLogger(__name__)
- try:
- self.name = node["name"]
- self.tags = node["tags"]
- self.arch = node["arch"]
- self.mac_address = node["mac_address"] # ipmi mac address
- self.cpus = node["cpus"]
- self.memory = node["memory"]
- self.disk = node["disk"]
- except KeyError as e:
- self.logger.error("Field {} not available in inventory file".format(e))
-
- # Power sub section
- if node["power"]["type"] == "ipmi":
- try:
- self.ipmi_addr = node["power"]["address"]
- self.ipmi_user = node["power"]["user"]
- self.ipmi_pass = node["power"]["pass"]
- except KeyError as e:
- self.logger.error("Field {} not available in inventory file".format(e))
- else:
- pass
-
-def safe_yaml_read(yamlfile):
- logger = logging.getLogger(__name__)
- if not os.path.isfile(yamlfile):
- logger.critical("Could not find {}".format(yamlfile))
- quit(1)
- with open(yamlfile, 'r') as fd:
- return yaml.safe_load(fd.read())
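
The netmask arithmetic in Network.cidr_to_netmask packs the high prefix bits of a 32-bit word: for a /24, 0xffffffff ^ ((1 << (32 - 24)) - 1) leaves 0xffffff00, i.e. 255.255.255.0. A standalone cross-check of that identity against the standard library (illustrative, not part of the tool):

    import socket
    import struct
    import ipaddress

    def cidr_to_netmask(prefix):
        """Same bit manipulation as Network.cidr_to_netmask above."""
        bits = 0xffffffff ^ (1 << 32 - prefix) - 1
        return socket.inet_ntoa(struct.pack('>I', bits))

    for prefix in range(1, 33):
        expected = str(ipaddress.ip_network("0.0.0.0/{}".format(prefix)).netmask)
        assert cidr_to_netmask(prefix) == expected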
diff --git a/tools/pharos-validator/src/validation_tool/src/const.py b/tools/pharos-validator/src/validation_tool/src/const.py
deleted file mode 100644
index a204a964..00000000
--- a/tools/pharos-validator/src/validation_tool/src/const.py
+++ /dev/null
@@ -1,48 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-## Various constant strings used throughout program
-HARDWARE_TEST="pharos-validator-node"
-
-## Pharos hardware specification
-# memory
-MIN_MEMSIZE = 32000000 # In Kb
-
-# cpu
-MIN_CPUFREQ = 1800.000 # In Mhz
-MIN_CORECOUNT = 4
-
-# storage
-MIN_DISKCOUNT = 3
-MIN_SSDCOUNT = 1
-MIN_HDDSIZE = 1000 # In Gb
-MIN_SSDSIZE = 100 # In Gb
-# Smallest possible disk size
-MIN_DISKSIZE = min(MIN_HDDSIZE, MIN_SSDSIZE)
-
-# Virtual deployments
-# Requirements are per node
-APEX_REQ = {"cores": 2, \
- "ram": 8000000, \
- "disk": 40}
-
-# Requirements are per node
-COMPASS_REQ = {"cores": 4, \
- "ram": 4000000, \
- "disk": 100}
-
-# Requirements are per node
-JOID_REQ = {"cores": 4, \
- "ram": 4000000, \
- "disk": 100}
-
-# Requirements are per node
-FUEL_REQ = {"cores": 4, \
- "ram": 4000000, \
- "disk": 100}
diff --git a/tools/pharos-validator/src/validation_tool/src/dhcp.py b/tools/pharos-validator/src/validation_tool/src/dhcp.py
deleted file mode 100644
index 26c42f84..00000000
--- a/tools/pharos-validator/src/validation_tool/src/dhcp.py
+++ /dev/null
@@ -1,102 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import yaml
-import netifaces
-import subprocess
-import copy
-import re
-import os
-import logging
-
-from pharosvalidator.const import *
-from pharosvalidator import util
-
-init_cmd = ["systemctl", "start", "dhcpd.service"]
-
-def gen_dhcpd_file(dhcpdfile, nodes, network):
- """Generates and associates incremental ip addresses to
- MAC addresses according to restrictions placed by network
- configuration file. Writes all of this out in dhcpd.conf format"""
- logger = logging.getLogger(__name__)
- logger.info("Generating dhcpfile...")
-
- header = "default-lease-time 86400;\n\
- max-lease-time 604800;\n\
- \n\
- allow booting;\n\
- authoritative;\n\
- \n"
-
- # Skip this network if it is disabled
- if not network.enabled:
- logger.info("Admin network is disabled, please change the configuration to \"enabled\" if you would like this test to run")
- quit()
-
- # Not explicitly set in the configuration file
- broadcastaddr = "0.0.0.0"
- next_server = "0.0.0.0"
-
- ip_range = util.gen_ip_range(network.cidr, [network.installer_ip], network.usable_ip_range.minimum, \
- network.usable_ip_range.maximum)
-
- tab = ' '
- subnetconf = "subnet {} netmask {} {{\n".format(network.subnet, network.netmask)\
- + tab+"range {} {};\n".format(network.usable_ip_range.minimum, network.usable_ip_range.maximum)\
- + tab+"option broadcast-address {};\n".format(broadcastaddr)\
- + tab+'filename "pxelinux.0";\n'\
- + tab+"next-server {};\n".format(next_server)
-
- # For now no static addresses are assigned
- """
- static_addrs = []
- for node in nodes:
- # Skip the node if it doesn't have a name or mac address specified
- if not node.name or not node.mac_address:
- continue
-
- if node.ipmi_addr in ip_range:
- ip_range.remove(node.ipmi_addr)
-
- static_line = "host {node} {{ hardware ethernet {ipmi_mac}; fixed-address {ip_addr}; }}\n".format\
- (node=node.name, ipmi_mac=node.mac_address, ip_addr=ip_range[0])
- ip_range = ip_range[1::] # Remove the assigned ip address
- static_addrs.append(static_line)
-
- # Now add all statically assigned ip addresses
- for addr in static_addrs:
- subnetconf += tab+addr
- """
-
- subnetconf += "}\n" # Just the closing bracket
-
- # The final text to be written out to a file
- dhcpdtext = header + subnetconf
-
- with open(dhcpdfile, "w+") as fd:
- logger.info("Writing out dhcpd file to {}".format(dhcpdfile))
- fd.write(dhcpdtext)
-
- return dhcpdtext
-
-def start_server():
- logger = logging.getLogger(__name__)
- global init_cmd
- cmd = init_cmd
- with open(os.devnull, 'w') as fn:
- status = subprocess.Popen(cmd, stdout=fn, stderr=fn).wait()
- if int(status) != 0:
- logger.error("Could not bring up dhcpd server")
- else:
- logger.info("Dhcp server brought up")
- return status
-
-if __name__ == "__main__":
- start_server()
diff --git a/tools/pharos-validator/src/validation_tool/src/ipmi.py b/tools/pharos-validator/src/validation_tool/src/ipmi.py
deleted file mode 100644
index 44be207a..00000000
--- a/tools/pharos-validator/src/validation_tool/src/ipmi.py
+++ /dev/null
@@ -1,63 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import os
-import subprocess
-import logging
-
-def power_nodes(nodes, action):
- """ Attempts to power on all nodes specified in a list, then returns a list
- of the names of all failures. The list will be empty if no failures."""
- failed_nodes = []
- logger = logging.getLogger(__name__)
- if not nodes:
- logger.info("No nodes, is empty list")
- for node in nodes:
- # -I flag must be 'lanplus', 'lan' by itself doesn't work with
- # the most recent idrac/ipmi version
- if action == "on":
- pass
- elif action == "off":
- pass
- else:
- logger.error("Invalid ipmi command: {}".format(action))
- continue
-
- cmd = ["ipmitool", \
- "-I", "lanplus", \
- "-H ", "'"+node.ipmi_addr+"'", \
- "-U ", "'"+node.ipmi_user+"'", \
- "-P ", "'"+node.ipmi_pass+"'", \
- "power", action]
-
- logger.debug("Running: \"{}\"".format(' '.join(cmd)))
- try:
- with open(os.devnull, 'w') as fn:
- status = subprocess.check_call(" ".join(cmd), \
- stdout=fn, stderr=fn, shell=True)
- except subprocess.CalledProcessError as e:
- status = e.returncode
- logger.error("{} could not be accessed at {} (exit code {})".format(\
- node.name, node.ipmi_addr, status))
- failed_nodes.append(node.name)
- if int(status) == 0:
- logger.info("{} successfully powered {}".format(node.name, action))
-
- return failed_nodes
-
-def status(node, ipaddr, username, passwd):
- # -I flag must be 'lanplus', 'lan' by itself doesn't work with
- # the most recent idrac/ipmi version
- chkcmd = ["ipmitool", \
- "-I", "lanplus", \
- "-H", ipaddr, \
- "-U", username, \
- "-P", passwd, \
- "chassis", "status"]
- print(chkcmd)
- subprocess.Popen(chkcmd)
diff --git a/tools/pharos-validator/src/validation_tool/src/jenkins.py b/tools/pharos-validator/src/validation_tool/src/jenkins.py
deleted file mode 100644
index 443a6157..00000000
--- a/tools/pharos-validator/src/validation_tool/src/jenkins.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/tools/pharos-validator/src/validation_tool/src/node.py b/tools/pharos-validator/src/validation_tool/src/node.py
deleted file mode 100644
index 280abb7f..00000000
--- a/tools/pharos-validator/src/validation_tool/src/node.py
+++ /dev/null
@@ -1,85 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-import socket
-import yaml
-import os
-
-import pharosvalidator.test.probe as probe
-import pharosvalidator.test.evaluate as evaluate
-from pharosvalidator.util import send_msg
-
-def hardware_test():
- """
- hardware_test: Run hardware probing/testing functions
-
- input: None
-
- output: String in YAML format of the tests that were run
- """
- logger = logging.getLogger(__name__)
- logger.info("Beginning hardware test")
-
- # Run test scripts
- results = []
- results.append(testinterpreter("CPU test", evaluate.cpu, probe.cpu()))
- results.append(testinterpreter("Memory test", evaluate.memory, probe.memory()))
- results.append(testinterpreter("Storage test", evaluate.storage, probe.storage()))
-
- # Start generating the yaml file
- yamltext = ""
- for result in results:
- yamltext += yaml.dump(result, default_flow_style=False)
- return yamltext
-
-def network_test(networkfile):
- logger = logging.getLogger(__name__)
- logger.info("Beginning network test")
- logger.info("Ending network test")
- pass
-
-def send_result(host, port, result):
- """
- send_result: Send the final test result to the central test server
-
- input: Host address of target; Port of target; String to send to server
-
- output: None
- """
- logger = logging.getLogger(__name__)
- logger.info("Sending test result")
-
- # Format the results properly
- linecount = 0
- for c in result:
- if c == "\n":
- linecount += 1
-
- result = str(linecount) + "\n" + result
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect((host, int(port)))
- send_msg(sock, result)
-
-def testinterpreter(name, test, dataset):
- """High level function for test functions within this module to print out
- their results in an ordered function while also writing out logs,
- expects a list of testresults objects"""
-
- # Start the yaml file contents
- data = {name:[]}
-
- # test the dataset
- results = test(dataset)
-
- for result in results:
- data[name].append(result)
-
- return data
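For illustration, the dict returned by testinterpreter serializes with yaml.dump into a block like the one below; the test name and values here are hypothetical:

import yaml

result = {"CPU test": [
    {"corecount": {"pass": True, "description": "Have 8, Need at least 4"}},
]}
print(yaml.dump(result, default_flow_style=False))
# CPU test:
# - corecount:
#     description: Have 8, Need at least 4
#     pass: true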
diff --git a/tools/pharos-validator/src/validation_tool/src/receiver.py b/tools/pharos-validator/src/validation_tool/src/receiver.py
deleted file mode 100644
index 07d968e7..00000000
--- a/tools/pharos-validator/src/validation_tool/src/receiver.py
+++ /dev/null
@@ -1,46 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import socket
-import threading
-import logging
-
-from pharosvalidator.util import read_msg
-
-def start(nodecount, port, q):
- """Start a server to retrieve the files from the nodes. Server will run
- indefinitely until the parent process ends"""
- logging.basicConfig(level=0)
- logger = logging.getLogger(__name__)
-
- address = "" # Empty means act as a server on all available interfaces
-
- logger.info("Bringing up receiver server...")
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.bind((address, port))
- sock.listen(nodecount) # Max connections is the amount of nodes
-
- while True:
- # Receive a descriptor for the client socket, cl stands for client
- (clsock, claddress) = sock.accept()
- logger.info("Received client connection...")
- client_thread = threading.Thread(target=_server_accept_thread, \
- args=(clsock, claddress, q), daemon=True)
- # Start a new thread to read the new client socket connection
- client_thread.start()
-
- sock.close()
- logger.info("Bringing down receiver server...")
-
-def _server_accept_thread(clsock, claddress, q):
- """Read from the socket into the queue, then close the connection"""
- logger = logging.getLogger(__name__)
- q.put(read_msg(clsock))
- logger.info("Retreived message from socket")
- clsock.close()
diff --git a/tools/pharos-validator/src/validation_tool/src/server.py b/tools/pharos-validator/src/validation_tool/src/server.py
deleted file mode 100644
index 91c9a4f2..00000000
--- a/tools/pharos-validator/src/validation_tool/src/server.py
+++ /dev/null
@@ -1,111 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-import os
-import subprocess
-import time
-
-# Constant definitions
-from pharosvalidator.const import *
-
-def ssh_thread(remoteaddr, returnaddr, port, passes):
- """
- ssh_thread: the main loop of a thread the server spawns to connect to a node
- over ssh.
-
- input: remoteaddr, returnaddr, and port to forward to run_remote_test;
- passes to specify how many attempts should be made
- """
- for i in range(passes):
- status = run_remote_test(remoteaddr, returnaddr, port)
- if status == 0:
- break
- time.sleep(1)
-
-def run_remote_test(remoteaddr, returnaddr, port):
- """
- run_remote_test: ssh to a given remote address, and run a test program
- on the remote machine specifying the address and port of where the results
- should be sent (usually back to the machine this program was run on)
-
- input: ip address of the ssh target; Address of the test results target;
- Port of the test results target
-
- output: 0 if the test ran over ssh successfully, non-zero if the test failed
- """
- #TODO: keep retrying ssh until the remote service is up, analogous to the ping check
- logger = logging.getLogger(__name__)
-
- cmd = ["ssh", "root@"+remoteaddr, HARDWARE_TEST, \
- "-p", port, "-H", returnaddr, "hardware"]
-
- logger.debug("Running: {}".format(" ".join(cmd)))
- try:
- with open(os.devnull, 'w') as fn:
- status = subprocess.check_call(" ".join(cmd), stdout=fn, stderr=fn, shell=True)
- except subprocess.CalledProcessError as e:
- status = e.returncode
- logger.error("ssh attempt to '{}' failed".format(remoteaddr))
-
- return status
-
-def ping_network(ip_range_list, ipcnt, passes):
- """
- ping_network: Ping a range of ips until the amount of successful pings
- reaches the requested count (ipcnt)
-
- input: List of ip addresses to be pinged; Counter for threshold
- of successful pings; Number of iterations to pass
-
- output: List of ip addresses that were found to be up
- """
- logger = logging.getLogger("pharosvalidator")
- assert isinstance(ip_range_list, list)
- ips_found = 0
- alive_ips = []
- for t in range(passes):
- for addr in list(ip_range_list):
- cmd = [ \
- "ping", \
- "-c", "1", \
- "-w", "1", \
- addr]
- logger.debug("Running: \"{}\"".format(' '.join(cmd)))
- try:
- with open(os.devnull, 'w') as fn:
- status = subprocess.check_call(" ".join(cmd), \
- stdout=fn, stderr=fn, shell=True)
- except subprocess.CalledProcessError as e:
- status = e.returncode
- logger.error("Ping at '{}' failed".format(addr))
- # If the ip address was pinged successfully, then remove it from future attempts
- if status == 0:
- ips_found += 1
- logger.info("{} is up, {} total nodes up".format(addr, ips_found))
-
- # Remove the ip that was successfully pinged from being tested again
- ip_range_list.remove(addr)
-
- # Add the successfully pinged node to a list of successful pings
- alive_ips.append(addr)
-
- if ips_found >= ipcnt:
- break
-
- if ips_found >= ipcnt:
- break
-
- return alive_ips
-
-def bring_up_admin_ip(ipaddr):
- """
- Assign the machine this test is running on an address according to the
- configuration file
- """
- cmd = [""]
- subprocess.Popen(cmd)
diff --git a/tools/pharos-validator/src/validation_tool/src/test/__init__.py b/tools/pharos-validator/src/validation_tool/src/test/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/tools/pharos-validator/src/validation_tool/src/test/__init__.py
+++ /dev/null
diff --git a/tools/pharos-validator/src/validation_tool/src/test/evaluate.py b/tools/pharos-validator/src/validation_tool/src/test/evaluate.py
deleted file mode 100644
index 81a837d6..00000000
--- a/tools/pharos-validator/src/validation_tool/src/test/evaluate.py
+++ /dev/null
@@ -1,159 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import logging
-
-from pharosvalidator.util import approxsize
-
-# Constant macros
-from pharosvalidator.const import *
-
-def cpu(cpudata):
- """Compares system cpu against the pharos specification"""
- results = []
-
- # Architecture evaluation, a value of 63 or greater indicates at least a 64-bit OS
- if cpudata["bitsize"] >= 63:
- val = True
- else:
- val = False
- result = {"architecture": {
- "pass": val,
- "description": str(cpudata["architecture"])}}
- results.append(result)
-
- # Core evaluation
- if cpudata["cores"] < MIN_CORECOUNT:
- val = False
- else:
- val = True
- desc = "Have {0}, Need at least {1}".format(cpudata["cores"], MIN_CORECOUNT)
- result = {"corecount": {
- "pass": val,
- "description": desc}}
- results.append(result)
-
- # Speed evaluation
- i = 0
- for cpufreq in cpudata["frequency"]:
- # The frequency could not be read if the probe reported a non-positive value
- if cpufreq <= 0:
- val = False
- desc = "(Cpu frequency could not be read)"
- else:
- if approxsize(cpufreq, MIN_CPUFREQ, 5) or cpufreq > MIN_CPUFREQ:
- val = True
- else:
- val = False
- desc = "Have {:.2f}Mhz, Need at least ~= {:.2f}Mhz".format( \
- cpufreq, MIN_CPUFREQ)
- result = {"cpu"+str(i): {
- "pass": val,
- "description": desc}}
- results.append(result)
- i += 1
-
- return results
-
-def memory(memdata):
- """Compares system meminfo object against the pharos specification"""
- logger = logging.getLogger(__name__)
-
- results = []
-
- logger.debug("required memory: {}, detected memory: {}".format(\
- MIN_MEMSIZE, memdata["size"]))
- # Capacity evaluation
- # memdata["size"] is in Gb while MIN_MEMSIZE is in Kb, so convert before comparing
- min_memsize_gb = MIN_MEMSIZE/1000000
- if approxsize(memdata["size"], min_memsize_gb, 5) or memdata["size"] > min_memsize_gb:
- val = True
- else:
- val = False
-
- desc = "Have {:.2f}G, Need at least ~= {:.2f}G".format( \
- memdata["size"], MIN_MEMSIZE/1000000)
-
- result = {"memory capacity": {
- "pass": val,
- "description": desc}}
- results.append(result)
-
- return results
-
-def storage(diskdata):
- """Compares system storage against the Pharos specification"""
- def sizecmp(a, b, unit):
- if approxsize(a, b, 10) or a > b:
- val = True
- else:
- val = False
- desc = "capacity is {:.2f}{}, Need at least ~= {:.2f}{}".format(a, \
- unit, b, unit)
- return (val,desc)
-
- results = []
- # Disk size evaluation (also counts the disks)
- diskcount = {"ssd":0, "non-ssd":0}
- for disk in diskdata["names"]:
- if diskdata["rotational"][disk]:
- disktype = "non-ssd"
- diskcount["non-ssd"] += 1
- else:
- disktype = "ssd"
- diskcount["ssd"] += 1
- val, desc = sizecmp(diskdata["sizes"][disk], MIN_SSDSIZE, 'G')
- data = diskdata["sizes"][disk]
- result = {disk: {
- "pass": val,
- "description": "Disk type: disktype; " + desc}}
- results.append(result)
-
- # Disk number evaluation
- if sum(diskcount.values()) >= MIN_DISKCOUNT and diskcount["ssd"] >= MIN_SSDCOUNT:
- val = True
- else:
- val = False
- desc = "Have {0} drives, Need at least {1} drives and {3} ssds".format( \
- sum(diskcount.values()), MIN_DISKCOUNT, \
- diskcount["ssd"], MIN_SSDCOUNT)
-
- data = diskcount
- result = {"diskcount": {
- "pass": val,
- "description": desc}}
- results.append(result)
- return results
-
-"""
-def netinterfaces(netfaces):
- results = []
- for netface in netfaces:
- if netface.status <= 0:
- val = False
- state = "down"
- else:
- val = True
- state = "up"
- try:
- MACaddr = netface.MAC[0]["addr"]
- except IndexError:
- MACaddr = "no MAC"
- if len(netface.addrs) > 0:
- addrs = ""
- for addr in netface.addrs:
- if len(addrs) > 0:
- addrs += ", "
- addrs += addr['addr']
- addrs = "addresses: " + addrs
- else:
- addrs = "no address"
- desc = "({0} is {1} with {2})".format(netface.name, state, addrs)
- data = MACaddr
- results.append(gen_yamltext(netface.name, val, desc, data))
- return results
- """
-
diff --git a/tools/pharos-validator/src/validation_tool/src/test/probe.py b/tools/pharos-validator/src/validation_tool/src/test/probe.py
deleted file mode 100644
index daeccbc0..00000000
--- a/tools/pharos-validator/src/validation_tool/src/test/probe.py
+++ /dev/null
@@ -1,137 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import os
-import re
-import sys
-import platform
-import subprocess
-import netifaces
-import logging
-
-from pharosvalidator.util import cd # Context manager for changing the working directory
-
-# Static vars
-mempath="/proc/meminfo"
-cpuinfopath="/proc/cpuinfo"
-cpupath="/sys/devices/system/cpu/"
-diskpath="/sys/block/"
-
-def readproc(path):
- """Reads and parses /proc from [path] argument files
- and returns a hashmap of values"""
- logger = logging.getLogger(__name__)
- # Fail if path does not exist
- try:
- hashmap = {}
- with open(path) as fd:
- logger.debug("Reading {}".format(path))
- for line in fd:
- data = line.split(":")
- if len(data) == 2:
- # Strip trailing characters from hashmap names and entries
- # for less junk
- hashmap[data[0].strip()] = data[1].strip()
- return hashmap
- except IOError:
- logger.error("Path to file does not exist: {}".format(path))
- quit(1)
-
-def cpu():
- logger = logging.getLogger(__name__)
- cpudata = {}
- cpuinfo = readproc(cpuinfopath)
- cpudata["bitsize"] = sys.maxsize.bit_length()
- cpudata["architecture"] = platform.architecture()[0]
- cpudata["cores"] = int(cpuinfo["cpu cores"])
- cpudata["frequency"] = []
- for i in range(cpudata["cores"]):
- freqpath = "{0}/cpu{1}/cpufreq/cpuinfo_max_freq".format(cpupath, \
- str(i))
- try:
- with open(freqpath) as fd:
- logger.debug("Opening {}".format(freqpath))
- cpufreq = (float(fd.read(-1)))/1000
- except IOError:
- # Less accurate way of getting cpu information as
- # this frequency may change during operation,
- # if dynamic frequency scaling is enabled,
- # however it is better than nothing.
- logger.error("Path to file does not exist: {}".format(freqpath))
- logger.error("Reading cpu frequency from {} instead".format(freqpath))
- cpufreq = float(cpuinfo["cpu MHz"])
-
- if cpufreq < 0:
- cpudata["frequency"].append(0)
- else:
- cpudata["frequency"].append(cpufreq)
-
- return cpudata
-
-def memory():
- logger = logging.getLogger(__name__)
- meminfo=readproc(mempath)
- # Create the memory object to store memory information
- memdata = {}
- memdata["size"] = (int(meminfo["MemTotal"].split(' ')[0]))/1000000
- return memdata
-
-def storage():
- """Gather's disk information"""
- logger = logging.getLogger(__name__)
- diskdata = {"names":[],"rotational":{},"sizes":{}}
- for disk in os.listdir(diskpath):
- # sdX is the naming scheme for IDE/SATA interfaces in Linux
- if re.match(r"sd\w",disk):
- logger.debug("Found disk {}".format(disk))
- diskdata["names"].append(disk)
- sizepath = "{0}/{1}/size".format(diskpath, disk)
- try:
- with open(sizepath) as fd:
- size = int(fd.read(-1))
- except IOError:
- size = -1
- # If the read was successful
- if size != -1:
- # Converts the value to Gb
- diskdata["sizes"][disk] = (size * 512)/1000000000
-
- rotationalpath = "{0}/{1}/queue/rotational".format(diskpath, disk)
- try:
- with open(rotationalpath) as fd:
- rotational = int(fd.read(-1))
- except IOError:
- rotational = -1
- if rotational == 0:
- diskdata["rotational"][disk] = False
- else:
- diskdata["rotational"][disk] = True
-
- return diskdata
-
-def netinterfaces(nodeinfo):
- """Uses netifaces to probe the system for network interface information"""
- netfaces = []
- for interface in netifaces.interfaces():
- netface = netdata()
- netface.name = interface
- tmp = netifaces.ifaddresses(interface)
- # If the interface is up and has at least one ip address
- if netifaces.AF_INET in tmp:
- netface.status = 1 # 1 stands for "up"
- netface.addrs = tmp[netifaces.AF_INET]
- # If the interface is down
- else:
- netface.status = 0 # 0 stands for "down"
- # The file /proc/net/arp may also be used to read MAC addresses
- if netifaces.AF_LINK in tmp:
- netface.MAC = tmp[netifaces.AF_LINK]
- netfaces.append(netface)
-
- return netfaces
diff --git a/tools/pharos-validator/src/validation_tool/src/util.py b/tools/pharos-validator/src/validation_tool/src/util.py
deleted file mode 100644
index 67a75a56..00000000
--- a/tools/pharos-validator/src/validation_tool/src/util.py
+++ /dev/null
@@ -1,107 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Todd Gaunt and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import ipaddress
-import logging
-import netifaces
-import os
-
-class cd:
- """Context manager for changing the current working directory"""
- def __init__(self, new_path):
- self.new_path = os.path.expanduser(new_path)
-
- def __enter__(self):
- self.saved_path = os.getcwd()
- os.chdir(self.new_path)
-
- def __exit__(self, etype, value, traceback):
- os.chdir(self.saved_path)
-
-def approxsize(x, y, deviation):
- """Approximately compares 'x' to 'y' with in % of 'deviation'"""
- logger = logging.getLogger(__name__)
-
- dev = (y * .01 * deviation)
-
- if x >= round(y - dev, 0) and x <= round(y + dev, 0):
- logger.debug("{} is approximately {}".format(x, y))
- return True
- else:
- logger.debug("{} is not approximately {}".format(x, y))
- return False
-
-def read_line(sock):
- """Reads from a socket until a \n character or 512 bytes have been read,
- whichever comes first"""
- c = ""
- recvline = ""
- reads = 0
- while (c != "\n" and reads < 512):
- # Decode bytes to str, sockets output bytes which aren't pretty
- c = sock.recv(1).decode("utf-8")
- #print("char: '" + c + "'") # Debugging code
- recvline += c
- reads += 1
- return recvline
-
-def read_msg(sock):
- """Reads a message prefixed with a number and a newline char, eg. "20\n"
- then reads x lines, where x is equal to the number in the first line."""
- # Read the socket once initially for the line count
- buf = read_line(sock)
- buf = buf[:-1] # Cut off the '\n' character
- length = int(buf)
-
- lines = []
- for i in range(length):
- lines.append(read_line(sock))
- return "".join(lines)
-
-def send_msg(sock, msg):
- """Sends a message to a socket"""
- # Encode python str to bytes beforehand, sockets only deal in bytes
- msg = bytes(msg, "utf-8")
- totalsent = 0
- while totalsent < len(msg):
- sent = sock.send(msg[totalsent:])
- if sent == 0:
- return -1
- totalsent = totalsent + sent
- return totalsent
-
-def get_addr(interface):
- """Get the address of the machine that this program is running on"""
- return netifaces.ifaddresses(interface)[netifaces.AF_INET][0]["addr"]
-
-def gen_ip_range(cidr, excluded, minimum, maximum):
- """Takes a network in cidr notation and a min/max value, and creates a list
- of the ip addresses available on [minimum, maximum]. Also removes "excluded"
- addresses from the range"""
- logger = logging.getLogger(__name__)
- # Generate a list of available ip addresses for the dhcp server
- ip_range = list(map(lambda x: x.exploded, ipaddress.ip_network(cidr).hosts()))
-
- for addr in excluded:
- # Remove the value from the list, if it isn't in the list then whatever
- try:
- ip_range.remove(addr)
- except ValueError:
- logger.debug("{} not in ip_range, cannot remove".format(addr))
-
- # Remove all values before the minimum usable value
- for i in range(len(ip_range)):
- if ip_range[i] == minimum:
- ip_range = ip_range[i::]
- break
- # Remove all values after the maximum usable value
- for i in range(len(ip_range)):
- if ip_range[i] == maximum:
- ip_range = ip_range[0:i+1]
- break
- return ip_range
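A usage sketch for gen_ip_range (the addresses are hypothetical): carve the usable DHCP range out of a /24 while skipping the installer's own address:

ips = gen_ip_range("10.20.0.0/24", ["10.20.0.1"], "10.20.0.10", "10.20.0.50")
# ips[0] == "10.20.0.10", ips[-1] == "10.20.0.50"; "10.20.0.1" is excluded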
diff --git a/tools/pharos-validator/src/validation_tool/tests/test_node.py b/tools/pharos-validator/src/validation_tool/tests/test_node.py
deleted file mode 100644
index e69de29b..00000000
--- a/tools/pharos-validator/src/validation_tool/tests/test_node.py
+++ /dev/null
diff --git a/tools/pharos-validator/src/validation_tool/tests/test_probe.py b/tools/pharos-validator/src/validation_tool/tests/test_probe.py
deleted file mode 100644
index e69de29b..00000000
--- a/tools/pharos-validator/src/validation_tool/tests/test_probe.py
+++ /dev/null
diff --git a/tools/pharos-validator/src/validation_tool/tests/test_server.py b/tools/pharos-validator/src/validation_tool/tests/test_server.py
deleted file mode 100644
index 35388b6d..00000000
--- a/tools/pharos-validator/src/validation_tool/tests/test_server.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-def test_ssh_thread():
- """Test to see if ssh connections are attempted the proper amount of times"""
- from pharosvalidator.server import ssh_thread
- ssh_thread("127.0.0.1", "0.0.0.0", "1", 10)
-
-
-
diff --git a/tools/pharos-validator/src/validation_tool/tests/test_util.py b/tools/pharos-validator/src/validation_tool/tests/test_util.py
deleted file mode 100644
index 9ce939bd..00000000
--- a/tools/pharos-validator/src/validation_tool/tests/test_util.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python3
-
-import random
-
-def test_approxsize():
- from pharosvalidator.util import approxsize
- assert approxsize(100, 95, 5)
- assert approxsize(100, 105, 5)
-
- assert not approxsize(100, 94, 5)
- assert not approxsize(100, 106, 5)
-