Diffstat (limited to 'tools')
-rw-r--r--    tools/opnfvdashboard/opnfvdashboard.py    131
-rw-r--r--    tools/pkt_gen/trex/__init__.py              13
-rw-r--r--    tools/pkt_gen/trex/trex.py                 338
-rw-r--r--    tools/systeminfo.py                          4
4 files changed, 422 insertions, 64 deletions
diff --git a/tools/opnfvdashboard/opnfvdashboard.py b/tools/opnfvdashboard/opnfvdashboard.py
index c24b9f8c..a77a9c3a 100644
--- a/tools/opnfvdashboard/opnfvdashboard.py
+++ b/tools/opnfvdashboard/opnfvdashboard.py
@@ -17,10 +17,14 @@ vsperf2dashboard
import os
import csv
+import copy
import logging
+from datetime import datetime as dt
import requests
-def results2opnfv_dashboard(results_path, int_data):
+_DETAILS = {"64": '', "128": '', "512": '', "1024": '', "1518": ''}
+
+def results2opnfv_dashboard(tc_names, results_path, int_data):
"""
    the method opens the csv file with results and calls the json encoder
"""
@@ -31,22 +35,33 @@ def results2opnfv_dashboard(results_path, int_data):
resfile = results_path + '/' + test
with open(resfile, 'r') as in_file:
reader = csv.DictReader(in_file)
- _push_results(reader, int_data)
+ tc_data = _prepare_results(reader, int_data)
+ _push_results(tc_data)
+ tc_names.remove(tc_data['id'])
+
+ # report TCs without results as FAIL
+ if tc_names:
+ tc_fail = copy.deepcopy(int_data)
+ tc_fail['start_time'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')
+ tc_fail['stop_time'] = tc_fail['start_time']
+ tc_fail['criteria'] = 'FAIL'
+ tc_fail['version'] = 'N/A'
+ tc_fail['details'] = copy.deepcopy(_DETAILS)
+ for tc_name in tc_names:
+ tc_fail['dashboard_id'] = "{}_{}".format(tc_name, tc_fail['vswitch'])
+ _push_results(tc_fail)
-def _push_results(reader, int_data):
+def _prepare_results(reader, int_data):
"""
- the method encodes results and sends them into opnfv dashboard
+    the method prepares dashboard details for executed testcases
"""
- db_url = int_data['db_url']
- url = db_url + "/results"
- casename = ""
- version_ovs = ""
+ version_vswitch = ""
version_dpdk = ""
- version = ""
allowed_pkt = ["64", "128", "512", "1024", "1518"]
- details = {"64": '', "128": '', "512": '', "1024": '', "1518": ''}
- test_start = None
- test_stop = None
+ vswitch = None
+ details = copy.deepcopy(_DETAILS)
+ tc_data = copy.deepcopy(int_data)
+ tc_data['criteria'] = 'PASS'
for row_reader in reader:
if allowed_pkt.count(row_reader['packet_size']) == 0:
@@ -55,77 +70,65 @@ def _push_results(reader, int_data):
# test execution time includes all frame sizes, so start & stop time
# is the same (repeated) for every framesize in CSV file
- if test_start is None:
- test_start = row_reader['start_time']
- test_stop = row_reader['stop_time']
+        if 'start_time' not in tc_data:
+ tc_data['start_time'] = row_reader['start_time']
+ tc_data['stop_time'] = row_reader['stop_time']
+ tc_data['id'] = row_reader['id']
+ # CI job executes/reports TCs per vswitch type
+ vswitch = row_reader['vswitch']
- casename = _generate_test_name(row_reader['id'], int_data)
+ tc_data['dashboard_id'] = "{}_{}".format(row_reader['id'], row_reader['vswitch'].lower())
if "back2back" in row_reader['id']:
+ # 0 B2B frames is quite common, so we can't mark such TC as FAIL
details[row_reader['packet_size']] = row_reader['b2b_frames']
else:
details[row_reader['packet_size']] = row_reader['throughput_rx_fps']
+            # 0 PPS is definitely a failure
+ if float(row_reader['throughput_rx_fps']) == 0:
+ tc_data['criteria'] = 'FAIL'
# Create version field
- with open(int_data['pkg_list'], 'r') as pkg_file:
+ with open(tc_data['pkg_list'], 'r') as pkg_file:
for line in pkg_file:
- if "OVS_TAG" in line:
- version_ovs = line.replace(' ', '')
- version_ovs = version_ovs.replace('OVS_TAG?=', '')
+ if "OVS_TAG" in line and vswitch.startswith('Ovs'):
+ version_vswitch = line.replace(' ', '')
+ version_vswitch = "OVS " + version_vswitch.replace('OVS_TAG?=', '')
+ if "VPP_TAG" in line and vswitch.startswith('Vpp'):
+ version_vswitch = line.replace(' ', '')
+ version_vswitch = "VPP " + version_vswitch.replace('VPP_TAG?=', '')
if "DPDK_TAG" in line:
- if int_data['vanilla'] is False:
+            # DPDK_TAG is not used by VPP, which downloads its own DPDK version
+ if vswitch == "OvsDpdkVhost":
version_dpdk = line.replace(' ', '')
- version_dpdk = version_dpdk.replace('DPDK_TAG?=', '')
- else:
- version_dpdk = "not used"
- version = "OVS " + version_ovs.replace('\n', '') + " DPDK " + version_dpdk.replace('\n', '')
+ version_dpdk = " DPDK {}".format(
+ version_dpdk.replace('DPDK_TAG?=', ''))
+
+ tc_data['details'] = details
+ tc_data['version'] = version_vswitch.replace('\n', '') + version_dpdk.replace('\n', '')
+
+ return tc_data
+
+def _push_results(int_data):
+ """
+    the method sends testcase details to the dashboard database
+ """
+ url = int_data['db_url'] + "/results"
# Build body
body = {"project_name": "vsperf",
"scenario": "vsperf",
- "start_date": test_start,
- "stop_date": test_stop,
- "case_name": casename,
+ "start_date": int_data['start_time'],
+ "stop_date": int_data['stop_time'],
+ "case_name": int_data['dashboard_id'],
"pod_name": int_data['pod'],
"installer": int_data['installer'],
- "version": version,
+ "version": int_data['version'],
"build_tag": int_data['build_tag'],
"criteria": int_data['criteria'],
- "details": details}
+ "details": int_data['details']}
my_data = requests.post(url, json=body)
- logging.info("Results for %s sent to opnfv, http response: %s", casename, my_data)
- logging.debug("opnfv url: %s", db_url)
+ logging.info("Results for %s sent to opnfv, http response: %s", int_data['dashboard_id'], my_data)
+ logging.debug("opnfv url: %s", int_data['db_url'])
logging.debug("the body sent to opnfv")
logging.debug(body)
-
-def _generate_test_name(testcase, int_data):
- """
- the method generates testcase name for releng
- """
- vanilla = int_data['vanilla']
- res_name = ""
-
- names = {'phy2phy_tput': ["tput_ovsdpdk", "tput_ovs"],
- 'back2back': ["b2b_ovsdpdk", "b2b_ovs"],
- 'phy2phy_tput_mod_vlan': ["tput_mod_vlan_ovsdpdk", "tput_mod_vlan_ovs"],
- 'phy2phy_cont': ["cont_ovsdpdk", "cont_ovs"],
- 'pvp_cont': ["pvp_cont_ovsdpdkuser", "pvp_cont_ovsvirtio"],
- 'pvvp_cont': ["pvvp_cont_ovsdpdkuser", "pvvp_cont_ovsvirtio"],
- 'phy2phy_scalability': ["scalability_ovsdpdk", "scalability_ovs"],
- 'pvp_tput': ["pvp_tput_ovsdpdkuser", "pvp_tput_ovsvirtio"],
- 'pvp_back2back': ["pvp_b2b_ovsdpdkuser", "pvp_b2b_ovsvirtio"],
- 'pvvp_tput': ["pvvp_tput_ovsdpdkuser", "pvvp_tput_ovsvirtio"],
- 'pvvp_back2back': ["pvvp_b2b_ovsdpdkuser", "pvvp_b2b_ovsvirtio"],
- 'phy2phy_cpu_load': ["cpu_load_ovsdpdk", "cpu_load_ovs"],
- 'phy2phy_mem_load': ["mem_load_ovsdpdk", "mem_load_ovs"]}
-
- for name, name_list in names.items():
- if name != testcase:
- continue
- if vanilla is True:
- res_name = name_list[1]
- else:
- res_name = name_list[0]
- break
-
- return res_name
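The refactored dashboard code now splits preparation (_prepare_results) from submission (_push_results), with the latter simply POSTing a prepared record to the testapi "/results" endpoint. A minimal sketch of such a request is shown below; the URL and every field value are illustrative placeholders, and only the field names are taken from the code above.

    # Sketch of the record _push_results() submits; all values are placeholders.
    import requests

    body = {
        "project_name": "vsperf",
        "scenario": "vsperf",
        "start_date": "2017-06-01 12:00:00",
        "stop_date": "2017-06-01 12:30:00",
        "case_name": "phy2phy_tput_ovsdpdkvhost",   # dashboard_id: "<testcase id>_<vswitch>"
        "pod_name": "example-pod",
        "installer": "fuel",
        "version": "OVS v2.6.0 DPDK v16.07",
        "build_tag": "example-build-42",
        "criteria": "PASS",
        "details": {"64": "14.2e6", "128": "8.4e6", "512": "2.3e6",
                    "1024": "1.2e6", "1518": "0.8e6"},   # throughput_rx_fps per frame size
    }

    response = requests.post("http://testresults.example.org/api/v1/results", json=body)
    print(response.status_code)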
diff --git a/tools/pkt_gen/trex/__init__.py b/tools/pkt_gen/trex/__init__.py
new file mode 100644
index 00000000..455a1ef0
--- /dev/null
+++ b/tools/pkt_gen/trex/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017 Martin Goldammer, OPNFV.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/pkt_gen/trex/trex.py b/tools/pkt_gen/trex/trex.py
new file mode 100644
index 00000000..ae262306
--- /dev/null
+++ b/tools/pkt_gen/trex/trex.py
@@ -0,0 +1,338 @@
+# Copyright 2017 Martin Goldammer, OPNFV, Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Trex Traffic Generator Model
+"""
+# pylint: disable=undefined-variable
+import copy
+import logging
+import subprocess
+import sys
+from collections import OrderedDict
+# pylint: disable=unused-import
+import zmq
+from conf import settings
+from conf import merge_spec
+from core.results.results_constants import ResultsConstants
+from tools.pkt_gen.trafficgen.trafficgen import ITrafficGenerator
+# pylint: disable=wrong-import-position, import-error
+sys.path.append(settings.getValue('PATHS')['trafficgen']['trex']['src']['path'])
+from trex_stl_lib.api import *
+
+class Trex(ITrafficGenerator):
+ """Trex Traffic generator wrapper."""
+ _logger = logging.getLogger(__name__)
+
+ def __init__(self):
+ """Trex class constructor."""
+ super().__init__()
+ self._logger.info("In trex __init__ method")
+ self._params = {}
+ self._trex_host_ip_addr = (
+ settings.getValue('TRAFFICGEN_TREX_HOST_IP_ADDR'))
+ self._trex_base_dir = (
+ settings.getValue('TRAFFICGEN_TREX_BASE_DIR'))
+ self._trex_user = settings.getValue('TRAFFICGEN_TREX_USER')
+ self._stlclient = None
+
+ def connect(self):
+ '''Connect to Trex traffic generator
+
+ Verify that Trex is on the system indicated by
+ the configuration file
+ '''
+ self._stlclient = STLClient()
+ self._logger.info("TREX: In Trex connect method...")
+ if self._trex_host_ip_addr:
+ cmd_ping = "ping -c1 " + self._trex_host_ip_addr
+ else:
+ raise RuntimeError('TREX: Trex host not defined')
+
+ ping = subprocess.Popen(cmd_ping, shell=True, stderr=subprocess.PIPE)
+ output, error = ping.communicate()
+
+ if ping.returncode:
+ self._logger.error(error)
+ self._logger.error(output)
+ raise RuntimeError('TREX: Cannot ping Trex host at ' + \
+ self._trex_host_ip_addr)
+
+ connect_trex = "ssh " + self._trex_user + \
+ "@" + self._trex_host_ip_addr
+
+ cmd_find_trex = connect_trex + " ls " + \
+ self._trex_base_dir + "t-rex-64"
+
+ find_trex = subprocess.Popen(cmd_find_trex,
+ shell=True,
+ stderr=subprocess.PIPE)
+ output, error = find_trex.communicate()
+
+ if find_trex.returncode:
+ self._logger.error(error)
+ self._logger.error(output)
+ raise RuntimeError(
+ 'TREX: Cannot locate Trex program at %s within %s' \
+ % (self._trex_host_ip_addr, self._trex_base_dir))
+
+ self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
+ verbose_level=0)
+ self._stlclient.connect()
+ self._logger.info("TREX: Trex host successfully found...")
+
+ def disconnect(self):
+ """Disconnect from the traffic generator.
+
+ As with :func:`connect`, this function is optional.
+
+ Where implemented, this function should raise an exception on
+ failure.
+
+ :returns: None
+ """
+ self._logger.info("TREX: In trex disconnect method")
+ self._stlclient.disconnect(stop_traffic=True, release_ports=True)
+
+ @staticmethod
+ def create_packets(traffic, ports_info):
+ """Create base packet according to traffic specification.
+        If the traffic specification does not define srcmac and dstmac,
+        the packets are created with the MAC addresses of the T-Rex ports.
+ """
+ mac_add = [li['hw_mac'] for li in ports_info]
+
+ if traffic and traffic['l2']['framesize'] > 0:
+ if traffic['l2']['dstmac'] == '00:00:00:00:00:00' and \
+ traffic['l2']['srcmac'] == '00:00:00:00:00:00':
+ base_pkt_a = Ether(src=mac_add[0], dst=mac_add[1])/ \
+ IP(proto=traffic['l3']['proto'], src=traffic['l3']['srcip'],
+ dst=traffic['l3']['dstip'])/ \
+ UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
+ base_pkt_b = Ether(src=mac_add[1], dst=mac_add[0])/ \
+ IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
+ dst=traffic['l3']['srcip'])/ \
+ UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
+ else:
+ base_pkt_a = Ether(src=traffic['l2']['srcmac'], dst=traffic['l2']['dstmac'])/ \
+ IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
+ dst=traffic['l3']['srcip'])/ \
+ UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
+
+ base_pkt_b = Ether(src=traffic['l2']['dstmac'], dst=traffic['l2']['srcmac'])/ \
+ IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
+ dst=traffic['l3']['srcip'])/ \
+ UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
+
+ return (base_pkt_a, base_pkt_b)
+
+ @staticmethod
+ def create_streams(base_pkt_a, base_pkt_b, traffic):
+ """Add the base packet to the streams. Erase FCS and add payload
+ according to traffic specification
+ """
+ stream_1_lat = None
+ stream_2_lat = None
+ frame_size = int(traffic['l2']['framesize'])
+ fsize_no_fcs = frame_size - 4
+ payload_a = max(0, fsize_no_fcs - len(base_pkt_a)) * 'x'
+ payload_b = max(0, fsize_no_fcs - len(base_pkt_b)) * 'x'
+ pkt_a = STLPktBuilder(pkt=base_pkt_a/payload_a)
+ pkt_b = STLPktBuilder(pkt=base_pkt_b/payload_b)
+ stream_1 = STLStream(packet=pkt_a,
+ name='stream_1',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ stream_2 = STLStream(packet=pkt_b,
+ name='stream_2',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ lat_pps = settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS')
+ if lat_pps > 0:
+ stream_1_lat = STLStream(packet=pkt_a,
+ flow_stats=STLFlowLatencyStats(pg_id=0),
+ name='stream_1_lat',
+ mode=STLTXCont(pps=lat_pps))
+ stream_2_lat = STLStream(packet=pkt_b,
+ flow_stats=STLFlowLatencyStats(pg_id=1),
+ name='stream_2_lat',
+ mode=STLTXCont(pps=lat_pps))
+
+ return (stream_1, stream_2, stream_1_lat, stream_2_lat)
+
+ def generate_traffic(self, traffic, duration):
+ """The method that generate a stream
+ """
+ my_ports = [0, 1]
+ self._stlclient.reset(my_ports)
+ ports_info = self._stlclient.get_port_info(my_ports)
+ packet_1, packet_2 = Trex.create_packets(traffic, ports_info)
+ stream_1, stream_2, stream_1_lat, stream_2_lat = Trex.create_streams(packet_1, packet_2, traffic)
+ self._stlclient.add_streams(stream_1, ports=[0])
+ self._stlclient.add_streams(stream_2, ports=[1])
+
+ if stream_1_lat is not None:
+ self._stlclient.add_streams(stream_1_lat, ports=[0])
+ self._stlclient.add_streams(stream_2_lat, ports=[1])
+
+ self._stlclient.clear_stats()
+ self._stlclient.start(ports=[0, 1], force=True, duration=duration)
+ self._stlclient.wait_on_traffic(ports=[0, 1])
+ stats = self._stlclient.get_stats(sync_now=True)
+ return stats
+
+ @staticmethod
+ def calculate_results(stats):
+ """Calculate results from Trex statistic
+ """
+ result = OrderedDict()
+ result[ResultsConstants.TX_RATE_FPS] = (
+ '{:.3f}'.format(
+ float(stats["total"]["tx_pps"])))
+
+ result[ResultsConstants.THROUGHPUT_RX_FPS] = (
+ '{:.3f}'.format(
+ float(stats["total"]["rx_pps"])))
+
+ result[ResultsConstants.TX_RATE_MBPS] = (
+ '{:.3f}'.format(
+ float(stats["total"]["tx_bps"] / 1000000)))
+ result[ResultsConstants.THROUGHPUT_RX_MBPS] = (
+ '{:.3f}'.format(
+ float(stats["total"]["rx_bps"] / 1000000)))
+
+ result[ResultsConstants.TX_RATE_PERCENT] = 'Unknown'
+
+ result[ResultsConstants.THROUGHPUT_RX_PERCENT] = 'Unknown'
+
+ result[ResultsConstants.FRAME_LOSS_PERCENT] = (
+ '{:.3f}'.format(
+ float((stats["total"]["opackets"] - stats["total"]["ipackets"]) * 100 /
+ stats["total"]["opackets"])))
+
+ if settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS') > 0:
+ result[ResultsConstants.MIN_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(min(stats["latency"][0]["latency"]["total_min"],
+ stats["latency"][1]["latency"]["total_min"])))))
+
+ result[ResultsConstants.MAX_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(max(stats["latency"][0]["latency"]["total_max"],
+ stats["latency"][1]["latency"]["total_max"])))))
+
+ result[ResultsConstants.AVG_LATENCY_NS] = (
+ '{:.3f}'.format(
+ float((stats["latency"][0]["latency"]["average"]+
+ stats["latency"][1]["latency"]["average"])/2)))
+
+ else:
+ result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
+ result[ResultsConstants.MAX_LATENCY_NS] = 'Unknown'
+ result[ResultsConstants.AVG_LATENCY_NS] = 'Unknown'
+ return result
+
+ def send_cont_traffic(self, traffic=None, duration=30):
+ """See ITrafficGenerator for description
+ """
+ self._logger.info("In Trex send_cont_traffic method")
+ self._params.clear()
+
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(
+ self._params['traffic'], traffic)
+
+ stats = self.generate_traffic(traffic, duration)
+
+ return self.calculate_results(stats)
+
+ def start_cont_traffic(self, traffic=None, duration=30):
+ raise NotImplementedError(
+ 'Trex start cont traffic not implemented')
+
+ def stop_cont_traffic(self):
+ """See ITrafficGenerator for description
+ """
+ raise NotImplementedError(
+ 'Trex stop_cont_traffic method not implemented')
+
+ def send_rfc2544_throughput(self, traffic=None, duration=60,
+ lossrate=0.0, tests=10):
+ """See ITrafficGenerator for description
+ """
+ self._logger.info("In Trex send_rfc2544_throughput method")
+ self._params.clear()
+ test_lossrate = 0
+ left = 0
+ num_test = 1
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(
+ self._params['traffic'], traffic)
+ new_params = copy.deepcopy(traffic)
+ stats = self.generate_traffic(traffic, duration)
+ right = traffic['frame_rate']
+ center = traffic['frame_rate']
+
+ while num_test <= tests:
+ test_lossrate = ((stats["total"]["opackets"] - stats["total"]
+ ["ipackets"]) * 100) / stats["total"]["opackets"]
+ self._logger.debug("Iteration: %s, frame rate: %s, throughput_rx_fps: %s, frame_loss_percent: %s",
+ num_test, "{:.3f}".format(new_params['frame_rate']), stats['total']['rx_pps'],
+ "{:.3f}".format(test_lossrate))
+ if test_lossrate == 0.0 and new_params['frame_rate'] == traffic['frame_rate']:
+ break
+ elif test_lossrate > lossrate:
+ right = center
+ center = (left+right) / 2
+ new_params = copy.deepcopy(traffic)
+ new_params['frame_rate'] = center
+ stats = self.generate_traffic(new_params, duration)
+ else:
+ left = center
+ center = (left+right) / 2
+ new_params = copy.deepcopy(traffic)
+ new_params['frame_rate'] = center
+ stats = self.generate_traffic(new_params, duration)
+ num_test += 1
+ return self.calculate_results(stats)
+
+ def start_rfc2544_throughput(self, traffic=None, tests=1, duration=60,
+ lossrate=0.0):
+ raise NotImplementedError(
+ 'Trex start rfc2544 throughput not implemented')
+
+ def wait_rfc2544_throughput(self):
+ raise NotImplementedError(
+ 'Trex wait rfc2544 throughput not implemented')
+
+ def send_burst_traffic(self, traffic=None, numpkts=100, duration=5):
+ raise NotImplementedError(
+ 'Trex send burst traffic not implemented')
+
+ def send_rfc2544_back2back(self, traffic=None, tests=1, duration=30,
+ lossrate=0.0):
+ raise NotImplementedError(
+ 'Trex send rfc2544 back2back not implemented')
+
+ def start_rfc2544_back2back(self, traffic=None, tests=1, duration=30,
+ lossrate=0.0):
+ raise NotImplementedError(
+ 'Trex start rfc2544 back2back not implemented')
+
+ def wait_rfc2544_back2back(self):
+ raise NotImplementedError(
+ 'Trex wait rfc2544 back2back not implemented')
+
+if __name__ == "__main__":
+ pass
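The Trex wrapper above is normally driven by the vsperf framework, which loads the TRAFFICGEN_TREX_* settings and builds the traffic specification. The sketch below shows roughly how the class could be exercised on its own, assuming vsperf's configuration (including the T-Rex stateless library path) has already been loaded; the traffic dict lists only the keys this class reads, and all values are placeholders.

    # Assumes vsperf settings are loaded and point at a reachable T-Rex host.
    from tools.pkt_gen.trex.trex import Trex

    traffic = {
        'frame_rate': 100,                        # percent of line rate (STLTXCont percentage)
        'l2': {'framesize': 64,
               'srcmac': '00:00:00:00:00:00',     # zero MACs -> port MACs from get_port_info()
               'dstmac': '00:00:00:00:00:00'},
        'l3': {'proto': 17, 'srcip': '1.1.1.1', 'dstip': '90.90.90.90'},
        'l4': {'srcport': 3000, 'dstport': 3001},
    }

    gen = Trex()
    gen.connect()                                 # ping host, check t-rex-64, connect STLClient
    try:
        results = gen.send_cont_traffic(traffic=traffic, duration=30)
        print(results)                            # OrderedDict keyed by ResultsConstants
    finally:
        gen.disconnect()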
diff --git a/tools/systeminfo.py b/tools/systeminfo.py
index 75b7aa0d..f34bcce6 100644
--- a/tools/systeminfo.py
+++ b/tools/systeminfo.py
@@ -230,6 +230,7 @@ def get_version(app_name):
'loopback_l2fwd' : os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/l2fwd.c'),
'ixnet' : os.path.join(S.getValue('TRAFFICGEN_IXNET_LIB_PATH'), 'pkgIndex.tcl'),
'ixia' : os.path.join(S.getValue('TRAFFICGEN_IXIA_ROOT_DIR'), 'lib/ixTcl1.0/ixTclHal.tcl'),
+ 'trex' : os.path.join(S.getValue('ROOT_DIR'), 'src/trex/trex'),
}
@@ -312,6 +313,9 @@ def get_version(app_name):
app_version = match_line(app_version_file['ixia'], 'package provide IxTclHal')
if app_version:
app_version = app_version.split(' ')[3]
+ elif app_name.lower() == 'trex':
+ app_version = match_line(os.path.join(app_version_file['trex'], 'VERSION'), 'v')
+ app_git_tag = get_git_tag(app_version_file['trex'])
elif app_name.lower() == 'xena':
try:
app_version = S.getValue('XENA_VERSION')
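The systeminfo.py hunk registers T-Rex in the version lookup: the version string is matched from the VERSION file under src/trex/trex and the git tag of that checkout is recorded. A rough standalone equivalent is sketched below; the path is a placeholder and the behaviour of the match_line()/get_git_tag() helpers is only approximated.

    # Approximation of the new 'trex' branch of get_version(); path is a placeholder.
    import os
    import subprocess

    trex_dir = '/path/to/vswitchperf/src/trex/trex'    # ROOT_DIR + 'src/trex/trex'

    version = None
    with open(os.path.join(trex_dir, 'VERSION')) as ver_file:
        for line in ver_file:
            if line.startswith('v'):                   # e.g. "v2.25"
                version = line.strip()
                break

    git_tag = subprocess.check_output(
        ['git', '-C', trex_dir, 'rev-parse', '--short', 'HEAD'],
        universal_newlines=True).strip()

    print(version, git_tag)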