author     chenjiankun <chenjiankun1@huawei.com>	2017-08-30 02:31:40 +0000
committer  chenjiankun <chenjiankun1@huawei.com>	2017-09-21 11:01:25 +0000
commit     677779ce5267e1265262fa6ae7584a81583113bc (patch)
tree       26569b148bb2491a55a73a8f116919e34663998b /yardstick
parent     b00112e33caffee6b6b01402537e68007fdc8cb2 (diff)
Add service in kubernetes context
JIRA: YARDSTICK-803

Currently the Kubernetes test cases can only run on the master node. We need
to support running them from a jump server, so this change adds a Service of
type NodePort for each pod; the pod can then be reached over SSH through the
node's address and the allocated NodePort.

Change-Id: Ia7900d263f1c5323f132435addec27ad10547ef9
Signed-off-by: chenjiankun <chenjiankun1@huawei.com>
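For illustration only (not part of the patch): with a NodePort Service in front
of each pod, the SSH endpoint seen from the jump server is <node_ip>:<node_port>
rather than the pod IP. The address, port and key path below are placeholders.

    import paramiko

    node_ip = '192.0.2.20'    # any cluster node address (placeholder)
    node_port = 30022         # NodePort allocated for the Service's 22/TCP port (placeholder)

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(node_ip, port=node_port, username='root',
                   key_filename='/tmp/yardstick_key')   # key injected by the context (placeholder path)
    _, stdout, _ = client.exec_command('hostname')
    print(stdout.read())
    client.close()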
Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py  |  35
-rw-r--r--  yardstick/benchmark/core/task.py            |  10
-rw-r--r--  yardstick/common/kubernetes_utils.py        |  59
-rw-r--r--  yardstick/orchestrator/kubernetes.py        |  33
4 files changed, 122 insertions(+), 15 deletions(-)
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index a39f63137..2334e5076 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -54,6 +54,7 @@ class KubernetesContext(Context):
LOG.info('Launch containers')
self._create_rcs()
+ self._create_services()
time.sleep(1)
self.template.get_rc_pods()
@@ -63,6 +64,7 @@ class KubernetesContext(Context):
self._delete_ssh_key()
self._delete_rcs()
self._delete_pods()
+ self._delete_services()
super(KubernetesContext, self).undeploy()
@@ -80,6 +82,14 @@ class KubernetesContext(Context):
return False
return True
+ def _create_services(self):
+ for obj in self.template.service_objs:
+ obj.create()
+
+ def _delete_services(self):
+ for obj in self.template.service_objs:
+ obj.delete()
+
def _create_rcs(self):
for obj in self.template.k8s_objs:
self._create_rc(obj.get_template())
@@ -126,15 +136,22 @@ class KubernetesContext(Context):
utils.remove_file(self.public_key_path)
def _get_server(self, name):
- resp = k8s_utils.get_pod_list()
- hosts = ({'name': n.metadata.name,
- 'ip': n.status.pod_ip,
- 'user': 'root',
- 'key_filename': self.key_path,
- 'private_ip': n.status.pod_ip}
- for n in resp.items if n.metadata.name.startswith(name))
-
- return next(hosts, None)
+ service_name = '{}-service'.format(name)
+ service = k8s_utils.get_service_by_name(service_name).ports[0]
+
+ host = {
+ 'name': service.name,
+ 'ip': self._get_node_ip(),
+ 'private_ip': k8s_utils.get_pod_by_name(name).status.pod_ip,
+ 'ssh_port': service.node_port,
+ 'user': 'root',
+ 'key_filename': self.key_path,
+ }
+
+ return host
+
+ def _get_node_ip(self):
+ return k8s_utils.get_node_list().items[0].status.addresses[0].address
def _get_network(self, attr_name):
return None
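Illustration only, with placeholder values rather than output from the patch:
with the Service in place, _get_server() now returns a host entry whose 'ip'
and 'ssh_port' point at a cluster node and its allocated NodePort, while
'private_ip' keeps the pod address for intra-cluster use.

    # Hypothetical return value of KubernetesContext._get_server('host')
    host = {
        'name': None,                # the ServicePort in the template is unnamed
        'ip': '192.0.2.20',          # first address of the first cluster node
        'private_ip': '10.244.1.7',  # pod IP
        'ssh_port': 30022,           # NodePort allocated for service port 22/TCP
        'user': 'root',
        'key_filename': '/tmp/yardstick_key',   # self.key_path (placeholder)
    }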
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 0b6e3230b..a32e990ff 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -333,7 +333,7 @@ class Task(object): # pragma: no cover
context_cfg['target'] = {"ipaddr": target}
else:
context_cfg['target'] = Context.get_server(target)
- if self._is_same_heat_context(cfg["host"], target):
+ if self._is_same_context(cfg["host"], target):
context_cfg['target']["ipaddr"] = context_cfg['target']["private_ip"]
else:
context_cfg['target']["ipaddr"] = context_cfg['target']["ip"]
@@ -358,8 +358,8 @@ class Task(object): # pragma: no cover
context_cfg['target'] = {}
else:
context_cfg['target'] = Context.get_server(target)
- if self._is_same_heat_context(scenario_cfg["host"],
- target):
+ if self._is_same_context(scenario_cfg["host"],
+ target):
ip_list.append(context_cfg["target"]["private_ip"])
else:
ip_list.append(context_cfg["target"]["ip"])
@@ -377,7 +377,7 @@ class Task(object): # pragma: no cover
return runner
- def _is_same_heat_context(self, host_attr, target_attr):
+ def _is_same_context(self, host_attr, target_attr):
"""check if two servers are in the same heat context
host_attr: either a name for a server created by yardstick or a dict
with attribute name mapping when using external heat templates
@@ -385,7 +385,7 @@ class Task(object): # pragma: no cover
with attribute name mapping when using external heat templates
"""
for context in self.contexts:
- if context.__context_type__ != "Heat":
+ if context.__context_type__ not in {"Heat", "Kubernetes"}:
continue
host = context._get_server(host_attr)
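A simplified, self-contained sketch (not Yardstick code) of what the renamed
check decides: the private address is used only when host and target live in
the same Heat or Kubernetes context, otherwise the externally reachable
address is used.

    # toy illustration of how the ipaddr is picked from a server record
    def pick_ipaddr(same_context, server):
        return server['private_ip'] if same_context else server['ip']

    server = {'ip': '192.0.2.10', 'private_ip': '10.244.0.5'}
    print(pick_ipaddr(True, server))    # 10.244.0.5
    print(pick_ipaddr(False, server))   # 192.0.2.10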
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
index e4c232830..0cf7b9eab 100644
--- a/yardstick/common/kubernetes_utils.py
+++ b/yardstick/common/kubernetes_utils.py
@@ -28,6 +28,60 @@ def get_core_api(): # pragma: no cover
return client.CoreV1Api()
+def get_node_list(**kwargs): # pragma: no cover
+ core_v1_api = get_core_api()
+ try:
+ return core_v1_api.list_node(**kwargs)
+ except ApiException:
+ LOG.exception('Get node list failed')
+ raise
+
+
+def create_service(template,
+ namespace='default',
+ wait=False,
+ **kwargs): # pragma: no cover
+ core_v1_api = get_core_api()
+ metadata = client.V1ObjectMeta(**template.get('metadata', {}))
+
+ ports = [client.V1ServicePort(**port) for port in
+ template.get('spec', {}).get('ports', [])]
+ template['spec']['ports'] = ports
+ spec = client.V1ServiceSpec(**template.get('spec', {}))
+
+ service = client.V1Service(metadata=metadata, spec=spec)
+
+ try:
+ core_v1_api.create_namespaced_service(namespace, service)
+ except ApiException:
+ LOG.exception('Create Service failed')
+ raise
+
+
+def delete_service(name,
+ namespace='default',
+ **kwargs): # pragma: no cover
+ core_v1_api = get_core_api()
+ try:
+ core_v1_api.delete_namespaced_service(name, namespace, **kwargs)
+ except ApiException:
+ LOG.exception('Delete Service failed')
+
+
+def get_service_list(namespace='default', **kwargs):
+ core_v1_api = get_core_api()
+ try:
+ return core_v1_api.list_namespaced_service(namespace, **kwargs)
+ except ApiException:
+ LOG.exception('Get Service list failed')
+ raise
+
+
+def get_service_by_name(name): # pragma: no cover
+ service_list = get_service_list()
+ return next((s.spec for s in service_list.items if s.metadata.name == name), None)
+
+
def create_replication_controller(template,
namespace='default',
wait=False,
@@ -135,3 +189,8 @@ def get_pod_list(namespace='default'): # pragma: no cover
except ApiException:
LOG.exception('Get pod list failed')
raise
+
+
+def get_pod_by_name(name): # pragma: no cover
+ pod_list = get_pod_list()
+ return next((n for n in pod_list.items if n.metadata.name.startswith(name)), None)
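A minimal usage sketch of the new helpers, assuming a reachable cluster and a
pod/Service pair named 'host' / 'host-service' already created by the context;
it mirrors what KubernetesContext._get_server() does with them.

    from yardstick.common import kubernetes_utils as k8s_utils

    spec = k8s_utils.get_service_by_name('host-service')   # V1ServiceSpec or None
    if spec is not None:
        node_port = spec.ports[0].node_port
        node_ip = k8s_utils.get_node_list().items[0].status.addresses[0].address
        pod = k8s_utils.get_pod_by_name('host')             # V1Pod or None
        print(node_ip, node_port, pod.status.pod_ip)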
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
index 6d7045f58..9f94fd4ff 100644
--- a/yardstick/orchestrator/kubernetes.py
+++ b/yardstick/orchestrator/kubernetes.py
@@ -37,7 +37,7 @@ class KubernetesObject(object):
"template": {
"metadata": {
"labels": {
- "app": ""
+ "app": name
}
},
"spec": {
@@ -106,6 +106,35 @@ class KubernetesObject(object):
self._add_volume(key_volume)
+class ServiceObject(object):
+
+ def __init__(self, name):
+ self.name = '{}-service'.format(name)
+ self.template = {
+ 'metadata': {
+ 'name': '{}-service'.format(name)
+ },
+ 'spec': {
+ 'type': 'NodePort',
+ 'ports': [
+ {
+ 'port': 22,
+ 'protocol': 'TCP'
+ }
+ ],
+ 'selector': {
+ 'app': name
+ }
+ }
+ }
+
+ def create(self):
+ k8s_utils.create_service(self.template)
+
+ def delete(self):
+ k8s_utils.delete_service(self.name)
+
+
class KubernetesTemplate(object):
def __init__(self, name, template_cfg):
@@ -117,6 +146,8 @@ class KubernetesTemplate(object):
ssh_key=self.ssh_key,
**cfg)
for rc, cfg in template_cfg.items()]
+ self.service_objs = [ServiceObject(s) for s in self.rcs]
+
self.pods = []
def _get_rc_name(self, rc_name):
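A short sketch of how the new class is driven by the context, assuming a kube
config that yardstick's get_core_api() can load; one ServiceObject is built per
replication controller name, and create()/delete() wrap the kubernetes_utils
calls above.

    from yardstick.orchestrator.kubernetes import ServiceObject

    svc = ServiceObject('host')   # builds the 'host-service' NodePort template
    svc.create()                  # submits the Service via create_service()
    # ... run the test case over <node_ip>:<node_port> ...
    svc.delete()                  # removed again when the context is undeployed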