aboutsummaryrefslogtreecommitdiffstats
path: root/functest_kubernetes/rally
diff options
context:
space:
mode:
authorSylvain Desbureaux <sylvain.desbureaux@orange.com>2021-03-16 14:09:29 +0100
committerCédric Ollivier <cedric.ollivier@orange.com>2021-03-19 11:05:35 +0100
commit998438b8a8c0a2ae64f6fd4940e1dbe1c78b65d5 (patch)
tree515478db423684cf922185f92ffbf1f70e7bd5b7 /functest_kubernetes/rally
parent394c758d7c78696b30dbe1bce01f46ccca977f6f (diff)
Provide support for air gapped env for rally
Sometimes, the tested Kubernetes cluster doesn't have direct access to the Internet but accesses it through repository mirrors. This patch handles this case for the rally test case. There are two ways of providing the repository mirrors: - Give an environment variable (`MIRROR_REPO`) which gives a repository mirror with all needed images. - Give an environment variable per needed repo: - `DOCKERHUB_REPO` for the DockerHub repository (`docker.io`) - `GCR_REPO` for the Google Cloud repository (`gcr.io`) - `K8S_GCR_REPO` for the Kubernetes repository (`k8s.gcr.io`) The list of needed images has also been extracted so a Kubernetes administrator can easily upload these images to the mirror if the mirror itself doesn't have access to the Internet. Signed-off-by: Sylvain Desbureaux <sylvain.desbureaux@orange.com> Change-Id: I2ea6622b79f7e3c3c63c1441c4dab48e9bc4fb1a (cherry picked from commit b26ac308d4d441195fcd804d4d0ff36356fa8a90)
Diffstat (limited to 'functest_kubernetes/rally')
-rw-r--r--functest_kubernetes/rally/all-in-one.yaml40
-rw-r--r--functest_kubernetes/rally/rally_kubernetes.py8
2 files changed, 27 insertions, 21 deletions
diff --git a/functest_kubernetes/rally/all-in-one.yaml b/functest_kubernetes/rally/all-in-one.yaml
index 03d016ca..db30194e 100644
--- a/functest_kubernetes/rally/all-in-one.yaml
+++ b/functest_kubernetes/rally/all-in-one.yaml
@@ -22,7 +22,7 @@ subtasks:
- title: Run a single workload with create/read/delete pod
scenario:
Kubernetes.create_and_delete_pod:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
runner:
constant:
concurrency: {{ concurrency }}
@@ -35,7 +35,7 @@ subtasks:
- title: Run a single workload with create/read/delete replication controller
scenario:
Kubernetes.create_and_delete_replication_controller:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
runner:
constant:
@@ -49,7 +49,7 @@ subtasks:
- title: Run a single workload with create/scale/delete replication controller
scenario:
Kubernetes.create_scale_and_delete_replication_controller:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
scale_replicas: 3
runner:
@@ -64,7 +64,7 @@ subtasks:
- title: Run a single workload with create/read/delete replicaset
scenario:
Kubernetes.create_and_delete_replicaset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 1
runner:
constant:
@@ -78,7 +78,7 @@ subtasks:
- title: Run a single workload with create/scale/delete replicaset
scenario:
Kubernetes.create_scale_and_delete_replicaset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 1
scale_replicas: 2
runner:
@@ -94,7 +94,7 @@ subtasks:
Run a single workload with create/read/delete pod with emptyDir volume
scenario:
Kubernetes.create_and_delete_pod_with_emptydir_volume:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
mount_path: /opt/check
runner:
constant:
@@ -110,7 +110,7 @@ subtasks:
volume
scenario:
Kubernetes.create_and_delete_pod_with_emptydir_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
@@ -131,7 +131,7 @@ subtasks:
- title: Run a single workload with create/read/delete pod with secret volume
scenario:
Kubernetes.create_and_delete_pod_with_secret_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
@@ -148,7 +148,7 @@ subtasks:
- title: Run a single workload with create/check/delete pod with secret volume
scenario:
Kubernetes.create_and_delete_pod_with_secret_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
@@ -171,7 +171,7 @@ subtasks:
volume
scenario:
Kubernetes.create_and_delete_pod_with_hostpath_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- sleep
- "3600"
@@ -195,7 +195,7 @@ subtasks:
Run a single workload with create/read/delete pod with configMap volume
scenario:
Kubernetes.create_and_delete_pod_with_configmap_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- "sleep"
- "3600"
@@ -218,7 +218,7 @@ subtasks:
volume
scenario:
Kubernetes.create_and_delete_pod_with_configmap_volume:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- "sleep"
- "3600"
@@ -243,7 +243,7 @@ subtasks:
- title: Run a single workload with create/read/delete deployment
scenario:
Kubernetes.create_and_delete_deployment:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
runner:
constant:
@@ -257,7 +257,7 @@ subtasks:
- title: Run a single workload with create/rollout/delete deployment
scenario:
Kubernetes.create_rollout_and_delete_deployment:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
replicas: 1
command:
- sleep
@@ -281,7 +281,7 @@ subtasks:
- title: Run a single workload with create/read/delete statefulset
scenario:
Kubernetes.create_and_delete_statefulset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 2
runner:
constant:
@@ -295,7 +295,7 @@ subtasks:
- title: Run a single workload with create/scale/delete statefulset
scenario:
Kubernetes.create_scale_and_delete_statefulset:
- image: k8s.gcr.io/pause:3.3
+ image: {{ k8s_gcr_repo }}/pause:3.3
replicas: 1
scale_replicas: 2
runner:
@@ -310,7 +310,7 @@ subtasks:
- title: Run a single workload with create/read/delete job
scenario:
Kubernetes.create_and_delete_job:
- image: busybox:1.28
+ image: {{ dockerhub_repo }}/busybox:1.28
command:
- echo
- "SUCCESS"
@@ -326,7 +326,7 @@ subtasks:
- title: Run a single workload with create/check/delete clusterIP service
scenario:
Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
- image: gcr.io/google-samples/hello-go-gke:1.0
+ image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
port: 80
protocol: TCP
runner:
@@ -343,7 +343,7 @@ subtasks:
custom endpoint
scenario:
Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
- image: gcr.io/google-samples/hello-go-gke:1.0
+ image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
port: 80
protocol: TCP
custom_endpoint: true
@@ -359,7 +359,7 @@ subtasks:
- title: Run a single workload with create/check/delete NodePort service
scenario:
Kubernetes.create_check_and_delete_pod_with_node_port_service:
- image: gcr.io/google-samples/hello-go-gke:1.0
+ image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
port: 80
protocol: TCP
request_timeout: 10
diff --git a/functest_kubernetes/rally/rally_kubernetes.py b/functest_kubernetes/rally/rally_kubernetes.py
index fb20296e..f5d7e955 100644
--- a/functest_kubernetes/rally/rally_kubernetes.py
+++ b/functest_kubernetes/rally/rally_kubernetes.py
@@ -31,6 +31,9 @@ class RallyKubernetes(testcase.TestCase):
concurrency = 1
times = 1
namespaces_count = 1
+ dockerhub_repo = os.getenv("MIRROR_REPO", "docker.io")
+ gcr_repo = os.getenv("MIRROR_REPO", "gcr.io")
+ k8s_gcr_repo = os.getenv("MIRROR_REPO", "k8s.gcr.io")
def __init__(self, **kwargs):
super(RallyKubernetes, self).__init__(**kwargs)
@@ -68,7 +71,10 @@ class RallyKubernetes(testcase.TestCase):
concurrency=kwargs.get("concurrency", self.concurrency),
times=kwargs.get("times", self.times),
namespaces_count=kwargs.get(
- "namespaces_count", self.namespaces_count)))
+ "namespaces_count", self.namespaces_count),
+ dockerhub_repo=os.getenv("DOCKERHUB_REPO", self.dockerhub_repo),
+ gcr_repo=os.getenv("GCR_REPO", self.gcr_repo),
+ k8s_gcr_repo=os.getenv("K8S_GCR_REPO", self.k8s_gcr_repo)))
rapi.task.validate(deployment='my-kubernetes', config=task)
task_instance = rapi.task.create(deployment='my-kubernetes')
rapi.task.start(