author    | Sylvain Desbureaux <sylvain.desbureaux@orange.com> | 2021-03-16 14:09:29 +0100
committer | Cédric Ollivier <cedric.ollivier@orange.com> | 2021-03-19 10:05:48 +0100
commit    | b26ac308d4d441195fcd804d4d0ff36356fa8a90 (patch)
tree      | 60596049d1c4497657ef85fd4a7cddb988901506 /functest_kubernetes
parent    | 4117257dfb7462f0892ec2bc057af86dc56194d8 (diff)
Provide support for air-gapped env for rally
Sometimes the Kubernetes cluster under test doesn't have direct access to the
Internet and can only reach it through repository mirrors.
This patch handles that case for the Rally test case.
There are two ways to provide the repository mirrors (see the sketch after
this list):
- Set a single environment variable (`MIRROR_REPO`) pointing to a repository
  mirror that hosts all needed images.
- Set one environment variable per needed repository:
  - `DOCKERHUB_REPO` for the Docker Hub registry (`docker.io`)
  - `GCR_REPO` for the Google Container Registry (`gcr.io`)
  - `K8S_GCR_REPO` for the Kubernetes registry (`k8s.gcr.io`)
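As a rough illustration of both options, a minimal sketch (the mirror address
and the `run_tests` invocation are hypothetical; only the variable names come
from this patch):

```sh
#!/bin/sh
# Option 1: a single mirror hosting every needed image
export MIRROR_REPO=registry.example.local

# Option 2: one variable per upstream registry; each one takes precedence
# over MIRROR_REPO for its own registry
export DOCKERHUB_REPO=registry.example.local/docker
export GCR_REPO=registry.example.local/gcr
export K8S_GCR_REPO=registry.example.local/k8s

# then run the Rally test case as usual
run_tests -t rally_kubernetes
```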
The list of needed images has also been extracted to a dedicated file so that
a Kubernetes administrator can easily upload these images to the mirror when
the mirror itself doesn't have access to the Internet.
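A minimal sketch of such an upload, assuming a hypothetical private registry
at `registry.example.local`; the target path must match what the Rally task
will request (e.g. `<MIRROR_REPO>/pause:3.3`, `<MIRROR_REPO>/busybox:1.28`):

```sh
#!/bin/sh
MIRROR=registry.example.local  # hypothetical mirror address

# Seed the mirror from a host that still has Internet access.
docker pull k8s.gcr.io/pause:3.3
docker tag k8s.gcr.io/pause:3.3 "${MIRROR}/pause:3.3"
docker push "${MIRROR}/pause:3.3"

docker pull docker.io/library/busybox:1.28
docker tag docker.io/library/busybox:1.28 "${MIRROR}/busybox:1.28"
docker push "${MIRROR}/busybox:1.28"
```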
Signed-off-by: Sylvain Desbureaux <sylvain.desbureaux@orange.com>
Change-Id: I2ea6622b79f7e3c3c63c1441c4dab48e9bc4fb1a
Diffstat (limited to 'functest_kubernetes')
-rw-r--r-- | functest_kubernetes/ci/download_images.sh | 60
-rw-r--r-- | functest_kubernetes/ci/images.txt | 53
-rw-r--r-- | functest_kubernetes/rally/all-in-one.yaml | 40
-rw-r--r-- | functest_kubernetes/rally/rally_kubernetes.py | 8
4 files changed, 82 insertions, 79 deletions
```diff
diff --git a/functest_kubernetes/ci/download_images.sh b/functest_kubernetes/ci/download_images.sh
index 1d982ce1..dfadb024 100644
--- a/functest_kubernetes/ci/download_images.sh
+++ b/functest_kubernetes/ci/download_images.sh
@@ -2,66 +2,10 @@
 set -e
 
-tmpfile=$(mktemp)
-cat << EOF > $tmpfile
-docker.io/appropriate/curl:edge
-docker.io/aquasec/kube-bench:0.3.1
-docker.io/aquasec/kube-hunter:0.3.1
-docker.io/gluster/glusterdynamic-provisioner:v1.0
-docker.io/library/busybox:1.28
-docker.io/library/busybox:1.29
-docker.io/library/httpd:2.4.38-alpine
-docker.io/library/httpd:2.4.39-alpine
-docker.io/library/nginx:1.14-alpine
-docker.io/library/nginx:1.15-alpine
-docker.io/library/perl:5.26
-docker.io/library/redis:5.0.5-alpine
-docker.io/ollivier/clearwater-astaire:hunter
-docker.io/ollivier/clearwater-bono:hunter
-docker.io/ollivier/clearwater-cassandra:hunter
-docker.io/ollivier/clearwater-chronos:hunter
-docker.io/ollivier/clearwater-ellis:hunter
-docker.io/ollivier/clearwater-homer:hunter
-docker.io/ollivier/clearwater-homestead:hunter
-docker.io/ollivier/clearwater-homestead-prov:hunter
-docker.io/ollivier/clearwater-live-test:hunter
-docker.io/ollivier/clearwater-ralf:hunter
-docker.io/ollivier/clearwater-sprout:hunter
-gcr.io/google-samples/hello-go-gke:1.0
-gcr.io/kubernetes-e2e-test-images/apparmor-loader:1.0
-gcr.io/kubernetes-e2e-test-images/cuda-vector-add:1.0
-gcr.io/kubernetes-e2e-test-images/cuda-vector-add:2.0
-gcr.io/kubernetes-e2e-test-images/echoserver:2.2
-gcr.io/kubernetes-e2e-test-images/ipc-utils:1.0
-gcr.io/kubernetes-e2e-test-images/jessie-dnsutils:1.0
-gcr.io/kubernetes-e2e-test-images/kitten:1.0
-gcr.io/kubernetes-e2e-test-images/metadata-concealment:1.2
-gcr.io/kubernetes-e2e-test-images/nautilus:1.0
-gcr.io/kubernetes-e2e-test-images/nonewprivs:1.0
-gcr.io/kubernetes-e2e-test-images/nonroot:1.0
-gcr.io/kubernetes-e2e-test-images/regression-issue-74839-amd64:1.0
-gcr.io/kubernetes-e2e-test-images/resource-consumer:1.5
-gcr.io/kubernetes-e2e-test-images/sample-apiserver:1.17
-gcr.io/kubernetes-e2e-test-images/volume/gluster:1.0
-gcr.io/kubernetes-e2e-test-images/volume/iscsi:2.0
-gcr.io/kubernetes-e2e-test-images/volume/nfs:1.0
-gcr.io/kubernetes-e2e-test-images/volume/rbd:1.0.1
-k8s.gcr.io/build-image/debian-iptables:v12.1.2
-k8s.gcr.io/conformance:v1.19.0
-k8s.gcr.io/e2e-test-images/agnhost:2.20
-k8s.gcr.io/etcd:3.4.13-0
-k8s.gcr.io/pause:3.2
-k8s.gcr.io/pause:3.3
-k8s.gcr.io/prometheus-dummy-exporter:v0.1.0
-k8s.gcr.io/prometheus-to-sd:v0.5.0
-k8s.gcr.io/sd-dummy-exporter:v0.2.0
-k8s.gcr.io/sig-storage/nfs-provisioner:v2.2.2
-quay.io/coreos/etcd:v2.2.5
-EOF
-for i in $(cat $tmpfile); do
+DIR="$(cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd)"
+for i in $(cat $DIR/images.txt); do
     sudo docker pull $i
     # https://kind.sigs.k8s.io/docs/user/quick-start/
     # Be free to use docker save && kind load image-archive
     kind load docker-image $i --name latest
 done
-rm -f $tmpfile
diff --git a/functest_kubernetes/ci/images.txt b/functest_kubernetes/ci/images.txt
new file mode 100644
index 00000000..d6e148b1
--- /dev/null
+++ b/functest_kubernetes/ci/images.txt
@@ -0,0 +1,53 @@
+docker.io/appropriate/curl:edge
+docker.io/aquasec/kube-bench:0.3.1
+docker.io/aquasec/kube-hunter:0.3.1
+docker.io/gluster/glusterdynamic-provisioner:v1.0
+docker.io/library/busybox:1.28
+docker.io/library/busybox:1.29
+docker.io/library/httpd:2.4.38-alpine
+docker.io/library/httpd:2.4.39-alpine
+docker.io/library/nginx:1.14-alpine
+docker.io/library/nginx:1.15-alpine
+docker.io/library/perl:5.26
+docker.io/library/redis:5.0.5-alpine
+docker.io/ollivier/clearwater-astaire:hunter
+docker.io/ollivier/clearwater-bono:hunter
+docker.io/ollivier/clearwater-cassandra:hunter
+docker.io/ollivier/clearwater-chronos:hunter
+docker.io/ollivier/clearwater-ellis:hunter
+docker.io/ollivier/clearwater-homer:hunter
+docker.io/ollivier/clearwater-homestead:hunter
+docker.io/ollivier/clearwater-homestead-prov:hunter
+docker.io/ollivier/clearwater-live-test:hunter
+docker.io/ollivier/clearwater-ralf:hunter
+docker.io/ollivier/clearwater-sprout:hunter
+gcr.io/google-samples/hello-go-gke:1.0
+gcr.io/kubernetes-e2e-test-images/apparmor-loader:1.0
+gcr.io/kubernetes-e2e-test-images/cuda-vector-add:1.0
+gcr.io/kubernetes-e2e-test-images/cuda-vector-add:2.0
+gcr.io/kubernetes-e2e-test-images/echoserver:2.2
+gcr.io/kubernetes-e2e-test-images/ipc-utils:1.0
+gcr.io/kubernetes-e2e-test-images/jessie-dnsutils:1.0
+gcr.io/kubernetes-e2e-test-images/kitten:1.0
+gcr.io/kubernetes-e2e-test-images/metadata-concealment:1.2
+gcr.io/kubernetes-e2e-test-images/nautilus:1.0
+gcr.io/kubernetes-e2e-test-images/nonewprivs:1.0
+gcr.io/kubernetes-e2e-test-images/nonroot:1.0
+gcr.io/kubernetes-e2e-test-images/regression-issue-74839-amd64:1.0
+gcr.io/kubernetes-e2e-test-images/resource-consumer:1.5
+gcr.io/kubernetes-e2e-test-images/sample-apiserver:1.17
+gcr.io/kubernetes-e2e-test-images/volume/gluster:1.0
+gcr.io/kubernetes-e2e-test-images/volume/iscsi:2.0
+gcr.io/kubernetes-e2e-test-images/volume/nfs:1.0
+gcr.io/kubernetes-e2e-test-images/volume/rbd:1.0.1
+k8s.gcr.io/build-image/debian-iptables:v12.1.2
+k8s.gcr.io/conformance:v1.19.0
+k8s.gcr.io/e2e-test-images/agnhost:2.20
+k8s.gcr.io/etcd:3.4.13-0
+k8s.gcr.io/pause:3.2
+k8s.gcr.io/pause:3.3
+k8s.gcr.io/prometheus-dummy-exporter:v0.1.0
+k8s.gcr.io/prometheus-to-sd:v0.5.0
+k8s.gcr.io/sd-dummy-exporter:v0.2.0
+k8s.gcr.io/sig-storage/nfs-provisioner:v2.2.2
+quay.io/coreos/etcd:v2.2.5
diff --git a/functest_kubernetes/rally/all-in-one.yaml b/functest_kubernetes/rally/all-in-one.yaml
index 03d016ca..db30194e 100644
--- a/functest_kubernetes/rally/all-in-one.yaml
+++ b/functest_kubernetes/rally/all-in-one.yaml
@@ -22,7 +22,7 @@ subtasks:
   - title: Run a single workload with create/read/delete pod
     scenario:
       Kubernetes.create_and_delete_pod:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
     runner:
       constant:
         concurrency: {{ concurrency }}
@@ -35,7 +35,7 @@ subtasks:
   - title: Run a single workload with create/read/delete replication controller
     scenario:
       Kubernetes.create_and_delete_replication_controller:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         replicas: 2
     runner:
       constant:
@@ -49,7 +49,7 @@ subtasks:
   - title: Run a single workload with create/scale/delete replication controller
     scenario:
       Kubernetes.create_scale_and_delete_replication_controller:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         replicas: 2
         scale_replicas: 3
     runner:
@@ -64,7 +64,7 @@ subtasks:
   - title: Run a single workload with create/read/delete replicaset
     scenario:
      Kubernetes.create_and_delete_replicaset:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         replicas: 1
     runner:
       constant:
@@ -78,7 +78,7 @@ subtasks:
   - title: Run a single workload with create/scale/delete replicaset
     scenario:
       Kubernetes.create_scale_and_delete_replicaset:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         replicas: 1
         scale_replicas: 2
     runner:
@@ -94,7 +94,7 @@ subtasks:
       Run a single workload with create/read/delete pod with emptyDir volume
     scenario:
       Kubernetes.create_and_delete_pod_with_emptydir_volume:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         mount_path: /opt/check
     runner:
       constant:
@@ -110,7 +110,7 @@ subtasks:
       volume
     scenario:
       Kubernetes.create_and_delete_pod_with_emptydir_volume:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         command:
           - sleep
           - "3600"
@@ -131,7 +131,7 @@ subtasks:
   - title: Run a single workload with create/read/delete pod with secret volume
     scenario:
       Kubernetes.create_and_delete_pod_with_secret_volume:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         command:
           - sleep
           - "3600"
@@ -148,7 +148,7 @@ subtasks:
   - title: Run a single workload with create/check/delete pod with secret volume
     scenario:
       Kubernetes.create_and_delete_pod_with_secret_volume:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         command:
           - sleep
           - "3600"
@@ -171,7 +171,7 @@ subtasks:
       volume
     scenario:
       Kubernetes.create_and_delete_pod_with_hostpath_volume:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         command:
           - sleep
           - "3600"
@@ -195,7 +195,7 @@ subtasks:
       Run a single workload with create/read/delete pod with configMap volume
     scenario:
       Kubernetes.create_and_delete_pod_with_configmap_volume:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         command:
           - "sleep"
           - "3600"
@@ -218,7 +218,7 @@ subtasks:
       volume
     scenario:
       Kubernetes.create_and_delete_pod_with_configmap_volume:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         command:
           - "sleep"
           - "3600"
@@ -243,7 +243,7 @@ subtasks:
   - title: Run a single workload with create/read/delete deployment
     scenario:
       Kubernetes.create_and_delete_deployment:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         replicas: 2
     runner:
       constant:
@@ -257,7 +257,7 @@ subtasks:
   - title: Run a single workload with create/rollout/delete deployment
     scenario:
       Kubernetes.create_rollout_and_delete_deployment:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         replicas: 1
         command:
           - sleep
@@ -281,7 +281,7 @@ subtasks:
   - title: Run a single workload with create/read/delete statefulset
     scenario:
       Kubernetes.create_and_delete_statefulset:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         replicas: 2
     runner:
       constant:
@@ -295,7 +295,7 @@ subtasks:
   - title: Run a single workload with create/scale/delete statefulset
     scenario:
       Kubernetes.create_scale_and_delete_statefulset:
-        image: k8s.gcr.io/pause:3.3
+        image: {{ k8s_gcr_repo }}/pause:3.3
         replicas: 1
         scale_replicas: 2
     runner:
@@ -310,7 +310,7 @@ subtasks:
   - title: Run a single workload with create/read/delete job
     scenario:
       Kubernetes.create_and_delete_job:
-        image: busybox:1.28
+        image: {{ dockerhub_repo }}/busybox:1.28
         command:
           - echo
           - "SUCCESS"
@@ -326,7 +326,7 @@ subtasks:
   - title: Run a single workload with create/check/delete clusterIP service
     scenario:
       Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
-        image: gcr.io/google-samples/hello-go-gke:1.0
+        image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
         port: 80
         protocol: TCP
     runner:
@@ -343,7 +343,7 @@ subtasks:
       custom endpoint
     scenario:
       Kubernetes.create_check_and_delete_pod_with_cluster_ip_service:
-        image: gcr.io/google-samples/hello-go-gke:1.0
+        image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
         port: 80
         protocol: TCP
         custom_endpoint: true
@@ -359,7 +359,7 @@ subtasks:
   - title: Run a single workload with create/check/delete NodePort service
     scenario:
       Kubernetes.create_check_and_delete_pod_with_node_port_service:
-        image: gcr.io/google-samples/hello-go-gke:1.0
+        image: {{ gcr_repo }}/google-samples/hello-go-gke:1.0
         port: 80
         protocol: TCP
         request_timeout: 10
diff --git a/functest_kubernetes/rally/rally_kubernetes.py b/functest_kubernetes/rally/rally_kubernetes.py
index fb20296e..f5d7e955 100644
--- a/functest_kubernetes/rally/rally_kubernetes.py
+++ b/functest_kubernetes/rally/rally_kubernetes.py
@@ -31,6 +31,9 @@ class RallyKubernetes(testcase.TestCase):
     concurrency = 1
     times = 1
     namespaces_count = 1
+    dockerhub_repo = os.getenv("MIRROR_REPO", "docker.io")
+    gcr_repo = os.getenv("MIRROR_REPO", "gcr.io")
+    k8s_gcr_repo = os.getenv("MIRROR_REPO", "k8s.gcr.io")
 
     def __init__(self, **kwargs):
         super(RallyKubernetes, self).__init__(**kwargs)
@@ -68,7 +71,10 @@ class RallyKubernetes(testcase.TestCase):
                 concurrency=kwargs.get("concurrency", self.concurrency),
                 times=kwargs.get("times", self.times),
                 namespaces_count=kwargs.get(
-                    "namespaces_count", self.namespaces_count)))
+                    "namespaces_count", self.namespaces_count),
+                dockerhub_repo=os.getenv("DOCKERHUB_REPO", self.dockerhub_repo),
+                gcr_repo=os.getenv("GCR_REPO", self.gcr_repo),
+                k8s_gcr_repo=os.getenv("K8S_GCR_REPO", self.k8s_gcr_repo)))
         rapi.task.validate(deployment='my-kubernetes', config=task)
         task_instance = rapi.task.create(deployment='my-kubernetes')
         rapi.task.start(
```
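The lookup order implemented in rally_kubernetes.py above can be summarised as
a roughly equivalent shell sketch (the variable names come from the patch; the
values printed are whatever you exported): a registry-specific variable wins,
then `MIRROR_REPO`, then the upstream default.

```sh
#!/bin/sh
# Roughly equivalent fallback chain to the os.getenv() calls in the patch.
dockerhub_repo="${DOCKERHUB_REPO:-${MIRROR_REPO:-docker.io}}"
gcr_repo="${GCR_REPO:-${MIRROR_REPO:-gcr.io}}"
k8s_gcr_repo="${K8S_GCR_REPO:-${MIRROR_REPO:-k8s.gcr.io}}"

echo "DockerHub images pulled from:  ${dockerhub_repo}"
echo "gcr.io images pulled from:     ${gcr_repo}"
echo "k8s.gcr.io images pulled from: ${k8s_gcr_repo}"
```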