Add test setup for helm chart tests
This change implements fixtures required to set up a Kubernetes cluster
to run integration tests of helm charts on. The tests require
a kubeconfig file of an existing Kubernetes cluster. The test
fixtures will then initialize helm, a read-write-many filesystem
provisioner, an image-pull-secret and delete all created resources after
finishing the tests. This change further adds a single test to test
whether the gerrit-master helm-chart successfully deploys. This test
shows that the fixtures function correctly.
Change-Id: I106f65395f82286d86ee73198f5c996d94fbc610
diff --git a/.pylintrc b/.pylintrc
index cfda12b..2e74428 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -10,3 +10,6 @@
[SIMILARITIES]
min-similarity-lines=6
+
+[MASTER]
+init-hook='import sys; sys.path.append("./tests/helpers")'
diff --git a/LICENSE b/LICENSE
index 51706c7..6650a4c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -220,6 +220,11 @@
All rights reserved. \
3-Clause BSD License (https://github.com/gitpython-developers/GitPython/blob/master/LICENSE)
+Kubernetes Python Client \
+https://github.com/kubernetes-client/python \
+Copyright (c) 2014 The Kubernetes Authors \
+Apache 2 license (https://github.com/kubernetes-client/python/blob/master/LICENSE)
+
Passlib \
https://bitbucket.org/ecollins/passlib/wiki/Home \
Copyright (c) 2008-2017 Assurance Technologies, LLC.
diff --git a/Pipfile b/Pipfile
index ce95123..17241e6 100644
--- a/Pipfile
+++ b/Pipfile
@@ -15,6 +15,7 @@
pyopenssl = "~=18.0.0"
requests = "~=2.21.0"
pytest-timeout = "~=1.3.3"
+kubernetes = "~=8.0.1"
[requires]
python_version = "3.7"
diff --git a/Pipfile.lock b/Pipfile.lock
index 9d7a564..ddfe496 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
- "sha256": "5b2cf04682c1f1aba0992c3674b336ae743a64ea96ef515a4661db5d182e8cf5"
+ "sha256": "4421021398a3aee3a5265400439d234bb44ff715b3f19d7f93e7f61ec6c718ab"
},
"pipfile-spec": 6,
"requires": {
@@ -16,6 +16,13 @@
]
},
"default": {
+ "adal": {
+ "hashes": [
+ "sha256:5a7f1e037c6290c6d7609cab33a9e5e988c2fbec5c51d1c4c649ee3faff37eaf",
+ "sha256:fd17e5661f60634ddf96a569b95d34ccb8a98de60593d729c28bdcfe360eaad1"
+ ],
+ "version": "==1.2.2"
+ },
"asn1crypto": {
"hashes": [
"sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",
@@ -37,6 +44,13 @@
],
"version": "==19.1.0"
},
+ "cachetools": {
+ "hashes": [
+ "sha256:428266a1c0d36dc5aca63a2d7c5942e88c2c898d72139fca0e97fdd2380517ae",
+ "sha256:8ea2d3ce97850f31e4a08b0e2b5e6c34997d7216a9d2c98e0f3978630d4da69a"
+ ],
+ "version": "==3.1.1"
+ },
"certifi": {
"hashes": [
"sha256:046832c04d4e752f37383b628bc601a7ea7211496b4638f6514d0e5b9acc4939",
@@ -135,6 +149,13 @@
"index": "pypi",
"version": "==2.1.14"
},
+ "google-auth": {
+ "hashes": [
+ "sha256:0f7c6a64927d34c1a474da92cfc59e552a5d3b940d3266606c6a28b72888b9e4",
+ "sha256:20705f6803fd2c4d1cc2dcb0df09d4dfcb9a7d51fd59e94a3a28231fd93119ed"
+ ],
+ "version": "==1.6.3"
+ },
"idna": {
"hashes": [
"sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407",
@@ -149,6 +170,14 @@
],
"version": "==0.20"
},
+ "kubernetes": {
+ "hashes": [
+ "sha256:cbf217c5988516f79dec14f82a228f9897817717b7a53c538f84adf5217b9ca7",
+ "sha256:ef2b6d53708cf7f8864e6775fb45e3f76a92607785c422034d7e1d1063399597"
+ ],
+ "index": "pypi",
+ "version": "==8.0.2"
+ },
"more-itertools": {
"hashes": [
"sha256:409cd48d4db7052af495b09dec721011634af3753ae1ef92d2b32f73a745f832",
@@ -157,6 +186,13 @@
"markers": "python_version > '2.7'",
"version": "==7.2.0"
},
+ "oauthlib": {
+ "hashes": [
+ "sha256:bee41cc35fcca6e988463cacc3bcb8a96224f470ca547e697b604cc697b2f889",
+ "sha256:df884cd6cbe20e32633f1db1072e9356f53638e4361bef4e8b03c9127c9328ea"
+ ],
+ "version": "==3.1.0"
+ },
"packaging": {
"hashes": [
"sha256:a7ac867b97fdc07ee80a8058fe4435ccd274ecc3b0ed61d852d7d53055528cf9",
@@ -186,12 +222,33 @@
],
"version": "==1.8.0"
},
+ "pyasn1": {
+ "hashes": [
+ "sha256:62cdade8b5530f0b185e09855dd422bc05c0bbff6b72ff61381c09dac7befd8c",
+ "sha256:a9495356ca1d66ed197a0f72b41eb1823cf7ea8b5bd07191673e8147aecf8604"
+ ],
+ "version": "==0.4.7"
+ },
+ "pyasn1-modules": {
+ "hashes": [
+ "sha256:43c17a83c155229839cc5c6b868e8d0c6041dba149789b6d6e28801c64821722",
+ "sha256:e30199a9d221f1b26c885ff3d87fd08694dbbe18ed0e8e405a2a7126d30ce4c0"
+ ],
+ "version": "==0.2.6"
+ },
"pycparser": {
"hashes": [
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3"
],
"version": "==2.19"
},
+ "pyjwt": {
+ "hashes": [
+ "sha256:5c6eca3c2940464d106b99ba83b00c6add741c9becaec087fb7ccdefea71350e",
+ "sha256:8d59a976fb773f3e6a39c85636357c4f0e242707394cadadd9814f5cbaa20e96"
+ ],
+ "version": "==1.7.1"
+ },
"pyopenssl": {
"hashes": [
"sha256:26ff56a6b5ecaf3a2a59f132681e2a80afcc76b4f902f612f518f92c2a1bf854",
@@ -223,6 +280,31 @@
"index": "pypi",
"version": "==1.3.3"
},
+ "python-dateutil": {
+ "hashes": [
+ "sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb",
+ "sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e"
+ ],
+ "version": "==2.8.0"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9",
+ "sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4",
+ "sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8",
+ "sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696",
+ "sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34",
+ "sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9",
+ "sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73",
+ "sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299",
+ "sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b",
+ "sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae",
+ "sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681",
+ "sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41",
+ "sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8"
+ ],
+ "version": "==5.1.2"
+ },
"requests": {
"hashes": [
"sha256:502a824f31acdacb3a35b6690b5fbf0bc41d63a24a45c4004352b0242707598e",
@@ -231,6 +313,20 @@
"index": "pypi",
"version": "==2.21.0"
},
+ "requests-oauthlib": {
+ "hashes": [
+ "sha256:bd6533330e8748e94bf0b214775fed487d309b8b8fe823dc45641ebcd9a32f57",
+ "sha256:d3ed0c8f2e3bbc6b344fa63d6f933745ab394469da38db16bdddb461c7e25140"
+ ],
+ "version": "==1.2.0"
+ },
+ "rsa": {
+ "hashes": [
+ "sha256:14ba45700ff1ec9eeb206a2ce76b32814958a98e372006c8fb76ba820211be66",
+ "sha256:1a836406405730121ae9823e19c6e806c62bbad73f890574fff50efa4122c487"
+ ],
+ "version": "==4.0"
+ },
"six": {
"hashes": [
"sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c",
diff --git a/README.md b/README.md
index 9cda08a..2aebbea 100644
--- a/README.md
+++ b/README.md
@@ -214,6 +214,15 @@
between outside clients and containers and between the components installed
by a helm chart.
+### kubernetes
+
+Marks tests that require a Kubernetes cluster. These tests are used to test the
+functionality of the helm charts in this project and the interaction of the
+components installed by them. The cluster should not be used for other purposes
+to minimize unforeseen interactions.
+
+Currently these tests also require access to an EFS volume.
+
### slow
Marks tests that need an above average time to run.
diff --git a/setup.cfg b/setup.cfg
index 7708c62..0c90095 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,5 +5,6 @@
docker: Tests that require to run and interact with a docker container
incremental: Test classes containing tests that need to run incrementally
integration: Integration tests
+ kubernetes: Tests that require a Kubernetes cluster
slow: Tests that run slower than the average test
structure: Structure tests
diff --git a/tests/conftest.py b/tests/conftest.py
index de98db8..f49b90d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -72,6 +72,41 @@
help="If set, the docker cache will be used when building container images.",
)
parser.addoption(
+ "--kubeconfig",
+ action="store",
+ default=None,
+        help="Kubeconfig to use for cluster connection. If none is given the currently "
+ + "configured context is used.",
+ )
+ parser.addoption(
+ "--infra-provider",
+ action="store",
+ default="aws",
+ choices=["aws"],
+ help="Infrastructure provider used for Kubernetes cluster deployments."
+        + " (default: aws; options: [aws])",
+ )
+ parser.addoption(
+ "--efs-id",
+ action="store",
+ default=None,
+ help="ID of EFS-volume. Required to set up shared volume, if using AWS as "
+ + "infrastructure provider.",
+ )
+ parser.addoption(
+ "--efs-region",
+ action="store",
+ default=None,
+ help="AWS region of EFS-volume. Required to set up shared volume, if using AWS"
+        + " as infrastructure provider.",
+ )
+ parser.addoption(
+ "--ingress-url",
+ action="store",
+ default=None,
+ help="URL of the ingress domain used by the cluster.",
+ )
+ parser.addoption(
"--skip-slow", action="store_true", help="If set, skip slow tests."
)
diff --git a/tests/helm-charts/conftest.py b/tests/helm-charts/conftest.py
new file mode 100644
index 0000000..02c8f05
--- /dev/null
+++ b/tests/helm-charts/conftest.py
@@ -0,0 +1,266 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from abc import ABC, abstractmethod
+from argparse import ArgumentTypeError
+
+import base64
+import json
+import re
+import subprocess
+import warnings
+
+from kubernetes import client, config
+
+import pytest
+
+from helm import Helm
+
+HELM_SERVICE_ACCOUNT_NAME = "helm"
+HELM_SERVICE_ACCOUNT_NAMESPACE = "kube-system"
+
+
+class AbstractStorageProvisioner(ABC):
+ def __init__(self, name):
+ self.name = name
+
+ @abstractmethod
+ def deploy(self):
+ """
+ Deploy provisioner on cluster
+ """
+
+ @abstractmethod
+ def delete(self):
+ """
+ Delete provisioner from cluster
+ """
+
+
+class EFSProvisioner(AbstractStorageProvisioner):
+ def __init__(self, efs_id, efs_region, chart_name="efs"):
+ super().__init__(chart_name)
+
+ self.efs_id = efs_id
+ self.efs_region = efs_region
+
+ self.helm = None
+
+ def set_helm_connector(self, helm):
+ self.helm = helm
+
+ def deploy(self):
+ chart_opts = {
+ "efsProvisioner.efsFileSystemId": self.efs_id,
+ "efsProvisioner.awsRegion": self.efs_region,
+ "efsProvisioner.storageClass.name": "shared-storage",
+ }
+
+ res = self.helm.install(
+ "stable/efs-provisioner",
+ self.name,
+ set_values=chart_opts,
+ fail_on_err=False,
+ )
+
+ if res.returncode == 0:
+ return
+
+ if re.match(r"Error: a release named efs already exists.", res.stderr):
+ warnings.warn(
+ "Kubernetes Cluster not empty. EFS provisioner already exists."
+ )
+ else:
+ print(res.stderr)
+ raise subprocess.CalledProcessError(
+ res.returncode, res.args, output=res.stdout, stderr=res.stderr
+ )
+
+ def delete(self):
+ try:
+ self.helm.delete(self.name)
+ except subprocess.CalledProcessError as exc:
+ print("deletion of EFS-provisioner failed: ", exc)
+
+
+class TestCluster:
+ def __init__(self, kube_config, storage_provisioner, registry):
+ self.kube_config = kube_config
+ self.registry = registry
+ self.storage_provisioner = storage_provisioner
+
+ self.current_context = None
+ self.helm = None
+
+ def _load_kube_config(self):
+ config.load_kube_config(config_file=self.kube_config)
+ _, context = config.list_kube_config_contexts(config_file=self.kube_config)
+ self.current_context = context["name"]
+
+ def _create_and_deploy_helm_crb(self):
+ crb_meta = client.V1ObjectMeta(name="helm")
+ crb_name = "cluster-admin"
+ crb_role_ref = client.V1RoleRef(
+ api_group="rbac.authorization.k8s.io", kind="ClusterRole", name=crb_name
+ )
+ crb_subjects = [
+ client.V1Subject(
+ kind="ServiceAccount",
+ name=HELM_SERVICE_ACCOUNT_NAME,
+ namespace=HELM_SERVICE_ACCOUNT_NAMESPACE,
+ )
+ ]
+ crb = client.V1ClusterRoleBinding(
+ metadata=crb_meta, role_ref=crb_role_ref, subjects=crb_subjects
+ )
+
+ rbac_v1 = client.RbacAuthorizationV1Api()
+ try:
+ rbac_v1.create_cluster_role_binding(crb)
+ except client.rest.ApiException as exc:
+ if exc.status == 409 and exc.reason == "Conflict":
+ warnings.warn(
+ "Kubernetes Cluster not empty. Helm cluster role binding already exists."
+ )
+ else:
+ raise exc
+
+ def _create_and_deploy_helm_service_account(self):
+ helm_service_account_metadata = client.V1ObjectMeta(
+ name=HELM_SERVICE_ACCOUNT_NAME, namespace=HELM_SERVICE_ACCOUNT_NAMESPACE
+ )
+ helm_service_account = client.V1ServiceAccount(
+ metadata=helm_service_account_metadata
+ )
+
+ core_v1 = client.CoreV1Api()
+ try:
+ core_v1.create_namespaced_service_account(
+ HELM_SERVICE_ACCOUNT_NAMESPACE, helm_service_account
+ )
+ except client.rest.ApiException as exc:
+ if exc.status == 409 and exc.reason == "Conflict":
+ warnings.warn(
+ "Kubernetes Cluster not empty. Helm service account already exists."
+ )
+ else:
+ raise exc
+
+ def _create_image_pull_secret(self):
+ secret_metadata = client.V1ObjectMeta(name="image-pull-secret")
+ auth_string = str.encode(
+ "%s:%s" % (self.registry["user"], self.registry["pwd"])
+ )
+ secret_data = {
+ "auths": {
+ self.registry["url"]: {
+ "auth": base64.b64encode(auth_string).decode("utf-8")
+ }
+ }
+ }
+ secret_data = json.dumps(secret_data).encode()
+ secret_body = client.V1Secret(
+ api_version="v1",
+ kind="Secret",
+ metadata=secret_metadata,
+ type="kubernetes.io/dockerconfigjson",
+ data={".dockerconfigjson": base64.b64encode(secret_data).decode("utf-8")},
+ )
+ core_v1 = client.CoreV1Api()
+ try:
+ core_v1.create_namespaced_secret("default", secret_body)
+ except client.rest.ApiException as exc:
+ if exc.status == 409 and exc.reason == "Conflict":
+ warnings.warn(
+ "Kubernetes Cluster not empty. Image pull secret already exists."
+ )
+ else:
+ raise exc
+
+ def init_helm(self):
+ self._create_and_deploy_helm_crb()
+ self._create_and_deploy_helm_service_account()
+ self.helm = Helm(self.kube_config, self.current_context)
+ self.helm.init(HELM_SERVICE_ACCOUNT_NAME)
+
+ def remove_helm(self):
+ self.helm.reset()
+ apps_v1 = client.AppsV1Api()
+ replica_sets = apps_v1.list_namespaced_replica_set("kube-system")
+ for replica_set in replica_sets.items:
+ if re.match(r"tiller-deploy-.*", replica_set.metadata.name):
+ apps_v1.delete_namespaced_replica_set(
+ replica_set.metadata.name,
+ "kube-system",
+ body=client.V1DeleteOptions(),
+ )
+ break
+ core_v1 = client.CoreV1Api()
+ core_v1.delete_namespaced_service_account(
+ HELM_SERVICE_ACCOUNT_NAME,
+ HELM_SERVICE_ACCOUNT_NAMESPACE,
+ body=client.V1DeleteOptions(),
+ )
+ rbac_v1 = client.RbacAuthorizationV1Api()
+ rbac_v1.delete_cluster_role_binding(
+ HELM_SERVICE_ACCOUNT_NAME, body=client.V1DeleteOptions()
+ )
+
+ def install_storage_provisioner(self):
+ self.storage_provisioner.set_helm_connector(self.helm)
+ self.storage_provisioner.deploy()
+
+ def setup(self):
+ self._load_kube_config()
+ self._create_image_pull_secret()
+ self.init_helm()
+ self.install_storage_provisioner()
+
+ def cleanup(self):
+ self.helm.delete_all(exceptions=[self.storage_provisioner.name])
+ self.storage_provisioner.delete()
+ self.remove_helm()
+ core_v1 = client.CoreV1Api()
+ core_v1.delete_namespaced_secret(
+ "image-pull-secret", "default", body=client.V1DeleteOptions()
+ )
+
+
+@pytest.fixture(scope="session")
+def test_cluster(request):
+ kube_config = request.config.getoption("--kubeconfig")
+ infra_provider = request.config.getoption("--infra-provider").lower()
+
+ if infra_provider == "aws":
+ efs_id = request.config.getoption("--efs-id")
+ if not efs_id:
+ raise ArgumentTypeError("No EFS-ID was provided.")
+ efs_region = request.config.getoption("--efs-region")
+ if not efs_region:
+ raise ArgumentTypeError("No EFS-region was provided.")
+ storage_provisioner = EFSProvisioner(efs_id, efs_region)
+
+ registry = {
+ "url": request.config.getoption("--registry"),
+ "user": request.config.getoption("--registry-user"),
+ "pwd": request.config.getoption("--registry-pwd"),
+ }
+ test_cluster = TestCluster(kube_config, storage_provisioner, registry)
+ test_cluster.setup()
+
+ yield test_cluster
+
+ test_cluster.cleanup()
diff --git a/tests/helm-charts/gerrit-master/conftest.py b/tests/helm-charts/gerrit-master/conftest.py
new file mode 100644
index 0000000..19b5abc
--- /dev/null
+++ b/tests/helm-charts/gerrit-master/conftest.py
@@ -0,0 +1,46 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+import pytest
+
+
+@pytest.fixture(scope="module")
+def gerrit_master_deployment(
+ request,
+ repository_root,
+ test_cluster,
+ docker_tag,
+ gerrit_master_image,
+ gitgc_image,
+ gerrit_init_image,
+):
+ chart_path = os.path.join(repository_root, "helm-charts", "gerrit-master")
+ chart_name = "gerrit-master"
+ chart_opts = {
+ "images.registry.name": request.config.getoption("--registry"),
+ "images.version": docker_tag,
+ "gerritMaster.ingress.host": "master.%s"
+ % request.config.getoption("--ingress-url"),
+ }
+ test_cluster.helm.install(
+ chart_path, chart_name, set_values=chart_opts, fail_on_err=True
+ )
+
+ yield
+
+ test_cluster.helm.delete(chart_name)
diff --git a/tests/helm-charts/gerrit-master/test_chart_gerrit_master_setup.py b/tests/helm-charts/gerrit-master/test_chart_gerrit_master_setup.py
new file mode 100644
index 0000000..5a40968
--- /dev/null
+++ b/tests/helm-charts/gerrit-master/test_chart_gerrit_master_setup.py
@@ -0,0 +1,29 @@
+# pylint: disable=W0613
+
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+@pytest.mark.integration
+@pytest.mark.kubernetes
+def test_deployment(test_cluster, gerrit_master_deployment):
+ installed_charts = test_cluster.helm.list()
+ gerrit_master_chart = None
+ for chart in installed_charts:
+ if chart["Name"].startswith("gerrit-master"):
+ gerrit_master_chart = chart
+ assert gerrit_master_chart is not None
+ assert gerrit_master_chart["Status"] == "DEPLOYED"
diff --git a/tests/helpers/helm.py b/tests/helpers/helm.py
new file mode 100644
index 0000000..2c7aadc
--- /dev/null
+++ b/tests/helpers/helm.py
@@ -0,0 +1,189 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import subprocess
+
+
+class Helm:
+ def __init__(self, kubeconfig, kubecontext):
+ """Wrapper for Helm CLI.
+
+ Arguments:
+ kubeconfig {str} -- Path to kubeconfig-file describing the cluster to
+ connect to.
+ kubecontext {str} -- Name of the context to use.
+ """
+
+ self.kubeconfig = kubeconfig
+ self.kubecontext = kubecontext
+
+ def _exec_command(self, cmd, fail_on_err=True):
+ base_cmd = [
+ "helm",
+ "--kubeconfig",
+ self.kubeconfig,
+ "--kube-context",
+ self.kubecontext,
+ ]
+ return subprocess.run(
+ base_cmd + cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ check=fail_on_err,
+ text=True,
+ )
+
+ def init(self, serviceaccount):
+ """Installs tiller on the cluster.
+
+ Arguments:
+ serviceaccount {str} -- Name of the service account, which tiller is meant
+ to use.
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of the
+ executed command.
+ """
+
+ helm_cmd = ["init", "--wait", "--service-account", serviceaccount]
+ return self._exec_command(helm_cmd)
+
+ def install(self, chart, name, values_file=None, set_values=None, fail_on_err=True):
+ """Installs a chart on the cluster
+
+ Arguments:
+ chart {str} -- Release name or path of a helm chart
+ name {str} -- Name with which the chart will be installed on the cluster
+
+ Keyword Arguments:
+ values_file {str} -- Path to a custom values.yaml file (default: {None})
+ set_values {dict} -- Dictionary containing key-value-pairs that are used
+ to overwrite values in the values.yaml-file.
+ (default: {None})
+ fail_on_err {bool} -- Whether to fail with an exception if the installation
+ fails (default: {True})
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of the
+ executed command.
+ """
+
+ helm_cmd = ["install", chart, "--dep-up", "-n", name, "--wait"]
+ if values_file:
+ helm_cmd.extend(("-f", values_file))
+ if set_values:
+ opt_list = ["%s=%s" % (k, v) for k, v in set_values.items()]
+ helm_cmd.extend(("--set", ",".join(opt_list)))
+ return self._exec_command(helm_cmd, fail_on_err)
+
+ def list(self):
+ """Lists helm charts installed on the cluster.
+
+ Returns:
+            list -- List of helm chart releases installed on the cluster.
+ """
+
+ helm_cmd = ["list", "--all", "--output", "json"]
+ output = self._exec_command(helm_cmd).stdout
+ output = json.loads(output)
+ return output["Releases"]
+
+ def upgrade(
+ self,
+ chart,
+ name,
+ values_file=None,
+ set_values=None,
+ reuse_values=True,
+ fail_on_err=True,
+ ):
+ """Updates a chart on the cluster
+
+ Arguments:
+ chart {str} -- Release name or path of a helm chart
+ name {str} -- Name with which the chart will be installed on the cluster
+
+ Keyword Arguments:
+ values_file {str} -- Path to a custom values.yaml file (default: {None})
+ set_values {dict} -- Dictionary containing key-value-pairs that are used
+ to overwrite values in the values.yaml-file.
+ (default: {None})
+ reuse_values {bool} -- Whether to reuse existing not overwritten values
+ (default: {True})
+ fail_on_err {bool} -- Whether to fail with an exception if the installation
+ fails (default: {True})
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of the
+ executed command.
+ """
+ helm_cmd = ["upgrade", name, chart, "--wait"]
+ if values_file:
+ helm_cmd.extend(("-f", values_file))
+ if reuse_values:
+ helm_cmd.append("--reuse-values")
+ if set_values:
+ opt_list = ["%s=%s" % (k, v) for k, v in set_values.items()]
+ helm_cmd.extend(("--set", ",".join(opt_list)))
+ return self._exec_command(helm_cmd, fail_on_err)
+
+ def delete(self, name, purge=True):
+ """Deletes a chart from the cluster
+
+ Arguments:
+ name {str} -- Name of the chart to delete
+
+ Keyword Arguments:
+ purge {bool} -- Whether to also remove the release metadata as well
+ (default: {True})
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of the
+ executed command.
+ """
+
+ helm_cmd = ["delete", name]
+ if purge:
+ helm_cmd.append("--purge")
+ return self._exec_command(helm_cmd)
+
+ def delete_all(self, exceptions=None):
+ """Deletes all charts on the cluster
+
+ Keyword Arguments:
+ exceptions {list} -- List of chart names not to delete (default: {None})
+ """
+
+ charts = self.list()
+ for chart in charts:
+ if chart["Name"] in exceptions:
+ continue
+ self.delete(chart["Name"])
+
+ def reset(self):
+ """Uninstall Tiller from cluster
+
+ Returns:
+ CompletedProcess -- CompletedProcess-object returned by subprocess
+ containing details about the result and output of the
+ executed command.
+ """
+
+ helm_cmd = ["reset", "--force"]
+ return self._exec_command(helm_cmd, fail_on_err=True)