From d87c8a1bb9d5a41869322ebe3b2343281a98bee6 Mon Sep 17 00:00:00 2001 From: Ferenc Gerlits Date: Tue, 10 Feb 2026 09:50:49 +0100 Subject: [PATCH] MINIFICPP-2714 Move Kubernetes to modular docker tests --- .gitignore | 1 + .../containers/container.py | 4 +- .../steps/checking_steps.py | 1 + .../steps/flow_building_steps.py | 1 + .../integration/cluster/ContainerStore.py | 15 +- .../integration/cluster/DockerTestCluster.py | 2 +- .../cluster/DockerTestDirectoryBindings.py | 5 +- .../integration/cluster/KubernetesProxy.py | 161 ----------------- .../MinifiAsPodInKubernetesCluster.py | 73 -------- .../test/integration/features/environment.py | 16 -- .../test/integration/features/steps/steps.py | 27 --- .../KubernetesControllerService.py | 28 --- .../processors/CollectKubernetesPodMetrics.py | 25 --- .../minifi-conf/minifi-log.properties | 3 - .../kubernetes/minifi-conf/minifi.properties | 10 -- .../kubernetes/tests/features/environment.py | 41 +++++ .../tests}/features/kubernetes.feature | 30 ++-- .../features/resources}/daemon.namespace.yml | 0 .../resources}/hello-world-one.helper-pod.yml | 0 .../resources}/hello-world-two.helper-pod.yml | 0 .../metrics-server.dependencies.yml | 0 .../features/resources}/minifi.test-pod.yml | 6 +- .../namespace-reader.clusterrole.yml | 0 .../namespace-reader.clusterrolebinding.yml | 0 .../resources}/pod-reader.clusterrole.yml | 0 .../pod-reader.clusterrolebinding.yml | 0 .../tests/features/steps/kubernetes_proxy.py | 164 ++++++++++++++++++ .../minifi_as_pod_in_kubernetes_cluster.py | 35 ++++ .../kubernetes/tests/features/steps/steps.py | 59 +++++++ 29 files changed, 328 insertions(+), 379 deletions(-) delete mode 100644 docker/test/integration/cluster/KubernetesProxy.py delete mode 100644 docker/test/integration/cluster/containers/MinifiAsPodInKubernetesCluster.py delete mode 100644 docker/test/integration/minifi/controllers/KubernetesControllerService.py delete mode 100644 docker/test/integration/minifi/processors/CollectKubernetesPodMetrics.py delete mode 100644 docker/test/integration/resources/kubernetes/minifi-conf/minifi-log.properties delete mode 100644 docker/test/integration/resources/kubernetes/minifi-conf/minifi.properties create mode 100644 extensions/kubernetes/tests/features/environment.py rename {docker/test/integration => extensions/kubernetes/tests}/features/kubernetes.feature (76%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/daemon.namespace.yml (100%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/hello-world-one.helper-pod.yml (100%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/hello-world-two.helper-pod.yml (100%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/metrics-server.dependencies.yml (100%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/minifi.test-pod.yml (87%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/namespace-reader.clusterrole.yml (100%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/namespace-reader.clusterrolebinding.yml (100%) rename {docker/test/integration/resources/kubernetes/pods-etc => 
extensions/kubernetes/tests/features/resources}/pod-reader.clusterrole.yml (100%) rename {docker/test/integration/resources/kubernetes/pods-etc => extensions/kubernetes/tests/features/resources}/pod-reader.clusterrolebinding.yml (100%) create mode 100644 extensions/kubernetes/tests/features/steps/kubernetes_proxy.py create mode 100644 extensions/kubernetes/tests/features/steps/minifi_as_pod_in_kubernetes_cluster.py create mode 100644 extensions/kubernetes/tests/features/steps/steps.py diff --git a/.gitignore b/.gitignore index d30e23ff66..8900c53b26 100644 --- a/.gitignore +++ b/.gitignore @@ -69,6 +69,7 @@ __pycache__/ /logs packaging/msi/WixWin.wsi docker/behavex_output +behavex_output_modular output .vs/** diff --git a/behave_framework/src/minifi_test_framework/containers/container.py b/behave_framework/src/minifi_test_framework/containers/container.py index 528398e26d..db58af730e 100644 --- a/behave_framework/src/minifi_test_framework/containers/container.py +++ b/behave_framework/src/minifi_test_framework/containers/container.py @@ -201,10 +201,10 @@ def directory_contains_file_with_regex(self, directory_path: str, regex_str: str command = (f"find {safe_dir_path} -maxdepth 1 -type f -print0 | " f"xargs -0 -r grep -l -E -- {safe_regex_str}") - exit_code, output = self.exec_run(f"sh -c \"{command}\"") + exit_code, output = self.exec_run("sh -c {}".format(shlex.quote(command))) if exit_code != 0: - logging.warning(f"directory_contains_file_with_regex {output}") + logging.debug("While looking for regex %s in directory %s, grep returned exit code %d, output: %s", regex_str, directory_path, exit_code, output) return exit_code == 0 def path_with_content_exists(self, path: str, content: str) -> bool: diff --git a/behave_framework/src/minifi_test_framework/steps/checking_steps.py b/behave_framework/src/minifi_test_framework/steps/checking_steps.py index 410eb8a593..450c85bf5d 100644 --- a/behave_framework/src/minifi_test_framework/steps/checking_steps.py +++ b/behave_framework/src/minifi_test_framework/steps/checking_steps.py @@ -144,6 +144,7 @@ def step_impl(context: MinifiTestContext, num: int, directory: str, duration: st @then('at least one file in "{directory}" content match the following regex: "{regex_str}" in less than {duration}') +@then('the content of at least one file in the "{directory}" directory matches the \'{regex_str}\' regex in less than {duration}') def step_impl(context: MinifiTestContext, directory: str, regex_str: str, duration: str): duration_seconds = humanfriendly.parse_timespan(duration) assert wait_for_condition( diff --git a/behave_framework/src/minifi_test_framework/steps/flow_building_steps.py b/behave_framework/src/minifi_test_framework/steps/flow_building_steps.py index 08c6bd04dc..32258a51f0 100644 --- a/behave_framework/src/minifi_test_framework/steps/flow_building_steps.py +++ b/behave_framework/src/minifi_test_framework/steps/flow_building_steps.py @@ -190,6 +190,7 @@ def step_impl(context: MinifiTestContext, processor_name: str, relationship: str @step("{processor_name}'s {relationship} relationship is auto-terminated") +@step("the \"{relationship}\" relationship of the {processor_name} processor is auto-terminated") def step_impl(context: MinifiTestContext, processor_name: str, relationship: str): context.execute_steps(f'given {processor_name}\'s {relationship} relationship is auto-terminated in the "{DEFAULT_MINIFI_CONTAINER_NAME}" flow') diff --git a/docker/test/integration/cluster/ContainerStore.py b/docker/test/integration/cluster/ContainerStore.py 
index af588fa027..b5e3df7961 100644 --- a/docker/test/integration/cluster/ContainerStore.py +++ b/docker/test/integration/cluster/ContainerStore.py @@ -23,19 +23,17 @@ from .containers.PostgreSQLServerContainer import PostgreSQLServerContainer from .containers.SyslogUdpClientContainer import SyslogUdpClientContainer from .containers.SyslogTcpClientContainer import SyslogTcpClientContainer -from .containers.MinifiAsPodInKubernetesCluster import MinifiAsPodInKubernetesCluster from .FeatureContext import FeatureContext class ContainerStore: - def __init__(self, network, image_store, kubernetes_proxy, feature_id): + def __init__(self, network, image_store, feature_id): self.feature_id = feature_id self.minifi_options = MinifiOptions() self.containers = {} self.data_directories = {} self.network = network self.image_store = image_store - self.kubernetes_proxy = kubernetes_proxy self.nifi_options = NiFiOptions() def get_container_name_with_postfix(self, container_name: str): @@ -91,17 +89,6 @@ def acquire_container(self, context, container_name: str, engine='minifi-cpp', c network=self.network, image_store=self.image_store, command=command)) - elif engine == 'kubernetes': - return self.containers.setdefault(container_name, - MinifiAsPodInKubernetesCluster(feature_context=feature_context, - kubernetes_proxy=self.kubernetes_proxy, - config_dir=self.data_directories["kubernetes_config_dir"], - minifi_options=self.minifi_options, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) elif engine == 'http-proxy': return self.containers.setdefault(container_name, HttpProxyContainer(feature_context=feature_context, diff --git a/docker/test/integration/cluster/DockerTestCluster.py b/docker/test/integration/cluster/DockerTestCluster.py index 8c8904cdd8..32d158170f 100644 --- a/docker/test/integration/cluster/DockerTestCluster.py +++ b/docker/test/integration/cluster/DockerTestCluster.py @@ -30,7 +30,7 @@ def __init__(self, context, feature_id): self.segfault = False self.vols = {} self.container_communicator = DockerCommunicator() - self.container_store = ContainerStore(self.container_communicator.create_docker_network(feature_id), context.image_store, context.kubernetes_proxy, feature_id=feature_id) + self.container_store = ContainerStore(self.container_communicator.create_docker_network(feature_id), context.image_store, feature_id=feature_id) self.azure_checker = AzureChecker(self.container_communicator) self.postgres_checker = PostgresChecker(self.container_communicator) self.modbus_checker = ModbusChecker(self.container_communicator) diff --git a/docker/test/integration/cluster/DockerTestDirectoryBindings.py b/docker/test/integration/cluster/DockerTestDirectoryBindings.py index 80e04c0ecc..8a4e58759d 100644 --- a/docker/test/integration/cluster/DockerTestDirectoryBindings.py +++ b/docker/test/integration/cluster/DockerTestDirectoryBindings.py @@ -45,9 +45,7 @@ def create_new_data_directories(self): "resources_dir": "/tmp/.nifi-test-resources." + self.feature_id, "system_certs_dir": "/tmp/.nifi-test-resources." + self.feature_id + "/system_certs_dir", "minifi_config_dir": "/tmp/.nifi-test-minifi-config-dir." + self.feature_id, - "nifi_config_dir": "/tmp/.nifi-test-nifi-config-dir." + self.feature_id, - "kubernetes_temp_dir": "/tmp/.nifi-test-kubernetes-temp-dir." + self.feature_id, - "kubernetes_config_dir": "/tmp/.nifi-test-kubernetes-config-dir." + self.feature_id + "nifi_config_dir": "/tmp/.nifi-test-nifi-config-dir." 
+ self.feature_id } [self.create_directory(directory) for directory in self.data_directories[self.feature_id].values()] @@ -88,7 +86,6 @@ def get_directory_bindings(self): vols[self.data_directories[self.feature_id]["system_certs_dir"]] = {"bind": "/usr/local/share/certs", "mode": "rw"} vols[self.data_directories[self.feature_id]["minifi_config_dir"]] = {"bind": "/tmp/minifi_config", "mode": "rw"} vols[self.data_directories[self.feature_id]["nifi_config_dir"]] = {"bind": "/tmp/nifi_config", "mode": "rw"} - vols[self.data_directories[self.feature_id]["kubernetes_config_dir"]] = {"bind": "/tmp/kubernetes_config", "mode": "rw"} return vols @staticmethod diff --git a/docker/test/integration/cluster/KubernetesProxy.py b/docker/test/integration/cluster/KubernetesProxy.py deleted file mode 100644 index 109c56b7ae..0000000000 --- a/docker/test/integration/cluster/KubernetesProxy.py +++ /dev/null @@ -1,161 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import docker -import glob -import logging -import os -import re -import stat -import subprocess -import time -import platform -from textwrap import dedent - - -class KubernetesProxy: - def __init__(self, temp_directory, resources_directory): - self.temp_directory = temp_directory - self.resources_directory = resources_directory - - self.kind_binary_path = os.path.join(self.temp_directory, 'kind') - self.kind_config_path = os.path.join(self.temp_directory, 'kind-config.yml') - self.__download_kind() - self.docker_client = docker.from_env() - - def cleanup(self): - if os.path.exists(self.kind_binary_path): - subprocess.run([self.kind_binary_path, 'delete', 'cluster']) - - def __download_kind(self): - is_x86 = platform.machine() in ("i386", "AMD64", "x86_64") - download_link = 'https://kind.sigs.k8s.io/dl/v0.18.0/kind-linux-amd64' - if not is_x86: - if 'Linux' in platform.system(): - download_link = 'https://kind.sigs.k8s.io/dl/v0.18.0/kind-linux-arm64' - else: - download_link = 'https://kind.sigs.k8s.io/dl/v0.18.0/kind-darwin-arm64' - - if not os.path.exists(self.kind_binary_path): - if subprocess.run(['curl', '-Lo', self.kind_binary_path, download_link]).returncode != 0: - raise Exception("Could not download kind") - os.chmod(self.kind_binary_path, stat.S_IXUSR) - - def create_config(self, volumes): - kind_config = dedent("""\ - apiVersion: kind.x-k8s.io/v1alpha4 - kind: Cluster - nodes: - - role: control-plane - """) - - if volumes: - kind_config += " extraMounts:\n" - - for host_path, container_path in volumes.items(): - kind_config += " - hostPath: {path}\n".format(path=host_path) - kind_config += " containerPath: {path}\n".format(path=container_path['bind']) - if container_path['mode'] != 'rw': - kind_config += " readOnly: true\n" - - with open(self.kind_config_path, 'wb') as config_file: - 
config_file.write(kind_config.encode('utf-8')) - - def start_cluster(self): - subprocess.run([self.kind_binary_path, 'delete', 'cluster']) - - if subprocess.run([self.kind_binary_path, 'create', 'cluster', '--config=' + self.kind_config_path]).returncode != 0: - raise Exception("Could not start the kind cluster") - - def load_docker_image(self, image_name, image_tag): - if subprocess.run([self.kind_binary_path, 'load', 'docker-image', image_name + ':' + image_tag]).returncode != 0: - raise Exception("Could not load the %s docker image into the kind cluster" % image_name) - - def create_helper_objects(self): - self.__wait_for_default_service_account('default') - namespaces = self.__create_objects_of_type(self.resources_directory, 'namespace') - for namespace in namespaces: - self.__wait_for_default_service_account(namespace) - - self.__create_objects_of_type(self.resources_directory, 'dependencies') - self.__create_objects_of_type(self.resources_directory, 'helper-pod') - self.__create_objects_of_type(self.resources_directory, 'clusterrole') - self.__create_objects_of_type(self.resources_directory, 'clusterrolebinding') - - self.__wait_for_pod_startup('default', 'hello-world-one') - self.__wait_for_pod_startup('default', 'hello-world-two') - self.__wait_for_pod_startup('kube-system', 'metrics-server') - - def create_minifi_pod(self): - self.__create_objects_of_type(self.resources_directory, 'test-pod') - self.__wait_for_pod_startup('daemon', 'minifi') - - def delete_pods(self): - self.__delete_objects_of_type('test-pod') - self.__delete_objects_of_type('helper-pod') - - def __wait_for_pod_startup(self, namespace, pod_name): - for _ in range(120): - (code, output) = self.docker_client.containers.get('kind-control-plane').exec_run(['kubectl', '-n', namespace, 'get', 'pods']) - if code == 0 and re.search(f'{pod_name}.*Running', output.decode('utf-8')): - return - time.sleep(1) - raise Exception(f"The pod {namespace}:{pod_name} in the Kubernetes cluster failed to start up") - - def __wait_for_default_service_account(self, namespace): - for _ in range(120): - (code, output) = self.docker_client.containers.get('kind-control-plane').exec_run(['kubectl', '-n', namespace, 'get', 'serviceaccount', 'default']) - if code == 0: - return - time.sleep(1) - raise Exception("Default service account for namespace '%s' not found" % namespace) - - def __create_objects_of_type(self, directory, type): - found_objects = [] - for full_file_name in glob.iglob(os.path.join(directory, f'*.{type}.yml')): - file_name = os.path.basename(full_file_name) - file_name_in_container = os.path.join('/var/tmp', file_name) - self.__copy_file_to_container(full_file_name, 'kind-control-plane', file_name_in_container) - - (code, output) = self.docker_client.containers.get('kind-control-plane').exec_run(['kubectl', 'apply', '-f', file_name_in_container]) - if code != 0: - raise Exception("Could not create kubernetes object from file '%s': %s" % full_file_name, output.decode('utf-8')) - - object_name = file_name.replace(f'.{type}.yml', '') - found_objects.append(object_name) - return found_objects - - def __delete_objects_of_type(self, type): - for full_file_name in glob.iglob(os.path.join(self.resources_directory, f'*.{type}.yml')): - file_name = os.path.basename(full_file_name) - file_name_in_container = os.path.join('/var/tmp', file_name) - - (code, output) = self.docker_client.containers.get('kind-control-plane').exec_run(['kubectl', 'delete', '-f', file_name_in_container, '--grace-period=0', '--force']) - if code == 0: - 
logging.info("Created component from file '%s': %s", full_file_name, output.decode('utf-8')) - else: - raise Exception("Could not delete kubernetes object from file '%s': %s", full_file_name, output.decode('utf-8')) - - def __copy_file_to_container(self, host_file, container_name, container_file): - if subprocess.run(['docker', 'cp', host_file, container_name + ':' + container_file]).returncode != 0: - raise Exception("Could not copy file '%s' into container '%s' as '%s'" % (host_file, container_name, container_file)) - - def get_logs(self, namespace, pod_name): - (code, output) = self.docker_client.containers.get('kind-control-plane').exec_run(['kubectl', '-n', namespace, 'logs', pod_name]) - if code == 0: - return output - else: - return None diff --git a/docker/test/integration/cluster/containers/MinifiAsPodInKubernetesCluster.py b/docker/test/integration/cluster/containers/MinifiAsPodInKubernetesCluster.py deleted file mode 100644 index 4a660906c2..0000000000 --- a/docker/test/integration/cluster/containers/MinifiAsPodInKubernetesCluster.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import docker -import logging -import os -import shutil - -from ..LogSource import LogSource -from .MinifiContainer import MinifiContainer - - -class MinifiAsPodInKubernetesCluster(MinifiContainer): - MINIFI_IMAGE_NAME = 'apacheminificpp' - MINIFI_IMAGE_TAG = 'docker_test' - - def __init__(self, feature_context, kubernetes_proxy, config_dir, minifi_options, name, vols, network, image_store, command=None): - test_dir = os.environ['TEST_DIRECTORY'] - shutil.copy(os.path.join(test_dir, 'resources', 'kubernetes', 'minifi-conf', 'minifi.properties'), config_dir) - shutil.copy(os.path.join(test_dir, 'resources', 'kubernetes', 'minifi-conf', 'minifi-log.properties'), config_dir) - super().__init__(feature_context=feature_context, - config_dir=config_dir, - options=minifi_options, - name=name, - vols=vols, - network=network, - image_store=image_store, - command=command) - - self.kubernetes_proxy = kubernetes_proxy - - docker_client = docker.from_env() - minifi_image = docker_client.images.get(MinifiAsPodInKubernetesCluster.MINIFI_IMAGE_NAME + ':' + MinifiContainer.MINIFI_TAG_PREFIX + MinifiContainer.MINIFI_VERSION) - minifi_image.tag(MinifiAsPodInKubernetesCluster.MINIFI_IMAGE_NAME, MinifiAsPodInKubernetesCluster.MINIFI_IMAGE_TAG) - - def _create_container_config_dir(self, config_dir): - return config_dir - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Setting up container: %s', self.name) - - self._create_config() - self.kubernetes_proxy.create_helper_objects() - self.kubernetes_proxy.load_docker_image(MinifiAsPodInKubernetesCluster.MINIFI_IMAGE_NAME, MinifiAsPodInKubernetesCluster.MINIFI_IMAGE_TAG) - self.kubernetes_proxy.create_minifi_pod() - - logging.info('Finished setting up container: %s', self.name) - - def log_source(self): - return LogSource.FROM_GET_APP_LOG_METHOD - - def get_app_log(self): - return 'OK', self.kubernetes_proxy.get_logs('daemon', 'minifi') - - def cleanup(self): - # cleanup is done through the kubernetes cluster in the environment.py - pass diff --git a/docker/test/integration/features/environment.py b/docker/test/integration/features/environment.py index 91ad1cd882..f5772d6ac9 100644 --- a/docker/test/integration/features/environment.py +++ b/docker/test/integration/features/environment.py @@ -27,7 +27,6 @@ from minifi import * # noqa from cluster.ImageStore import ImageStore # noqa from cluster.DockerTestDirectoryBindings import DockerTestDirectoryBindings # noqa -from cluster.KubernetesProxy import KubernetesProxy # noqa def inject_feature_id(context, step): @@ -60,14 +59,11 @@ def after_scenario(context, scenario): logging.info("Integration test teardown at {time:%H:%M:%S.%f}".format(time=datetime.datetime.now())) context.test.cleanup() context.directory_bindings.cleanup_io() - if context.kubernetes_proxy: - context.kubernetes_proxy.delete_pods() def before_all(context): context.config.setup_logging() context.image_store = ImageStore() - context.kubernetes_proxy = None def before_feature(context, feature): @@ -86,15 +82,3 @@ def before_feature(context, feature): context.directory_bindings.create_cert_files() context.root_ca_cert = context.directory_bindings.root_ca_cert context.root_ca_key = context.directory_bindings.root_ca_key - if "requires.kubernetes.cluster" in feature.tags: - context.kubernetes_proxy = KubernetesProxy( - context.directory_bindings.get_data_directories()["kubernetes_temp_dir"], - os.path.join(os.environ['TEST_DIRECTORY'], 'resources', 'kubernetes', 'pods-etc')) - 
context.kubernetes_proxy.create_config(context.directory_bindings.get_directory_bindings()) - context.kubernetes_proxy.start_cluster() - - -def after_feature(context, feature): - if "requires.kubernetes.cluster" in feature.tags and context.kubernetes_proxy: - context.kubernetes_proxy.cleanup() - context.kubernetes_proxy = None diff --git a/docker/test/integration/features/steps/steps.py b/docker/test/integration/features/steps/steps.py index 544a52bf9a..205b38769e 100644 --- a/docker/test/integration/features/steps/steps.py +++ b/docker/test/integration/features/steps/steps.py @@ -19,7 +19,6 @@ from minifi.controllers.SSLContextService import SSLContextService from minifi.controllers.ODBCService import ODBCService -from minifi.controllers.KubernetesControllerService import KubernetesControllerService from minifi.controllers.JsonRecordSetWriter import JsonRecordSetWriter from minifi.controllers.JsonTreeReader import JsonTreeReader from minifi.controllers.XMLReader import XMLReader @@ -125,12 +124,6 @@ def step_impl(context, processor_type): __create_processor(context, processor_type, processor_type, None, None, "minifi-cpp-flow") -@given("a {processor_type} processor in a Kubernetes cluster") -@given("a {processor_type} processor in the Kubernetes cluster") -def step_impl(context, processor_type): - __create_processor(context, processor_type, processor_type, None, None, "kubernetes", "kubernetes") - - @given("a set of processors in the \"{minifi_container_name}\" flow") def step_impl(context, minifi_container_name): container = context.test.acquire_container(context=context, name=minifi_container_name) @@ -455,26 +448,6 @@ def step_impl(context): container.add_controller(xml_record_set_writer) -# Kubernetes -def __set_up_the_kubernetes_controller_service(context, processor_name, service_property_name, properties): - kubernetes_controller_service = KubernetesControllerService("Kubernetes Controller Service", properties) - processor = context.test.get_node_by_name(processor_name) - processor.controller_services.append(kubernetes_controller_service) - processor.set_property(service_property_name, kubernetes_controller_service.name) - - -@given("the {processor_name} processor has a {service_property_name} which is a Kubernetes Controller Service") -@given("the {processor_name} processor has an {service_property_name} which is a Kubernetes Controller Service") -def step_impl(context, processor_name, service_property_name): - __set_up_the_kubernetes_controller_service(context, processor_name, service_property_name, {}) - - -@given("the {processor_name} processor has a {service_property_name} which is a Kubernetes Controller Service with the \"{property_name}\" property set to \"{property_value}\"") -@given("the {processor_name} processor has an {service_property_name} which is a Kubernetes Controller Service with the \"{property_name}\" property set to \"{property_value}\"") -def step_impl(context, processor_name, service_property_name, property_name, property_value): - __set_up_the_kubernetes_controller_service(context, processor_name, service_property_name, {property_name: property_value}) - - # azure storage setup @given("an Azure storage server is set up") def step_impl(context): diff --git a/docker/test/integration/minifi/controllers/KubernetesControllerService.py b/docker/test/integration/minifi/controllers/KubernetesControllerService.py deleted file mode 100644 index 5b024d873c..0000000000 --- a/docker/test/integration/minifi/controllers/KubernetesControllerService.py +++ /dev/null @@ 
-1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.ControllerService import ControllerService - - -class KubernetesControllerService(ControllerService): - def __init__(self, name=None, properties=None): - super(KubernetesControllerService, self).__init__(name=name) - - self.service_class = 'KubernetesControllerService' - - if properties is not None: - for key, value in properties.items(): - self.properties[key] = value diff --git a/docker/test/integration/minifi/processors/CollectKubernetesPodMetrics.py b/docker/test/integration/minifi/processors/CollectKubernetesPodMetrics.py deleted file mode 100644 index edfac441e4..0000000000 --- a/docker/test/integration/minifi/processors/CollectKubernetesPodMetrics.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from ..core.Processor import Processor - - -class CollectKubernetesPodMetrics(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'TIMER_DRIVEN', 'scheduling period': '1 second'}): - super(CollectKubernetesPodMetrics, self).__init__( - context=context, - clazz='CollectKubernetesPodMetrics', - schedule=schedule) diff --git a/docker/test/integration/resources/kubernetes/minifi-conf/minifi-log.properties b/docker/test/integration/resources/kubernetes/minifi-conf/minifi-log.properties deleted file mode 100644 index f350eb80ed..0000000000 --- a/docker/test/integration/resources/kubernetes/minifi-conf/minifi-log.properties +++ /dev/null @@ -1,3 +0,0 @@ -spdlog.pattern=[%Y-%m-%d %H:%M:%S.%e] [%n] [%l] %v -appender.stdout=stdout -logger.root=INFO,stdout diff --git a/docker/test/integration/resources/kubernetes/minifi-conf/minifi.properties b/docker/test/integration/resources/kubernetes/minifi-conf/minifi.properties deleted file mode 100644 index 0fdb377b11..0000000000 --- a/docker/test/integration/resources/kubernetes/minifi-conf/minifi.properties +++ /dev/null @@ -1,10 +0,0 @@ -nifi.administrative.yield.duration=30 sec -nifi.bored.yield.duration=100 millis - -nifi.provenance.repository.max.storage.time=1 MIN -nifi.provenance.repository.max.storage.size=1 MB -nifi.provenance.repository.class.name=NoOpRepository -nifi.content.repository.class.name=DatabaseContentRepository - -nifi.c2.root.classes=DeviceInfoNode,AgentInformation,FlowInformation,AssetInformation -nifi.c2.full.heartbeat=false diff --git a/extensions/kubernetes/tests/features/environment.py b/extensions/kubernetes/tests/features/environment.py new file mode 100644 index 0000000000..f25e1dd527 --- /dev/null +++ b/extensions/kubernetes/tests/features/environment.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import docker +import os + +from minifi_test_framework.core.hooks import common_after_scenario, common_before_scenario, get_minifi_container_image +from steps.kubernetes_proxy import KubernetesProxy + + +def before_feature(context, feature): + if "rpm" in os.environ['MINIFI_TAG_PREFIX']: + feature.skip("This feature is not yet supported on RPM installed images") + + minifi_image = docker.from_env().images.get(get_minifi_container_image()) + minifi_image.tag("apacheminificpp", "docker_test") + + context.kubernetes_proxy = KubernetesProxy() + context.kubernetes_proxy.delete_cluster() + + +def before_scenario(context, scenario): + common_before_scenario(context, scenario) + context.kubernetes_proxy.create_cluster() + + +def after_scenario(context, scenario): + common_after_scenario(context, scenario) + context.kubernetes_proxy.delete_cluster() diff --git a/docker/test/integration/features/kubernetes.feature b/extensions/kubernetes/tests/features/kubernetes.feature similarity index 76% rename from docker/test/integration/features/kubernetes.feature rename to extensions/kubernetes/tests/features/kubernetes.feature index a88dd417d2..33ccdf2154 100644 --- a/docker/test/integration/features/kubernetes.feature +++ b/extensions/kubernetes/tests/features/kubernetes.feature @@ -13,13 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -@requires.kubernetes.cluster @ENABLE_KUBERNETES -@SKIP_RPM -Feature: TailFile can collect logs from Kubernetes pods - - Background: - Given the content of "/tmp/output" is monitored +Feature: MiNiFi can get logs and metrics from Kubernetes pods Scenario: Collect all logs from the default namespace Given a TailFile processor in a Kubernetes cluster @@ -30,9 +25,12 @@ Feature: TailFile can collect logs from Kubernetes pods And the TailFile processor has an Attribute Provider Service which is a Kubernetes Controller Service And a PutFile processor in the Kubernetes cluster And the "Directory" property of the PutFile processor is set to "/tmp/output" + And the "Conflict Resolution Strategy" property of the PutFile processor is set to "ignore" And the "success" relationship of the TailFile processor is connected to the PutFile + And the "success" relationship of the PutFile processor is auto-terminated When the MiNiFi instance starts up - Then two flowfiles with the contents "Hello World!" and "Hello again, World!" are placed in the monitored directory in less than 30 seconds + Then the content of at least one file in the "/tmp/output" directory matches the '[0-9TZ:.-]+ stdout F Hello World!' regex in less than 30 seconds + And the content of at least one file in the "/tmp/output" directory matches the '[0-9TZ:.-]+ stdout F Hello again, World!' 
regex in less than 30 seconds Scenario: Collect logs from selected pods Given a TailFile processor in a Kubernetes cluster @@ -43,9 +41,11 @@ Feature: TailFile can collect logs from Kubernetes pods And the TailFile processor has an Attribute Provider Service which is a Kubernetes Controller Service with the "Pod Name Filter" property set to ".*one" And a PutFile processor in the Kubernetes cluster And the "Directory" property of the PutFile processor is set to "/tmp/output" + And the "Conflict Resolution Strategy" property of the PutFile processor is set to "ignore" And the "success" relationship of the TailFile processor is connected to the PutFile + And the "success" relationship of the PutFile processor is auto-terminated When the MiNiFi instance starts up - Then one flowfile with the contents "Hello World!" is placed in the monitored directory in less than 30 seconds + Then the content of at least one file in the "/tmp/output" directory matches the '[0-9TZ:.-]+ stdout F Hello World!' regex in less than 30 seconds Scenario: Collect logs from selected containers Given a TailFile processor in a Kubernetes cluster @@ -56,9 +56,11 @@ Feature: TailFile can collect logs from Kubernetes pods And the TailFile processor has an Attribute Provider Service which is a Kubernetes Controller Service with the "Container Name Filter" property set to "echo-[^o].." And a PutFile processor in the Kubernetes cluster And the "Directory" property of the PutFile processor is set to "/tmp/output" + And the "Conflict Resolution Strategy" property of the PutFile processor is set to "ignore" And the "success" relationship of the TailFile processor is connected to the PutFile + And the "success" relationship of the PutFile processor is auto-terminated When the MiNiFi instance starts up - Then one flowfile with the contents "Hello again, World!" is placed in the monitored directory in less than 30 seconds + Then the content of at least one file in the "/tmp/output" directory matches the '[0-9TZ:.-]+ stdout F Hello again, World!' 
regex in less than 30 seconds Scenario: Pod name etc are added as flow file attributes Given a TailFile processor in a Kubernetes cluster @@ -69,6 +71,7 @@ Feature: TailFile can collect logs from Kubernetes pods And the TailFile processor has an Attribute Provider Service which is a Kubernetes Controller Service with the "Pod Name Filter" property set to ".*one" And a LogAttribute processor in the Kubernetes cluster And the "success" relationship of the TailFile processor is connected to the LogAttribute + And the "success" relationship of the LogAttribute processor is auto-terminated When the MiNiFi instance starts up Then the Minifi logs contain the following message: "key:kubernetes.namespace value:default" in less than 30 seconds And the Minifi logs contain the following message: "key:kubernetes.pod value:hello-world-one" in less than 1 second @@ -81,8 +84,9 @@ Feature: TailFile can collect logs from Kubernetes pods And a PutFile processor in the Kubernetes cluster And the "Directory" property of the PutFile processor is set to "/tmp/output" And the "success" relationship of the CollectKubernetesPodMetrics processor is connected to the PutFile + And the "success" relationship of the PutFile processor is auto-terminated When the MiNiFi instance starts up - Then at least one flowfile with the content '"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1"' is placed in the monitored directory in less than 2 minutes + Then the content of at least one file in the "/tmp/output" directory matches the '"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1"' regex in less than 2 minutes Scenario: Collect metrics from selected pods Given a CollectKubernetesPodMetrics processor in a Kubernetes cluster @@ -90,8 +94,9 @@ Feature: TailFile can collect logs from Kubernetes pods And a PutFile processor in the Kubernetes cluster And the "Directory" property of the PutFile processor is set to "/tmp/output" And the "success" relationship of the CollectKubernetesPodMetrics processor is connected to the PutFile + And the "success" relationship of the PutFile processor is auto-terminated When the MiNiFi instance starts up - Then at least one flowfile with the content '"metadata":{"name":"hello-world-one","namespace":"default"' is placed in the monitored directory in less than 2 minutes + Then the content of at least one file in the "/tmp/output" directory matches the '"metadata":{"name":"hello-world-one","namespace":"default"' regex in less than 2 minutes Scenario: Collect metrics from selected containers Given a CollectKubernetesPodMetrics processor in a Kubernetes cluster @@ -99,5 +104,6 @@ Feature: TailFile can collect logs from Kubernetes pods And a PutFile processor in the Kubernetes cluster And the "Directory" property of the PutFile processor is set to "/tmp/output" And the "success" relationship of the CollectKubernetesPodMetrics processor is connected to the PutFile + And the "success" relationship of the PutFile processor is auto-terminated When the MiNiFi instance starts up - Then at least one flowfile with the content '"containers":[{"name":"echo-two","usage":{"cpu":"0","memory":' is placed in the monitored directory in less than 2 minutes + Then the content of at least one file in the "/tmp/output" directory matches the '"containers":\[{"name":"echo-two","usage":{"cpu":"0","memory":' regex in less than 2 minutes diff --git a/docker/test/integration/resources/kubernetes/pods-etc/daemon.namespace.yml b/extensions/kubernetes/tests/features/resources/daemon.namespace.yml similarity 
index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/daemon.namespace.yml rename to extensions/kubernetes/tests/features/resources/daemon.namespace.yml diff --git a/docker/test/integration/resources/kubernetes/pods-etc/hello-world-one.helper-pod.yml b/extensions/kubernetes/tests/features/resources/hello-world-one.helper-pod.yml similarity index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/hello-world-one.helper-pod.yml rename to extensions/kubernetes/tests/features/resources/hello-world-one.helper-pod.yml diff --git a/docker/test/integration/resources/kubernetes/pods-etc/hello-world-two.helper-pod.yml b/extensions/kubernetes/tests/features/resources/hello-world-two.helper-pod.yml similarity index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/hello-world-two.helper-pod.yml rename to extensions/kubernetes/tests/features/resources/hello-world-two.helper-pod.yml diff --git a/docker/test/integration/resources/kubernetes/pods-etc/metrics-server.dependencies.yml b/extensions/kubernetes/tests/features/resources/metrics-server.dependencies.yml similarity index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/metrics-server.dependencies.yml rename to extensions/kubernetes/tests/features/resources/metrics-server.dependencies.yml diff --git a/docker/test/integration/resources/kubernetes/pods-etc/minifi.test-pod.yml b/extensions/kubernetes/tests/features/resources/minifi.test-pod.yml similarity index 87% rename from docker/test/integration/resources/kubernetes/pods-etc/minifi.test-pod.yml rename to extensions/kubernetes/tests/features/resources/minifi.test-pod.yml index 5a3ea3191d..57cb863d73 100644 --- a/docker/test/integration/resources/kubernetes/pods-etc/minifi.test-pod.yml +++ b/extensions/kubernetes/tests/features/resources/minifi.test-pod.yml @@ -16,7 +16,7 @@ spec: - name: var-log-pods mountPath: /var/log/pods readOnly: true - - name: tmp-minifi-config + - name: minifi-conf mountPath: /opt/minifi/minifi-current/conf readOnly: false - name: tmp-output @@ -25,9 +25,9 @@ spec: - name: var-log-pods hostPath: path: /var/log/pods - - name: tmp-minifi-config + - name: minifi-conf hostPath: - path: /tmp/kubernetes_config + path: /tmp/minifi_conf - name: tmp-output hostPath: path: /tmp/output diff --git a/docker/test/integration/resources/kubernetes/pods-etc/namespace-reader.clusterrole.yml b/extensions/kubernetes/tests/features/resources/namespace-reader.clusterrole.yml similarity index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/namespace-reader.clusterrole.yml rename to extensions/kubernetes/tests/features/resources/namespace-reader.clusterrole.yml diff --git a/docker/test/integration/resources/kubernetes/pods-etc/namespace-reader.clusterrolebinding.yml b/extensions/kubernetes/tests/features/resources/namespace-reader.clusterrolebinding.yml similarity index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/namespace-reader.clusterrolebinding.yml rename to extensions/kubernetes/tests/features/resources/namespace-reader.clusterrolebinding.yml diff --git a/docker/test/integration/resources/kubernetes/pods-etc/pod-reader.clusterrole.yml b/extensions/kubernetes/tests/features/resources/pod-reader.clusterrole.yml similarity index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/pod-reader.clusterrole.yml rename to extensions/kubernetes/tests/features/resources/pod-reader.clusterrole.yml diff --git 
a/docker/test/integration/resources/kubernetes/pods-etc/pod-reader.clusterrolebinding.yml b/extensions/kubernetes/tests/features/resources/pod-reader.clusterrolebinding.yml similarity index 100% rename from docker/test/integration/resources/kubernetes/pods-etc/pod-reader.clusterrolebinding.yml rename to extensions/kubernetes/tests/features/resources/pod-reader.clusterrolebinding.yml diff --git a/extensions/kubernetes/tests/features/steps/kubernetes_proxy.py b/extensions/kubernetes/tests/features/steps/kubernetes_proxy.py new file mode 100644 index 0000000000..c23a164e35 --- /dev/null +++ b/extensions/kubernetes/tests/features/steps/kubernetes_proxy.py @@ -0,0 +1,164 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import docker +import glob +import os +import platform +import re +import stat +import subprocess +import tempfile +import time +from pathlib import Path +from textwrap import dedent + + +KUBERNETES_CONTAINER_NAME = "kind-control-plane" + + +class KubernetesProxy: + def __init__(self): + self.temp_directory = tempfile.TemporaryDirectory() + + self.resources_directory = Path(__file__).resolve().parent.parent / "resources" + self.minifi_conf_directory = os.path.join(self.temp_directory.name, 'minifi_conf') + os.mkdir(self.minifi_conf_directory) + + self.kind_binary_path = os.path.join(self.temp_directory.name, 'kind') + self.kind_config_path = os.path.join(self.temp_directory.name, 'kind-config.yml') + self.__download_kind() + self.__create_kind_config() + + self.docker_client = docker.from_env() + self.status = "initialized" + + def __download_kind(self): + is_x86 = platform.machine() in ("i386", "AMD64", "x86_64") + download_link = 'https://kind.sigs.k8s.io/dl/v0.31.0/kind-linux-amd64' + if not is_x86: + if 'Linux' in platform.system(): + download_link = 'https://kind.sigs.k8s.io/dl/v0.31.0/kind-linux-arm64' + else: + download_link = 'https://kind.sigs.k8s.io/dl/v0.31.0/kind-darwin-arm64' + + if not os.path.exists(self.kind_binary_path): + if subprocess.run(['curl', '-Lo', self.kind_binary_path, download_link]).returncode != 0: + raise Exception("Could not download kind") + os.chmod(self.kind_binary_path, stat.S_IXUSR) + + def __create_kind_config(self): + kind_config = dedent(f"""\ + apiVersion: kind.x-k8s.io/v1alpha4 + kind: Cluster + nodes: + - role: control-plane + extraMounts: + - hostPath: {self.resources_directory} + containerPath: /var/tmp + readOnly: true + - hostPath: {self.minifi_conf_directory} + containerPath: /tmp/minifi_conf + """) + + with open(self.kind_config_path, 'wb') as config_file: + config_file.write(kind_config.encode('utf-8')) + + def write_minifi_conf_file(self, file_name: str, content: str): + file_path = os.path.join(self.minifi_conf_directory, file_name) + with open(file_path, "w") as file: + file.write(content) + + 
def create_cluster(self): + if subprocess.run([self.kind_binary_path, 'create', 'cluster', '--config=' + self.kind_config_path]).returncode != 0: + raise Exception("Could not create the kind Kubernetes cluster") + self.status = "running" + + def delete_cluster(self): + if subprocess.run([self.kind_binary_path, 'delete', 'cluster']).returncode != 0: + raise Exception("Could not delete the kind Kubernetes cluster") + self.status = "exited" + + def load_docker_image(self, image_name: str): + if subprocess.run([self.kind_binary_path, 'load', 'docker-image', image_name]).returncode != 0: + raise Exception("Could not load the %s docker image into the kind Kubernetes cluster" % image_name) + + def create_helper_objects(self): + self.__wait_for_default_service_account('default') + namespaces = self.__create_objects_of_type('namespace') + for namespace in namespaces: + self.__wait_for_default_service_account(namespace) + + self.__create_objects_of_type('dependencies') + self.__create_objects_of_type('helper-pod') + self.__create_objects_of_type('clusterrole') + self.__create_objects_of_type('clusterrolebinding') + + self.__wait_for_pod_startup('default', 'hello-world-one') + self.__wait_for_pod_startup('default', 'hello-world-two') + self.__wait_for_pod_startup('kube-system', 'metrics-server') + + def create_minifi_pod(self): + self.__create_objects_of_type('test-pod') + self.__wait_for_pod_startup('daemon', 'minifi') + + def __wait_for_pod_startup(self, namespace: str, pod_name: str): + for i in range(120): + if i > 0: + time.sleep(1) + (code, output) = self.docker_client.containers.get(KUBERNETES_CONTAINER_NAME).exec_run(['kubectl', '-n', namespace, 'get', 'pods']) + if code == 0 and re.search(f'{pod_name}.*Running', output.decode('utf-8')): + return + raise Exception(f"The pod {namespace}:{pod_name} in the Kubernetes cluster failed to start up") + + def __wait_for_default_service_account(self, namespace: str): + for i in range(120): + if i > 0: + time.sleep(1) + (code, output) = self.docker_client.containers.get(KUBERNETES_CONTAINER_NAME).exec_run(['kubectl', '-n', namespace, 'get', 'serviceaccount', 'default']) + if code == 0: + return + raise Exception("Default service account for namespace '%s' not found" % namespace) + + def __create_objects_of_type(self, type: str) -> list[str]: + found_objects = [] + for full_file_name in glob.iglob(os.path.join(self.resources_directory, f'*.{type}.yml')): + file_name = os.path.basename(full_file_name) + file_name_in_container = os.path.join('/var/tmp', file_name) + + (code, output) = self.docker_client.containers.get(KUBERNETES_CONTAINER_NAME).exec_run(['kubectl', 'apply', '-f', file_name_in_container]) + if code != 0: + raise Exception("Could not create kubernetes object from file '%s': %s" % (full_file_name, output.decode('utf-8'))) + + object_name = file_name.replace(f'.{type}.yml', '') + found_objects.append(object_name) + + return found_objects + + def reload(self): + pass + + def exec_run(self, command: str) -> tuple[int | None, bytes]: + try: + return self.docker_client.containers.get(KUBERNETES_CONTAINER_NAME).exec_run(command) + except docker.errors.NotFound: + return None, b"The kind Kubernetes cluster is not running."
+ + def logs(self) -> bytes: + (code, output) = self.docker_client.containers.get(KUBERNETES_CONTAINER_NAME).exec_run(['kubectl', '-n', 'daemon', 'logs', 'minifi']) + if code == 0: + return output + else: + raise Exception("Could not get logs from the kind Kubernetes cluster, error %d: %s" % (code, output.decode('utf-8'))) diff --git a/extensions/kubernetes/tests/features/steps/minifi_as_pod_in_kubernetes_cluster.py b/extensions/kubernetes/tests/features/steps/minifi_as_pod_in_kubernetes_cluster.py new file mode 100644 index 0000000000..934c46ba4a --- /dev/null +++ b/extensions/kubernetes/tests/features/steps/minifi_as_pod_in_kubernetes_cluster.py @@ -0,0 +1,35 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from minifi_test_framework.containers.minifi_container import MinifiContainer +from minifi_test_framework.core.minifi_test_context import MinifiTestContext + + +class MinifiAsPodInKubernetesCluster(MinifiContainer): + def __init__(self, container_name: str, test_context: MinifiTestContext): + super().__init__(container_name, test_context) + self.container = test_context.kubernetes_proxy + + def deploy(self) -> bool: + logging.debug('Setting up the kind Kubernetes cluster') + self.container.write_minifi_conf_file("minifi.properties", self._get_properties_file_content()) + self.container.write_minifi_conf_file("minifi-log.properties", self._get_log_properties_file_content()) + self.container.write_minifi_conf_file("config.yml", self.flow_definition.to_yaml()) + self.container.create_helper_objects() + self.container.load_docker_image("apacheminificpp:docker_test") + self.container.create_minifi_pod() + return True diff --git a/extensions/kubernetes/tests/features/steps/steps.py b/extensions/kubernetes/tests/features/steps/steps.py new file mode 100644 index 0000000000..1c88e5f69f --- /dev/null +++ b/extensions/kubernetes/tests/features/steps/steps.py @@ -0,0 +1,59 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from behave import given + +from minifi_test_framework.steps import checking_steps # noqa: F401 +from minifi_test_framework.steps import configuration_steps # noqa: F401 +from minifi_test_framework.steps import core_steps # noqa: F401 +from minifi_test_framework.steps import flow_building_steps # noqa: F401 + +from minifi_test_framework.core.minifi_test_context import DEFAULT_MINIFI_CONTAINER_NAME, MinifiTestContext +from minifi_test_framework.minifi.processor import Processor +from minifi_test_framework.minifi.controller_service import ControllerService +from minifi_as_pod_in_kubernetes_cluster import MinifiAsPodInKubernetesCluster + + +def __ensure_kubernetes_cluster(context: MinifiTestContext): + if DEFAULT_MINIFI_CONTAINER_NAME not in context.containers or not isinstance(context.containers[DEFAULT_MINIFI_CONTAINER_NAME], MinifiAsPodInKubernetesCluster): + context.containers[DEFAULT_MINIFI_CONTAINER_NAME] = MinifiAsPodInKubernetesCluster("kubernetes", context) + + +@given("a {processor_type} processor in a Kubernetes cluster") +@given("a {processor_type} processor in the Kubernetes cluster") +def step_impl(context: MinifiTestContext, processor_type: str): + __ensure_kubernetes_cluster(context) + processor = Processor(class_name=processor_type, proc_name=processor_type) + context.get_or_create_default_minifi_container().flow_definition.add_processor(processor) + + +def __set_up_the_kubernetes_controller_service(context: MinifiTestContext, processor_name: str, service_property_name: str, properties: dict[str, str]): + kubernetes_controller_service = ControllerService(class_name="KubernetesControllerService", service_name="Kubernetes Controller Service") + kubernetes_controller_service.properties = properties + flow = context.get_or_create_default_minifi_container().flow_definition + flow.controller_services.append(kubernetes_controller_service) + flow.get_processor(processor_name).add_property(service_property_name, kubernetes_controller_service.name) + + +@given("the {processor_name} processor has a {service_property_name} which is a Kubernetes Controller Service") +@given("the {processor_name} processor has an {service_property_name} which is a Kubernetes Controller Service") +def step_impl(context: MinifiTestContext, processor_name: str, service_property_name: str): + __set_up_the_kubernetes_controller_service(context, processor_name, service_property_name, {}) + + +@given("the {processor_name} processor has a {service_property_name} which is a Kubernetes Controller Service with the \"{property_name}\" property set to \"{property_value}\"") +@given("the {processor_name} processor has an {service_property_name} which is a Kubernetes Controller Service with the \"{property_name}\" property set to \"{property_value}\"") +def step_impl(context: MinifiTestContext, processor_name: str, service_property_name: str, property_name: str, property_value: str): + __set_up_the_kubernetes_controller_service(context, processor_name, service_property_name, {property_name: property_value})
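A minimal usage sketch (not part of the patch) of the KubernetesProxy introduced above, showing roughly what the environment.py hooks and MinifiAsPodInKubernetesCluster.deploy() do for each scenario. The bare import path and the minifi.properties / config.yml contents are placeholder assumptions for illustration; the real hooks import the class as steps.kubernetes_proxy and get the file contents from the test framework.

    from kubernetes_proxy import KubernetesProxy   # assumes the steps/ directory is on sys.path, as behave arranges

    proxy = KubernetesProxy()                      # downloads kind and writes kind-config.yml with the resources and minifi_conf mounts
    proxy.delete_cluster()                         # drop any stale cluster, as before_feature() does
    proxy.create_cluster()                         # one fresh kind cluster per scenario (before_scenario)

    # roughly what MinifiAsPodInKubernetesCluster.deploy() does before the MiNiFi pod starts:
    proxy.write_minifi_conf_file("minifi.properties", "nifi.bored.yield.duration=100 millis\n")  # placeholder content
    proxy.write_minifi_conf_file("minifi-log.properties", "logger.root=INFO,stdout\n")           # placeholder content
    proxy.write_minifi_conf_file("config.yml", "MiNiFi Config Version: 3\n")                     # placeholder flow definition
    proxy.create_helper_objects()                  # namespaces, hello-world helper pods, cluster roles, metrics-server
    proxy.load_docker_image("apacheminificpp:docker_test")
    proxy.create_minifi_pod()                      # applies minifi.test-pod.yml and waits for the pod to reach Running

    print(proxy.logs().decode("utf-8"))            # kubectl -n daemon logs minifi
    proxy.delete_cluster()                         # as after_scenario() does

The proxy mirrors the container interface the test framework expects (reload(), exec_run(), logs()), which is why MinifiAsPodInKubernetesCluster can delegate to it directly via self.container.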