diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2f0eb81..daecb08 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,17 @@ All notable changes to this project will be documented in this file.
 - Enable the [restart-controller](https://docs.stackable.tech/home/nightly/commons-operator/restarter/), so that the Pods are automatically restarted on config changes ([#97]).
 - Configure OpenSearch to publish the fully-qualified domain names of the nodes instead of the IP addresses, so that TLS certificates can be verified ([#100]).
+- Add service discovery and exposition ([#94]):
+  - Service to set up the cluster renamed to `<name>-seed-nodes`.
+  - Discovery service named `<name>` added.
+    The discovery service is used to populate the discovery ConfigMap.
+  - Discovery ConfigMap named `<name>` added.
+    The ConfigMap contains the keys `OPENSEARCH_HOSTNAME`, `OPENSEARCH_PORT`, `OPENSEARCH_PROTOCOL`
+    and `OPENSEARCH_HOSTS`. Users should use this information to connect to the cluster.
+  - Configuration parameter `spec.nodes.roleConfig.discoveryServiceListenerClass` added to set the
+    ListenerClass for the discovery service.
+  - Configuration parameter `spec.nodes.roleGroups.<role-group>.config.discoveryServiceExposed`
+    added to expose a role group via the discovery service.

 ### Changed

@@ -24,6 +35,7 @@ All notable changes to this project will be documented in this file.
 [#76]: https://github.com/stackabletech/opensearch-operator/pull/76
 [#91]: https://github.com/stackabletech/opensearch-operator/pull/91
 [#93]: https://github.com/stackabletech/opensearch-operator/pull/93
+[#94]: https://github.com/stackabletech/opensearch-operator/pull/94
 [#97]: https://github.com/stackabletech/opensearch-operator/pull/97
 [#100]: https://github.com/stackabletech/opensearch-operator/pull/100
diff --git a/deploy/helm/opensearch-operator/crds/crds.yaml b/deploy/helm/opensearch-operator/crds/crds.yaml
index 76ed611..9937558 100644
--- a/deploy/helm/opensearch-operator/crds/crds.yaml
+++ b/deploy/helm/opensearch-operator/crds/crds.yaml
@@ -246,6 +246,10 @@ spec:
                     type: object
                     x-kubernetes-preserve-unknown-fields: true
                   type: object
+                discoveryServiceExposed:
+                  description: Determines whether this role group is exposed in the discovery service.
+                  nullable: true
+                  type: boolean
                 gracefulShutdownTimeout:
                   description: |-
                     Time period Pods have to gracefully shut down, e.g. `30m`, `1h` or `2d`. Consult the
@@ -517,11 +521,19 @@ spec:
                 x-kubernetes-preserve-unknown-fields: true
               roleConfig:
                 default:
+                  discoveryServiceListenerClass: cluster-internal
                   podDisruptionBudget:
                     enabled: true
                     maxUnavailable: null
                 description: This is a product-agnostic RoleConfig, which is sufficient for most of the products.
                 properties:
+                  discoveryServiceListenerClass:
+                    default: cluster-internal
+                    description: The [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) that is used for the discovery service.
+                    maxLength: 253
+                    minLength: 1
+                    pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+                    type: string
                   podDisruptionBudget:
                     default:
                       enabled: true
@@ -600,6 +612,10 @@ spec:
                     type: object
                     x-kubernetes-preserve-unknown-fields: true
                   type: object
+                discoveryServiceExposed:
+                  description: Determines whether this role group is exposed in the discovery service.
+                  nullable: true
+                  type: boolean
                 gracefulShutdownTimeout:
                   description: |-
                     Time period Pods have to gracefully shut down, e.g. `30m`, `1h` or `2d`.
Consult the diff --git a/docs/modules/opensearch/examples/getting_started/getting_started.sh b/docs/modules/opensearch/examples/getting_started/getting_started.sh index a6ada9f..ae98ed8 100755 --- a/docs/modules/opensearch/examples/getting_started/getting_started.sh +++ b/docs/modules/opensearch/examples/getting_started/getting_started.sh @@ -75,25 +75,21 @@ kubectl rollout status --watch statefulset/simple-opensearch-nodes-default --tim # wait a bit for the port to open sleep 10 -echo "Starting port-forwarding of port 9200" -# tag::opensearch-port-forwarding[] -kubectl port-forward services/simple-opensearch 9200 > /dev/null 2>&1 & -# end::opensearch-port-forwarding[] -PORT_FORWARD_PID=$! -# shellcheck disable=2064 # we want the PID evaluated now, not at the time the trap is -trap "kill $PORT_FORWARD_PID" EXIT -sleep 5 - echo "Using the REST API" # tag::rest-api[] export CREDENTIALS=admin:AJVFsGJBbpT6mChn +OPENSEARCH_HOST=$( + kubectl get configmap simple-opensearch \ + --output=jsonpath='{.data.OPENSEARCH_HOSTS}' +) + curl \ --insecure \ --user $CREDENTIALS \ --request PUT \ --json '{"name": "Stackable"}' \ - https://localhost:9200/sample_index/_doc/1 + "$OPENSEARCH_HOST/sample_index/_doc/1" # Output: # {"_index":"sample_index","_id":"1","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"_seq_no":0,"_primary_term":1} @@ -102,7 +98,7 @@ curl \ --insecure \ --user $CREDENTIALS \ --request GET \ - https://localhost:9200/sample_index/_doc/1 + "$OPENSEARCH_HOST/sample_index/_doc/1" # Output: # {"_index":"sample_index","_id":"1","_version":1,"_seq_no":0,"_primary_term":1,"found":true,"_source":{"name": "Stackable"}} diff --git a/docs/modules/opensearch/examples/getting_started/getting_started.sh.j2 b/docs/modules/opensearch/examples/getting_started/getting_started.sh.j2 index ec622c9..0705511 100755 --- a/docs/modules/opensearch/examples/getting_started/getting_started.sh.j2 +++ b/docs/modules/opensearch/examples/getting_started/getting_started.sh.j2 @@ -75,25 +75,21 @@ kubectl rollout status --watch statefulset/simple-opensearch-nodes-default --tim # wait a bit for the port to open sleep 10 -echo "Starting port-forwarding of port 9200" -# tag::opensearch-port-forwarding[] -kubectl port-forward services/simple-opensearch 9200 > /dev/null 2>&1 & -# end::opensearch-port-forwarding[] -PORT_FORWARD_PID=$! 
-# shellcheck disable=2064 # we want the PID evaluated now, not at the time the trap is -trap "kill $PORT_FORWARD_PID" EXIT -sleep 5 - echo "Using the REST API" # tag::rest-api[] export CREDENTIALS=admin:AJVFsGJBbpT6mChn +OPENSEARCH_HOST=$( + kubectl get configmap simple-opensearch \ + --output=jsonpath='{.data.OPENSEARCH_HOSTS}' +) + curl \ --insecure \ --user $CREDENTIALS \ --request PUT \ --json '{"name": "Stackable"}' \ - https://localhost:9200/sample_index/_doc/1 + "$OPENSEARCH_HOST/sample_index/_doc/1" # Output: # {"_index":"sample_index","_id":"1","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"_seq_no":0,"_primary_term":1} @@ -102,7 +98,7 @@ curl \ --insecure \ --user $CREDENTIALS \ --request GET \ - https://localhost:9200/sample_index/_doc/1 + "$OPENSEARCH_HOST/sample_index/_doc/1" # Output: # {"_index":"sample_index","_id":"1","_version":1,"_seq_no":0,"_primary_term":1,"found":true,"_source":{"name": "Stackable"}} diff --git a/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml b/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml index 9e30d3e..c311e76 100644 --- a/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml +++ b/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml @@ -1,5 +1,4 @@ --- -opensearchHosts: https://simple-opensearch-nodes-default.default.svc.cluster.local:9200 image: repository: oci.stackable.tech/sdp/opensearch-dashboards tag: 3.1.0-stackable0.0.0-dev @@ -23,6 +22,11 @@ config: cookie: secure: true extraEnvs: + - name: OPENSEARCH_HOSTS + valueFrom: + configMapKeyRef: + name: simple-opensearch + key: OPENSEARCH_HOSTS - name: OPENSEARCH_PASSWORD valueFrom: secretKeyRef: diff --git a/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml.j2 b/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml.j2 index 6ba71f5..aa62bb8 100644 --- a/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml.j2 +++ b/docs/modules/opensearch/examples/getting_started/opensearch-dashboards-values.yaml.j2 @@ -1,5 +1,4 @@ --- -opensearchHosts: https://simple-opensearch-nodes-default.default.svc.cluster.local:9200 image: repository: oci.stackable.tech/sdp/opensearch-dashboards tag: 3.1.0-stackable{{ versions.opensearch }} @@ -23,6 +22,11 @@ config: cookie: secure: true extraEnvs: + - name: OPENSEARCH_HOSTS + valueFrom: + configMapKeyRef: + name: simple-opensearch + key: OPENSEARCH_HOSTS - name: OPENSEARCH_PASSWORD valueFrom: secretKeyRef: diff --git a/docs/modules/opensearch/examples/getting_started/opensearch.yaml b/docs/modules/opensearch/examples/getting_started/opensearch.yaml index 6def257..d5c0d84 100644 --- a/docs/modules/opensearch/examples/getting_started/opensearch.yaml +++ b/docs/modules/opensearch/examples/getting_started/opensearch.yaml @@ -7,6 +7,8 @@ spec: image: productVersion: 3.1.0 nodes: + roleConfig: + discoveryServiceListenerClass: external-stable roleGroups: default: replicas: 3 diff --git a/docs/modules/opensearch/images/opensearch_overview.drawio.svg b/docs/modules/opensearch/images/opensearch_overview.drawio.svg index 90da284..70e771f 100644 --- a/docs/modules/opensearch/images/opensearch_overview.drawio.svg +++ b/docs/modules/opensearch/images/opensearch_overview.drawio.svg @@ -1,4 +1,4 @@ -
[drawio SVG diagram; raw markup and text labels omitted. The previous overview showed the OpenSearchCluster <name>, per-role-group StatefulSets, headless Services, ConfigMaps and Pods for <rg1> and <rg2>, and a cluster-wide Service <name>. The updated overview drops the <rg2> duplicates and additionally shows the per-role-group Listener and Service <name>-nodes-<rg1>, PersistentVolumeClaims and PersistentVolumes, the Service <name>-seed-nodes, the discovery Listener, Service and ConfigMap named <name>, the ServiceAccount <name>, the RoleBinding <name>-rolebinding, and the PodDisruptionBudget <name>-nodes.]
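The updated overview can be cross-checked against a running cluster. A minimal sketch, assuming a cluster named `simple-opensearch` and that all operator-managed objects carry the `app.kubernetes.io/instance` label used in the tests further down in this PR:

[source,bash]
----
# List the operator-managed objects shown in the overview diagram.
# "simple-opensearch" is an example cluster name; adjust the label and namespace as needed.
kubectl get statefulsets,services,configmaps,serviceaccounts,rolebindings,poddisruptionbudgets \
  --selector app.kubernetes.io/instance=simple-opensearch

# Listeners are a Stackable custom resource; query them via their full resource name.
kubectl get listeners.listeners.stackable.tech \
  --selector app.kubernetes.io/instance=simple-opensearch
----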
diff --git a/docs/modules/opensearch/pages/getting_started/first_steps.adoc b/docs/modules/opensearch/pages/getting_started/first_steps.adoc
index 7e4ec55..0119d9a 100644
--- a/docs/modules/opensearch/pages/getting_started/first_steps.adoc
+++ b/docs/modules/opensearch/pages/getting_started/first_steps.adoc
@@ -58,17 +58,6 @@ You can do so with this command:
 include::example$getting_started/getting_started.sh[tag=await-cluster]
 ----

-== Connecting to the HTTP endpoint
-
-Once the OpenSearch nodes are created, you can use the REST API of OpenSearch.
-
-To forward the HTTP port (`9200`) to localhost, run:
-
-[source,bash]
-----
-include::example$getting_started/getting_started.sh[tag=opensearch-port-forwarding]
-----
-
 == Using the REST API

 You can use the REST API as follows:
diff --git a/docs/modules/opensearch/pages/index.adoc b/docs/modules/opensearch/pages/index.adoc
index ae7011a..8292090 100644
--- a/docs/modules/opensearch/pages/index.adoc
+++ b/docs/modules/opensearch/pages/index.adoc
@@ -41,17 +41,18 @@ It helps you tune your cluster to your needs by configuring xref:usage-guide/sto
 === Kubernetes resources

-Based on the custom resources you define, the operator creates ConfigMaps, StatefulSets and Services.
+Based on the custom resources you define, the operator creates ConfigMaps, StatefulSets, Services and other resources.

 image::opensearch_overview.drawio.svg[A diagram depicting the Kubernetes resources created by the operator]

 The diagram above depicts all the Kubernetes resources created by the operator, and how they relate to each other.
-For every xref:concepts:roles-and-role-groups.adoc#role-groups[role group] you define, the operator creates a StatefulSet with the amount of replicas defined in the role group.
-For every role group, a Service is created, as well as one for the whole cluster that references the cluster manager nodes.
+Of particular note is the xref:reference/discovery.adoc[discovery ConfigMap], which has the same name as the OpenSearchCluster.
+It references the Service that should be used to connect to the cluster.

-Additionally, a ConfigMap is created for each role group.
-These ConfigMaps contain configuration files like `opensearch.yml`.
+For every xref:concepts:roles-and-role-groups.adoc#role-groups[role group] you define, the operator deploys OpenSearch as a StatefulSet with the number of replicas defined in the role group.
+The Pods of a StatefulSet use the configuration from the role group ConfigMap, i.e. they all use the same OpenSearch node roles, e.g. `cluster-manager` or `data`, and xref:opensearch:usage-guide/storage-resource-configuration.adoc[resource configurations].
+If you want dedicated `cluster-manager` and `data` nodes, simply define two role groups configured accordingly, as described in xref:opensearch:usage-guide/node-roles.adoc[].
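To make the dedicated-node setup mentioned above concrete, here is a hedged sketch of a cluster with separate `cluster-manager` and `data` role groups; the cluster name, replica counts and node role assignments are illustrative, and the spec is abridged:

[source,bash]
----
# Illustrative only: two role groups with distinct OpenSearch node roles.
# Field names follow the CRD in this PR; names and replica counts are examples.
kubectl apply -f - <<'EOF'
apiVersion: opensearch.stackable.tech/v1alpha1
kind: OpenSearchCluster
metadata:
  name: simple-opensearch
spec:
  image:
    productVersion: 3.1.0
  nodes:
    roleGroups:
      cluster-manager:
        replicas: 3
        config:
          nodeRoles:
            - cluster_manager
      data:
        replicas: 2
        config:
          nodeRoles:
            - data
            - ingest
EOF
----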
 == Supported versions
diff --git a/docs/modules/opensearch/pages/reference/discovery.adoc b/docs/modules/opensearch/pages/reference/discovery.adoc
new file mode 100644
index 0000000..ad57c0f
--- /dev/null
+++ b/docs/modules/opensearch/pages/reference/discovery.adoc
@@ -0,0 +1,100 @@
+:clusterName: simple-opensearch
+:namespace: stackable
+:exampleDiscoveryServiceListenerClass: external-unstable
+:exampleOpensearchProtocol: https
+:exampleOpensearchIp: 10.104.213.49
+:exampleOpensearchPort: 31315
+
+= Discovery
+:page-aliases: discovery.adoc
+
+The Stackable Operator for OpenSearch publishes a xref:concepts:service_discovery.adoc[service discovery ConfigMap] which exposes a client configuration bundle that allows access to the OpenSearch cluster.
+
+The bundle includes the connection parameters to access the OpenSearch cluster.
+These parameters may be used by other operators or tools to configure their products with access to OpenSearch.
+
+== Example
+
+Given the following OpenSearch cluster:
+
+[source,yaml,subs="normal,callouts"]
+----
+apiVersion: opensearch.stackable.tech/v1alpha1
+kind: OpenSearchCluster
+metadata:
+  name: {clusterName} # <1>
+  namespace: {namespace} # <2>
+spec:
+  clusterConfig:
+    tls:
+      serverSecretClass: tls # <3>
+  nodes:
+    roleConfig:
+      discoveryServiceListenerClass: {exampleDiscoveryServiceListenerClass} # <4>
+    roleGroups:
+      cluster-manager:
+        config:
+          discoveryServiceExposed: true # <5>
+          nodeRoles:
+            - cluster_manager
+      data:
+        config:
+          discoveryServiceExposed: false # <6>
+          nodeRoles:
+            - ingest
+            - data
+            - remote_cluster_client
+----
+<1> The name of the OpenSearch cluster, which is also the name of the created discovery ConfigMap.
+<2> The namespace of the cluster and the discovery ConfigMap.
+<3> Whether or not a `serverSecretClass` is set determines the value of the `OPENSEARCH_PROTOCOL` key in the discovery ConfigMap.
+<4> The xref:listener-operator:listenerclass.adoc[ListenerClass] that is used for the discovery service.
+<5> The `cluster-manager` role group is exposed in the discovery service.
+<6> The `data` role group is not exposed in the discovery service.
+
+The resulting discovery ConfigMap is `{namespace}/{clusterName}`.
+
+== Contents
+
+The `{namespace}/{clusterName}` discovery ConfigMap contains the following fields, where `{clusterName}` represents the name and `{namespace}` the namespace of the cluster:
+
+`OPENSEARCH_HOSTNAME`::
+====
+Contains the hostname or IP address of the service that references the exposed role groups.
+
+If `discoveryServiceListenerClass` is set to `cluster-internal`, the following hostname is set:
+
+[subs="normal"]
+  {clusterName}.{namespace}.svc.cluster.local
+
+If `discoveryServiceListenerClass` is set to `{exampleDiscoveryServiceListenerClass}`, the content could look like this:
+
+[subs="normal"]
+  {exampleOpensearchIp}
+====
+
+`OPENSEARCH_PORT`::
+====
+Contains the port of the service that references the exposed role groups.
+
+Depending on the `discoveryServiceListenerClass`, the port is either the default HTTP port 9200 or a NodePort:
+
+[subs="normal"]
+  {exampleOpensearchPort}
+====
+
+`OPENSEARCH_PROTOCOL`::
+====
+Contains either `http` or `https`, depending on whether a `serverSecretClass` is configured:
+
+[subs="normal"]
+  {exampleOpensearchProtocol}
+====
+
+`OPENSEARCH_HOSTS`::
+====
+Contains the URL of the service that references the exposed role groups.
+ +[subs="normal"] + {exampleOpensearchProtocol}://{exampleOpensearchIp}:{exampleOpensearchPort} +==== diff --git a/docs/modules/opensearch/pages/reference/index.adoc b/docs/modules/opensearch/pages/reference/index.adoc index 3cfa028..8484d99 100644 --- a/docs/modules/opensearch/pages/reference/index.adoc +++ b/docs/modules/opensearch/pages/reference/index.adoc @@ -3,4 +3,5 @@ Consult the reference documentation section to find exhaustive information on: * Descriptions and default values of all properties in the CRDs used by this operator in the xref:reference/crds.adoc[]. +* The properties in the xref:reference/discovery.adoc[Discovery ConfigMap]. * The xref:reference/commandline-parameters.adoc[] and xref:reference/environment-variables.adoc[] accepted by the operator. diff --git a/docs/modules/opensearch/pages/usage-guide/listenerclass.adoc b/docs/modules/opensearch/pages/usage-guide/listenerclass.adoc index 0d4312f..fb0297d 100644 --- a/docs/modules/opensearch/pages/usage-guide/listenerclass.adoc +++ b/docs/modules/opensearch/pages/usage-guide/listenerclass.adoc @@ -1,7 +1,12 @@ = Service exposition with ListenerClasses :description: Configure OpenSearch service exposure with ListenerClasses: cluster-internal, external-unstable, or external-stable. -The operator deploys a xref:listener-operator:listener.adoc[Listener] for OpenSearch role-groups. +[NOTE] +==== +While the listeners described here provide access to individual role groups, the xref:opensearch:reference/discovery.adoc[Discovery ConfigMap] is the recommended approach for general access to the OpenSearch cluster. +==== + +The operator deploys a xref:listener-operator:listener.adoc[Listener] for each OpenSearch role group. The listener defaults to only being accessible from within the Kubernetes cluster, but this can be changed by setting `.spec.nodes.roleGroups.\{role-group-name}.config.listenerClass`: [source,yaml] @@ -13,5 +18,5 @@ spec: config: listenerClass: external-stable # <1> ---- -<1> Specify a ListenerClass, such as `external-stable`, `external-unstable`, or `cluster-internal` (the default setting is `cluster-internal`) at role-group level. -This can be set for all role-groups individually. +<1> Specify a ListenerClass, such as `external-stable`, `external-unstable`, or `cluster-internal` (the default setting is `cluster-internal`) at role group level. +This can be set for all role groups individually. 
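Beyond OpenSearch Dashboards, any client can consume this discovery contract directly. A minimal sketch, assuming the cluster `{clusterName}` in namespace `{namespace}` from the example above and admin credentials stored in `$CREDENTIALS`:

[source,bash]
----
# Read the connection bundle from the discovery ConfigMap.
# "simple-opensearch" and "stackable" are the example cluster name and namespace.
OPENSEARCH_HOSTS=$(kubectl get configmap simple-opensearch \
  --namespace stackable \
  --output=jsonpath='{.data.OPENSEARCH_HOSTS}')

# Query the cluster through the discovered endpoint.
# --insecure is only required for self-signed certificates when OPENSEARCH_PROTOCOL is https.
curl --insecure --user "$CREDENTIALS" "$OPENSEARCH_HOSTS/_cluster/health"
----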
diff --git a/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc b/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc
index 7031f89..784502f 100644
--- a/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc
+++ b/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc
@@ -10,7 +10,7 @@ A basic `values.yaml` file to deploy OpenSearch Dashboards with this chart might
 [source,yaml]
 ----
-opensearchHosts: https://opensearch-nodes-default..svc.cluster.local:9200 # <1>
+opensearchHosts: null # <1>
 image: # <2>
   repository: oci.stackable.tech/sdp/opensearch-dashboards
   tag: 3.1.0-stackable0.0.0-dev
@@ -34,13 +34,18 @@ config:
     cookie:
       secure: true # <10>
 extraEnvs:
+  - name: OPENSEARCH_HOSTS
+    valueFrom:
+      configMapKeyRef:
+        name: opensearch # <11>
+        key: OPENSEARCH_HOSTS
   - name: OPENSEARCH_PASSWORD
     valueFrom:
       secretKeyRef:
         name: opensearch-credentials
-        key: kibanaserver # <11>
+        key: kibanaserver # <12>
 extraVolumes:
-  - name: tls # <12>
+  - name: tls # <13>
     ephemeral:
       volumeClaimTemplate:
         metadata:
@@ -58,13 +63,12 @@ extraVolumeMounts:
   - mountPath: /stackable/opensearch-dashboards/config/tls
     name: tls
   - mountPath: /stackable/opensearch-dashboards/config/opensearch_dashboards.yml
-    name: config # <13>
+    name: config # <14>
     subPath: opensearch_dashboards.yml
 podSecurityContext:
-  fsGroup: 1000 # <14>
+  fsGroup: 1000 # <15>
 ----
-<1> Address of the OpenSearch Service deployed by the operator;
-    This address must be adapted according to your deployment.
+<1> The address of the OpenSearch cluster is provided in `extraEnvs` in the `OPENSEARCH_HOSTS` variable.
 <2> Use the OCI image provided by the Stackable Data Platform
 <3> If running on OpenShift, use the ServiceAccount of OpenSearch because its permissions are already configured to work on OpenShift.
     This ServiceAccount name must probably be adapted according to your deployment.
@@ -75,10 +79,12 @@ podSecurityContext:
 <8> OpenSearch Dashboards verifies the certificate of OpenSearch.
     This is disabled by default.
 <9> The CA certificate which is used to verify the OpenSearch certificate
 <10> Ensure that cookies are not sent via an insecure connection.
-<11> The password for the `kibanaserver` user
-<12> This example uses the secret operator to provide a TLS certificate.
-<13> The Helm chart only adds a volume mount at `/usr/share/opensearch-dashboards/config`, but in the image provided by Stackable, OpenSearch Dashboards is located in `/stackable/opensearch-dashboards`.
-<14> Mount the volumes with the `stackable` group so that the files are accessible by OpenSearch Dashboards.
+<11> Address of the OpenSearch Service, taken from the xref:opensearch:reference/discovery.adoc[discovery ConfigMap].
+    The name of the discovery ConfigMap corresponds to the name of the OpenSearch cluster and must be adapted according to your deployment.
+<12> The password for the `kibanaserver` user
+<13> This example uses the secret operator to provide a TLS certificate.
+<14> The Helm chart only adds a volume mount at `/usr/share/opensearch-dashboards/config`, but in the image provided by Stackable, OpenSearch Dashboards is located in `/stackable/opensearch-dashboards`.
+<15> Mount the volumes with the `stackable` group so that the files are accessible by OpenSearch Dashboards.
 After the values are adjusted according to your deployment, especially `opensearchHosts` and `serviceAccount.name`, you can deploy the Helm chart as follows:
diff --git a/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc.j2 b/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc.j2
index 5f333ae..b5d25bd 100644
--- a/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc.j2
+++ b/docs/modules/opensearch/pages/usage-guide/opensearch-dashboards.adoc.j2
@@ -10,7 +10,7 @@ A basic `values.yaml` file to deploy OpenSearch Dashboards with this chart might
 [source,yaml]
 ----
-opensearchHosts: https://opensearch-nodes-default..svc.cluster.local:9200 # <1>
+opensearchHosts: null # <1>
 image: # <2>
   repository: oci.stackable.tech/sdp/opensearch-dashboards
   tag: 3.1.0-stackable{{ versions.opensearch }}
@@ -34,13 +34,18 @@ config:
     cookie:
       secure: true # <10>
 extraEnvs:
+  - name: OPENSEARCH_HOSTS
+    valueFrom:
+      configMapKeyRef:
+        name: opensearch # <11>
+        key: OPENSEARCH_HOSTS
   - name: OPENSEARCH_PASSWORD
     valueFrom:
       secretKeyRef:
         name: opensearch-credentials
-        key: kibanaserver # <11>
+        key: kibanaserver # <12>
 extraVolumes:
-  - name: tls # <12>
+  - name: tls # <13>
     ephemeral:
       volumeClaimTemplate:
         metadata:
@@ -58,13 +63,12 @@ extraVolumeMounts:
   - mountPath: /stackable/opensearch-dashboards/config/tls
     name: tls
   - mountPath: /stackable/opensearch-dashboards/config/opensearch_dashboards.yml
-    name: config # <13>
+    name: config # <14>
     subPath: opensearch_dashboards.yml
 podSecurityContext:
-  fsGroup: 1000 # <14>
+  fsGroup: 1000 # <15>
 ----
-<1> Address of the OpenSearch Service deployed by the operator;
-    This address must be adapted according to your deployment.
+<1> The address of the OpenSearch cluster is provided in `extraEnvs` in the `OPENSEARCH_HOSTS` variable.
 <2> Use the OCI image provided by the Stackable Data Platform
 <3> If running on OpenShift, use the ServiceAccount of OpenSearch because its permissions are already configured to work on OpenShift.
     This ServiceAccount name must probably be adapted according to your deployment.
@@ -75,10 +79,12 @@ podSecurityContext:
 <8> OpenSearch Dashboards verifies the certificate of OpenSearch.
     This is disabled by default.
 <9> The CA certificate which is used to verify the OpenSearch certificate
 <10> Ensure that cookies are not sent via an insecure connection.
-<11> The password for the `kibanaserver` user
-<12> This example uses the secret operator to provide a TLS certificate.
-<13> The Helm chart only adds a volume mount at `/usr/share/opensearch-dashboards/config`, but in the image provided by Stackable, OpenSearch Dashboards is located in `/stackable/opensearch-dashboards`.
-<14> Mount the volumes with the `stackable` group so that the files are accessible by OpenSearch Dashboards.
+<11> Address of the OpenSearch Service, taken from the xref:opensearch:reference/discovery.adoc[discovery ConfigMap].
+    The name of the discovery ConfigMap corresponds to the name of the OpenSearch cluster and must be adapted according to your deployment.
+<12> The password for the `kibanaserver` user
+<13> This example uses the secret operator to provide a TLS certificate.
+<14> The Helm chart only adds a volume mount at `/usr/share/opensearch-dashboards/config`, but in the image provided by Stackable, OpenSearch Dashboards is located in `/stackable/opensearch-dashboards`.
+<15> Mount the volumes with the `stackable` group so that the files are accessible by OpenSearch Dashboards.
After the values are adjusted according to your deployment, especially `opensearchHosts` and `serviceAccount.name`, you can deploy the Helm chart as follows: diff --git a/docs/modules/opensearch/partials/nav.adoc b/docs/modules/opensearch/partials/nav.adoc index 9183f5b..779028f 100644 --- a/docs/modules/opensearch/partials/nav.adoc +++ b/docs/modules/opensearch/partials/nav.adoc @@ -21,5 +21,6 @@ * xref:opensearch:reference/index.adoc[] ** xref:opensearch:reference/crds.adoc[] *** {crd-docs}/opensearch.stackable.tech/opensearchcluster/v1alpha1/[OpenSearchCluster {external-link-icon}^] +** xref:opensearch:reference/discovery.adoc[] ** xref:opensearch:reference/commandline-parameters.adoc[] ** xref:opensearch:reference/environment-variables.adoc[] diff --git a/rust/operator-binary/src/controller.rs b/rust/operator-binary/src/controller.rs index da9c75a..cb3b938 100644 --- a/rust/operator-binary/src/controller.rs +++ b/rust/operator-binary/src/controller.rs @@ -7,6 +7,7 @@ use std::{collections::BTreeMap, marker::PhantomData, str::FromStr, sync::Arc}; use apply::Applier; use build::build; +use dereference::dereference; use snafu::{ResultExt, Snafu}; use stackable_operator::{ cluster_resources::ClusterResourceApplyStrategy, @@ -14,7 +15,7 @@ use stackable_operator::{ affinity::StackableAffinity, networking::DomainName, product_image_selection::ResolvedProductImage, }, - crd::listener::v1alpha1::Listener, + crd::listener, k8s_openapi::api::{ apps::v1::StatefulSet, core::v1::{ConfigMap, Service, ServiceAccount}, @@ -23,7 +24,6 @@ use stackable_operator::{ }, kube::{Resource, api::ObjectMeta, core::DeserializeGuard, runtime::controller::Action}, logging::controller::ReconcilerError, - role_utils::GenericRoleConfig, shared::time::Duration, }; use strum::{EnumDiscriminants, IntoStaticStr}; @@ -37,7 +37,8 @@ use crate::{ product_logging::framework::{ValidatedContainerLogConfigChoice, VectorContainerLogConfig}, role_utils::{GenericProductSpecificCommonConfig, RoleGroupConfig}, types::{ - kubernetes::{ListenerClassName, NamespaceName, Uid}, + common::Port, + kubernetes::{Hostname, ListenerClassName, NamespaceName, Uid}, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, RoleName, @@ -48,9 +49,15 @@ use crate::{ mod apply; mod build; +mod dereference; mod update_status; mod validate; +pub const HTTP_PORT_NAME: &str = "http"; +pub const HTTP_PORT: Port = Port(9200); +pub const TRANSPORT_PORT_NAME: &str = "transport"; +pub const TRANSPORT_PORT: Port = Port(9300); + /// Names in the controller context which are passed to the submodules of the controller /// /// The names are not directly defined in [`Context`] because not every submodule requires a @@ -110,6 +117,9 @@ pub enum Error { source: Box, }, + #[snafu(display("failed to dereference resources"))] + Dereference { source: dereference::Error }, + #[snafu(display("failed to validate cluster"))] ValidateCluster { source: validate::Error }, @@ -134,10 +144,16 @@ type OpenSearchRoleGroupConfig = type OpenSearchNodeResources = stackable_operator::commons::resources::Resources; +/// Additional objects required for building the cluster +pub struct DereferencedObjects { + pub maybe_discovery_service_listener: Option, +} + /// Validated [`v1alpha1::OpenSearchConfig`] #[derive(Clone, Debug, PartialEq)] pub struct ValidatedOpenSearchConfig { pub affinity: StackableAffinity, + pub discovery_service_exposed: bool, pub listener_class: ListenerClassName, pub logging: ValidatedLogging, pub node_roles: NodeRoles, @@ 
-159,6 +175,12 @@ impl ValidatedLogging { } } +#[derive(Clone, Debug, PartialEq)] +pub struct ValidatedDiscoveryEndpoint { + pub hostname: Hostname, + pub port: Port, +} + /// The validated [`v1alpha1::OpenSearchCluster`] /// /// Validated means that there should be no reason for Kubernetes to reject resources generated @@ -175,10 +197,11 @@ pub struct ValidatedCluster { pub name: ClusterName, pub namespace: NamespaceName, pub uid: Uid, - pub role_config: GenericRoleConfig, + pub role_config: v1alpha1::OpenSearchRoleConfig, pub role_group_configs: BTreeMap, pub tls_config: v1alpha1::OpenSearchTls, pub keystores: Vec, + pub discovery_endpoint: Option, } impl ValidatedCluster { @@ -189,13 +212,14 @@ impl ValidatedCluster { name: ClusterName, namespace: NamespaceName, uid: impl Into, - role_config: GenericRoleConfig, + role_config: v1alpha1::OpenSearchRoleConfig, role_group_configs: BTreeMap, tls_config: v1alpha1::OpenSearchTls, keystores: Vec, + discovery_endpoint: Option, ) -> Self { let uid = uid.into(); - ValidatedCluster { + Self { metadata: ObjectMeta { name: Some(name.to_string()), namespace: Some(namespace.to_string()), @@ -211,6 +235,7 @@ impl ValidatedCluster { role_group_configs, tls_config, keystores, + discovery_endpoint, } } @@ -293,6 +318,27 @@ impl Resource for ValidatedCluster { } } +/// Marker for prepared Kubernetes resources which are not applied yet +struct Prepared; +/// Marker for applied Kubernetes resources +struct Applied; + +/// List of all Kubernetes resources produced by this controller +/// +/// `T` is a marker that indicates if these resources are only [`Prepared`] or already [`Applied`]. +/// The marker is useful e.g. to ensure that the cluster status is updated based on the applied +/// resources. +struct KubernetesResources { + stateful_sets: Vec, + services: Vec, + listeners: Vec, + config_maps: Vec, + service_accounts: Vec, + role_bindings: Vec, + pod_disruption_budgets: Vec, + status: PhantomData, +} + pub fn error_policy( _object: Arc>, error: &Error, @@ -324,10 +370,14 @@ pub async fn reconcile( .map_err(stackable_operator::kube::core::error_boundary::InvalidObject::clone) .context(DeserializeClusterDefinitionSnafu)?; - // not necessary in this controller: dereference (client required) + // dereference (client required) + let dereferenced_objects = dereference(&context.client, cluster) + .await + .context(DereferenceSnafu)?; // validate (no client required) - let validated_cluster = validate(&context.names, cluster).context(ValidateClusterSnafu)?; + let validated_cluster = + validate(&context.names, cluster, &dereferenced_objects).context(ValidateClusterSnafu)?; // build (no client required; infallible) let prepared_resources = build(&context.names, validated_cluster.clone()); @@ -357,27 +407,6 @@ pub async fn reconcile( Ok(Action::await_change()) } -/// Marker for prepared Kubernetes resources which are not applied yet -struct Prepared; -/// Marker for applied Kubernetes resources -struct Applied; - -/// List of all Kubernetes resources produced by this controller -/// -/// `T` is a marker that indicates if these resources are only [`Prepared`] or already [`Applied`]. -/// The marker is useful e.g. to ensure that the cluster status is updated based on the applied -/// resources. 
-struct KubernetesResources { - stateful_sets: Vec, - services: Vec, - listeners: Vec, - config_maps: Vec, - service_accounts: Vec, - role_bindings: Vec, - pod_disruption_budgets: Vec, - status: PhantomData, -} - #[cfg(test)] mod tests { use std::{ @@ -393,7 +422,6 @@ mod tests { k8s_openapi::api::core::v1::PodTemplateSpec, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -489,7 +517,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch"), NamespaceName::from_str_unsafe("default"), uuid!("e6ac237d-a6d4-43a1-8135-f36506110912"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [ ( RoleGroupName::from_str_unsafe("coordinating"), @@ -525,6 +553,7 @@ mod tests { .into(), v1alpha1::OpenSearchTls::default(), vec![], + None, ) } @@ -536,6 +565,7 @@ mod tests { replicas, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("external-stable"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( diff --git a/rust/operator-binary/src/controller/build.rs b/rust/operator-binary/src/controller/build.rs index b93b2b1..841eabd 100644 --- a/rust/operator-binary/src/controller/build.rs +++ b/rust/operator-binary/src/controller/build.rs @@ -33,8 +33,11 @@ pub fn build(names: &ContextNames, cluster: ValidatedCluster) -> KubernetesResou listeners.push(role_group_builder.build_listener()); } - let cluster_manager_service = role_builder.build_cluster_manager_service(); - services.push(cluster_manager_service); + if let Some(discovery_config_map) = role_builder.build_discovery_config_map() { + config_maps.push(discovery_config_map); + } + services.push(role_builder.build_seed_nodes_service()); + listeners.push(role_builder.build_discovery_service_listener()); let service_accounts = vec![role_builder.build_service_account()]; @@ -70,7 +73,6 @@ mod tests { kube::Resource, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -79,14 +81,16 @@ mod tests { use crate::{ controller::{ ContextNames, OpenSearchNodeResources, OpenSearchRoleGroupConfig, ValidatedCluster, - ValidatedContainerLogConfigChoice, ValidatedLogging, ValidatedOpenSearchConfig, + ValidatedContainerLogConfigChoice, ValidatedDiscoveryEndpoint, ValidatedLogging, + ValidatedOpenSearchConfig, }, crd::{NodeRoles, v1alpha1}, framework::{ builder::pod::container::EnvVarSet, role_utils::GenericProductSpecificCommonConfig, types::{ - kubernetes::{ListenerClassName, NamespaceName}, + common::Port, + kubernetes::{Hostname, ListenerClassName, NamespaceName}, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, @@ -109,15 +113,16 @@ mod tests { ); assert_eq!( vec![ - "my-opensearch", "my-opensearch-nodes-cluster-manager-headless", "my-opensearch-nodes-coordinating-headless", - "my-opensearch-nodes-data-headless" + "my-opensearch-nodes-data-headless", + "my-opensearch-seed-nodes" ], extract_resource_names(&resources.services) ); assert_eq!( vec![ + "my-opensearch", "my-opensearch-nodes-cluster-manager", "my-opensearch-nodes-coordinating", "my-opensearch-nodes-data" @@ -126,6 +131,7 @@ mod tests { ); assert_eq!( vec![ + "my-opensearch", "my-opensearch-nodes-cluster-manager", "my-opensearch-nodes-coordinating", "my-opensearch-nodes-data" @@ -180,7 
+186,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch"), NamespaceName::from_str_unsafe("default"), uuid!("e6ac237d-a6d4-43a1-8135-f36506110912"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [ ( RoleGroupName::from_str_unsafe("coordinating"), @@ -205,6 +211,10 @@ mod tests { .into(), v1alpha1::OpenSearchTls::default(), vec![], + Some(ValidatedDiscoveryEndpoint { + hostname: Hostname::from_str_unsafe("1.2.3.4"), + port: Port(12345), + }), ) } @@ -216,6 +226,7 @@ mod tests { replicas, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("external-stable"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( diff --git a/rust/operator-binary/src/controller/build/node_config.rs b/rust/operator-binary/src/controller/build/node_config.rs index 9862140..8238db3 100644 --- a/rust/operator-binary/src/controller/build/node_config.rs +++ b/rust/operator-binary/src/controller/build/node_config.rs @@ -121,7 +121,7 @@ pub struct NodeConfig { cluster: ValidatedCluster, role_group_name: RoleGroupName, role_group_config: OpenSearchRoleGroupConfig, - pub discovery_service_name: ServiceName, + pub seed_nodes_service_name: ServiceName, cluster_domain_name: DomainName, headless_service_name: ServiceName, } @@ -133,7 +133,7 @@ impl NodeConfig { cluster: ValidatedCluster, role_group_name: RoleGroupName, role_group_config: OpenSearchRoleGroupConfig, - discovery_service_name: ServiceName, + seed_nodes_service_name: ServiceName, cluster_domain_name: DomainName, headless_service_name: ServiceName, ) -> Self { @@ -141,7 +141,7 @@ impl NodeConfig { cluster, role_group_name, role_group_config, - discovery_service_name, + seed_nodes_service_name, cluster_domain_name, headless_service_name, } @@ -286,7 +286,7 @@ impl NodeConfig { self.headless_service_name, self.cluster.namespace, self.cluster_domain_name ); - EnvVarSet::new() + let mut env_vars = EnvVarSet::new() .with_field_path( // Prefix with an underscore, so that it occurs before the other environment // variables which depend on it. @@ -313,11 +313,10 @@ impl NodeConfig { ) .with_value( &EnvVarName::from_str_unsafe(CONFIG_OPTION_DISCOVERY_SEED_HOSTS), - &self.discovery_service_name, - ) - .with_value( - &EnvVarName::from_str_unsafe(CONFIG_OPTION_INITIAL_CLUSTER_MANAGER_NODES), - self.initial_cluster_manager_nodes(), + format!( + "{}.{}.svc.{}", + self.seed_nodes_service_name, self.cluster.namespace, self.cluster_domain_name + ), ) .with_value( &EnvVarName::from_str_unsafe(CONFIG_OPTION_NODE_ROLES), @@ -330,8 +329,16 @@ impl NodeConfig { // Node roles cannot contain commas, therefore creating a comma-separated list // is safe. .join(","), - ) - .merge(self.role_group_config.env_overrides.clone()) + ); + + if let Some(initial_cluster_manager_nodes) = self.initial_cluster_manager_nodes() { + env_vars = env_vars.with_value( + &EnvVarName::from_str_unsafe(CONFIG_OPTION_INITIAL_CLUSTER_MANAGER_NODES), + initial_cluster_manager_nodes, + ); + } + + env_vars.merge(self.role_group_config.env_overrides.clone()) } fn to_yaml(kv: serde_json::Map) -> String { @@ -357,33 +364,70 @@ impl NodeConfig { } } - /// Configuration for `cluster.initial_cluster_manager_nodes` which replaces - /// `cluster.initial_master_nodes`, see - /// . 
+ /// Configuration for `cluster.initial_cluster_manager_nodes` + /// + /// Returns the node names of the initial cluster-manager nodes if + /// * this is a multi-node cluster and + /// * this node has the cluster-manager node role. + /// + /// Please read the following sections for an explanation of these restrictions. + /// + /// This configuration setting replaces the setting `cluster.initial_master_nodes`, see + /// . /// - /// According to - /// , - /// it contains "a list of cluster-manager-eligible nodes used to bootstrap the cluster." + /// This setting is required on nodes with the cluster-manager node role on a multi-node + /// cluster. Otherwise the bootstrapping of the cluster fails and all pods report: + /// > Wait for cluster to be available ... /// - /// However, the documentation for Elasticsearch is more detailed and contains the following - /// notes (see ): + /// This setting must not be set on a single-node cluster, because otherwise the following + /// error is thrown: + /// > setting [cluster.initial_cluster_manager_nodes] is not allowed when [discovery.type] is set to [single-node] + /// + /// see + /// + /// This setting does not seem to have an effect on nodes without the cluster-manager node + /// role. However, as it is recommended (see the Elasticsearch documentation below) to not set + /// it on master-ineligible nodes, it is not set. + /// + /// This setting seems to be ignored when the cluster has already formed. It is recommended in + /// the Elasticsearch documentation to remove it once the cluster has formed, but as it is hard + /// to determine if the bootstrapping was successfully completed, this setting is still set. + /// Adding a new cluster-manager node and updating this setting also seems to be okay. + /// + /// # OpenSearch documentation + /// + /// > This setting is required when bootstrapping a cluster for the first time and should + /// > contain the node names (as defined by `node.name`) of the initial cluster-manager-eligible + /// > nodes. This list should be empty for nodes joining an existing cluster. + /// + /// see + /// + /// # Elasticsearch documentation + /// + /// The documentation for Elasticsearch is more detailed and contains the following + /// notes: /// * Remove this setting once the cluster has formed, and never set it again for this cluster. /// * Do not configure this setting on master-ineligible nodes. /// * Do not configure this setting on nodes joining an existing cluster. /// * Do not configure this setting on nodes which are restarting. /// * Do not configure this setting when performing a full-cluster restart. /// - /// The OpenSearch Helm chart only sets master nodes but does not handle the other cases (see - /// ), - /// so they are also ignored here for the moment. - fn initial_cluster_manager_nodes(&self) -> String { - if !self.cluster.is_single_node() - && self + /// see + /// + /// # Implementation in the OpenSearch Helm chart + /// + /// The OpenSearch Helm chart sets this setting on master nodes on multi-node clusters, see + /// see . + fn initial_cluster_manager_nodes(&self) -> Option { + if self.cluster.is_single_node() + || !self .role_group_config .config .node_roles .contains(&v1alpha1::NodeRole::ClusterManager) { + None + } else { let cluster_manager_configs = self .cluster .role_group_configs_filtered_by_node_role(&v1alpha1::NodeRole::ClusterManager); @@ -406,11 +450,7 @@ impl NodeConfig { ); } // Pod names cannot contain commas, therefore creating a comma-separated list is safe. 
- pod_names.join(",") - } else { - // This setting is not allowed on single node cluster, see - // - String::new() + Some(pod_names.join(",")) } } @@ -436,6 +476,7 @@ impl NodeConfig { mod tests { use std::collections::BTreeMap; + use pretty_assertions::assert_eq; use stackable_operator::{ commons::{ affinity::StackableAffinity, @@ -445,7 +486,6 @@ mod tests { k8s_openapi::api::core::v1::PodTemplateSpec, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -490,6 +530,7 @@ mod tests { replicas: test_config.replicas, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("cluster-internal"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( @@ -541,7 +582,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch-cluster"), NamespaceName::from_str_unsafe("default"), uuid!("0b1e30e6-326e-4c1a-868d-ad6598b49e8b"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [( RoleGroupName::from_str_unsafe("default"), role_group_config.clone(), @@ -549,13 +590,14 @@ mod tests { .into(), v1alpha1::OpenSearchTls::default(), vec![], + None, ); NodeConfig::new( cluster, role_group_name, role_group_config, - ServiceName::from_str_unsafe("my-opensearch-cluster-manager"), + ServiceName::from_str_unsafe("my-opensearch-seed-nodes"), DomainName::from_str("cluster.local").expect("should be a valid domain name"), ServiceName::from_str_unsafe("my-opensearch-cluster-default-headless"), ) @@ -667,7 +709,7 @@ mod tests { ) .with_value( &EnvVarName::from_str_unsafe("discovery.seed_hosts"), - "my-opensearch-cluster-manager", + "my-opensearch-seed-nodes.default.svc.cluster.local", ) .with_value( &EnvVarName::from_str_unsafe("http.publish_host"), @@ -728,11 +770,11 @@ mod tests { }); assert_eq!( - "".to_owned(), + None, node_config_single_node.initial_cluster_manager_nodes() ); assert_eq!( - "my-opensearch-cluster-nodes-default-0,my-opensearch-cluster-nodes-default-1,my-opensearch-cluster-nodes-default-2".to_owned(), + Some("my-opensearch-cluster-nodes-default-0,my-opensearch-cluster-nodes-default-1,my-opensearch-cluster-nodes-default-2".to_owned()), node_config_multiple_nodes.initial_cluster_manager_nodes() ); } diff --git a/rust/operator-binary/src/controller/build/role_builder.rs b/rust/operator-binary/src/controller/build/role_builder.rs index 0ecbbd1..f7332e5 100644 --- a/rust/operator-binary/src/controller/build/role_builder.rs +++ b/rust/operator-binary/src/controller/build/role_builder.rs @@ -1,11 +1,14 @@ //! 
Builder for role resources +use std::str::FromStr; + use stackable_operator::{ builder::meta::ObjectMetaBuilder, + crd::listener, k8s_openapi::{ Resource, api::{ - core::v1::{Service, ServiceAccount, ServicePort, ServiceSpec}, + core::v1::{ConfigMap, Service, ServiceAccount, ServicePort, ServiceSpec}, policy::v1::PodDisruptionBudget, rbac::v1::{ClusterRole, RoleBinding, RoleRef, Subject}, }, @@ -17,17 +20,21 @@ use stackable_operator::{ }, }; -use super::role_group_builder::{ - HTTP_PORT, HTTP_PORT_NAME, RoleGroupBuilder, TRANSPORT_PORT, TRANSPORT_PORT_NAME, -}; use crate::{ - controller::{ContextNames, ValidatedCluster}, + controller::{ + ContextNames, HTTP_PORT, HTTP_PORT_NAME, TRANSPORT_PORT, TRANSPORT_PORT_NAME, + ValidatedCluster, build::role_group_builder::RoleGroupBuilder, + }, framework::{ NameIsValidLabelValue, builder::{ meta::ownerreference_from_resource, pdb::pod_disruption_budget_builder_with_role, }, role_utils::ResourceNames, + types::{ + kubernetes::{ConfigMapName, ListenerName, ServiceName}, + operator::ClusterName, + }, }, }; @@ -64,7 +71,8 @@ impl<'a> RoleBuilder<'a> { role_group_name.clone(), role_group_config.clone(), self.context_names, - self.resource_names.discovery_service_name(), + seed_nodes_service_name(&self.cluster.name), + discovery_service_listener_name(&self.cluster.name), ) }) .collect() @@ -101,28 +109,14 @@ impl<'a> RoleBuilder<'a> { } /// Builds a Service that references all nodes with the cluster_manager node role - /// - /// Initially, this service was meant to be used by - /// [`super::node_config::NodeConfig::initial_cluster_manager_nodes`], but the function uses now another approach. - /// Afterwards, it was meant to be used as an entry point to OpenSearch, but it could also make - /// sense to use coordinating only nodes as entry points and not cluster manager nodes. - /// Therefore, this service will bei either adapted or removed. There is already an according - /// task entry in . - pub fn build_cluster_manager_service(&self) -> Service { - let ports = vec![ - ServicePort { - name: Some(HTTP_PORT_NAME.to_owned()), - port: HTTP_PORT.into(), - ..ServicePort::default() - }, - ServicePort { - name: Some(TRANSPORT_PORT_NAME.to_owned()), - port: TRANSPORT_PORT.into(), - ..ServicePort::default() - }, - ]; + pub fn build_seed_nodes_service(&self) -> Service { + let ports = vec![ServicePort { + name: Some(TRANSPORT_PORT_NAME.to_owned()), + port: TRANSPORT_PORT.into(), + ..ServicePort::default() + }]; - let metadata = self.common_metadata(self.resource_names.discovery_service_name()); + let metadata = self.common_metadata(seed_nodes_service_name(&self.cluster.name)); let service_selector = RoleGroupBuilder::cluster_manager_labels(&self.cluster, self.context_names); @@ -144,9 +138,75 @@ impl<'a> RoleBuilder<'a> { } } + /// Builds a Listener whose status is used to populate the discovery ConfigMap. 
+ pub fn build_discovery_service_listener(&self) -> listener::v1alpha1::Listener { + let metadata = self.common_metadata(discovery_service_listener_name(&self.cluster.name)); + + let listener_class = &self.cluster.role_config.discovery_service_listener_class; + + let ports = vec![listener::v1alpha1::ListenerPort { + name: HTTP_PORT_NAME.to_owned(), + port: HTTP_PORT.into(), + protocol: Some("TCP".to_owned()), + }]; + + listener::v1alpha1::Listener { + metadata, + spec: listener::v1alpha1::ListenerSpec { + class_name: Some(listener_class.to_string()), + ports: Some(ports), + ..listener::v1alpha1::ListenerSpec::default() + }, + status: None, + } + } + + /// Builds the discovery ConfigMap if the discovery endpoint is already known. + /// + /// The discovery endpoint is derived from the status of the discovery service Listener. If the + /// status is not set yet, the reconciliation process will occur again once the Listener status + /// is updated, leading to the eventual creation of the discovery ConfigMap. + pub fn build_discovery_config_map(&self) -> Option { + let discovery_endpoint = self.cluster.discovery_endpoint.as_ref()?; + + let metadata = self.common_metadata(discovery_config_map_name(&self.cluster.name)); + + let protocol = if self.cluster.tls_config.server_secret_class.is_some() { + "https" + } else { + "http" + }; + + let data = [ + ("OPENSEARCH_PROTOCOL".to_owned(), protocol.to_owned()), + ( + "OPENSEARCH_HOSTNAME".to_owned(), + discovery_endpoint.hostname.to_string(), + ), + ( + "OPENSEARCH_PORT".to_owned(), + discovery_endpoint.port.to_string(), + ), + ( + "OPENSEARCH_HOSTS".to_owned(), + format!( + "{protocol}://{hostname}:{port}", + hostname = discovery_endpoint.hostname, + port = discovery_endpoint.port + ), + ), + ]; + + Some(ConfigMap { + metadata, + data: Some(data.into()), + ..ConfigMap::default() + }) + } + /// Builds a [`PodDisruptionBudget`] used by all role-groups pub fn build_pdb(&self) -> Option { - let pdb_config = &self.cluster.role_config.pod_disruption_budget; + let pdb_config = &self.cluster.role_config.common.pod_disruption_budget; if pdb_config.enabled { let max_unavailable = pdb_config @@ -211,6 +271,43 @@ impl<'a> RoleBuilder<'a> { } } +fn seed_nodes_service_name(cluster_name: &ClusterName) -> ServiceName { + const SUFFIX: &str = "-seed-nodes"; + + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH + SUFFIX.len() <= ServiceName::MAX_LENGTH, + "The string `-seed-nodes` must not exceed the limit of Service names." + ); + let _ = ClusterName::IS_RFC_1035_LABEL_NAME; + let _ = ClusterName::IS_VALID_LABEL_VALUE; + + ServiceName::from_str(&format!("{}{SUFFIX}", cluster_name.as_ref())) + .expect("should be a valid Service name") +} + +fn discovery_config_map_name(cluster_name: &ClusterName) -> ConfigMapName { + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH <= ConfigMapName::MAX_LENGTH, + "The string `` must not exceed the limit of ConfigMap names." + ); + let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME; + + ConfigMapName::from_str(cluster_name.as_ref()).expect("should be a valid ConfigMap name") +} + +pub fn discovery_service_listener_name(cluster_name: &ClusterName) -> ListenerName { + // compile-time checks + const _: () = assert!( + ClusterName::MAX_LENGTH <= ListenerName::MAX_LENGTH, + "The string `` must not exceed the limit of Listener names." 
+ ); + let _ = ClusterName::IS_RFC_1123_SUBDOMAIN_NAME; + + ListenerName::from_str(cluster_name.as_ref()).expect("should be a valid Listener name") +} + #[cfg(test)] mod tests { use std::{ @@ -230,7 +327,6 @@ mod tests { k8s_openapi::api::core::v1::PodTemplateSpec, kvp::LabelValue, product_logging::spec::AutomaticContainerLogConfig, - role_utils::GenericRoleConfig, shared::time::Duration, }; use uuid::uuid; @@ -239,14 +335,22 @@ mod tests { use crate::{ controller::{ ContextNames, OpenSearchRoleGroupConfig, ValidatedCluster, - ValidatedContainerLogConfigChoice, ValidatedLogging, ValidatedOpenSearchConfig, + ValidatedContainerLogConfigChoice, ValidatedDiscoveryEndpoint, ValidatedLogging, + ValidatedOpenSearchConfig, + build::role_builder::{ + discovery_config_map_name, discovery_service_listener_name, seed_nodes_service_name, + }, }, crd::{NodeRoles, v1alpha1}, framework::{ builder::pod::container::EnvVarSet, role_utils::GenericProductSpecificCommonConfig, types::{ - kubernetes::{ListenerClassName, NamespaceName}, + common::Port, + kubernetes::{ + ConfigMapName, Hostname, ListenerClassName, ListenerName, NamespaceName, + ServiceName, + }, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, RoleGroupName, @@ -273,6 +377,7 @@ mod tests { replicas: 1, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("cluster-internal"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( @@ -311,7 +416,12 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch-cluster"), NamespaceName::from_str_unsafe("default"), uuid!("0b1e30e6-326e-4c1a-868d-ad6598b49e8b"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig { + discovery_service_listener_class: ListenerClassName::from_str_unsafe( + "external-stable", + ), + ..v1alpha1::OpenSearchRoleConfig::default() + }, [( RoleGroupName::from_str_unsafe("default"), role_group_config.clone(), @@ -319,6 +429,10 @@ mod tests { .into(), v1alpha1::OpenSearchTls::default(), vec![], + Some(ValidatedDiscoveryEndpoint { + hostname: Hostname::from_str_unsafe("1.2.3.4"), + port: Port(12345), + }), ); RoleBuilder::new(cluster, context_names) @@ -414,13 +528,12 @@ mod tests { } #[test] - fn test_build_cluster_manager_service() { + fn test_build_seed_nodes_service() { let context_names = context_names(); let role_builder = role_builder(&context_names); - let cluster_manager_service = - serde_json::to_value(role_builder.build_cluster_manager_service()) - .expect("should be serializable"); + let seed_nodes_service = serde_json::to_value(role_builder.build_seed_nodes_service()) + .expect("should be serializable"); assert_eq!( json!({ @@ -435,7 +548,7 @@ mod tests { "app.kubernetes.io/version": "3.1.0", "stackable.tech/vendor": "Stackable" }, - "name": "my-opensearch-cluster", + "name": "my-opensearch-cluster-seed-nodes", "namespace": "default", "ownerReferences": [ { @@ -450,10 +563,6 @@ mod tests { "spec": { "clusterIP": "None", "ports": [ - { - "name": "http", - "port": 9200 - }, { "name": "transport", "port": 9300 @@ -469,7 +578,103 @@ mod tests { "type": "ClusterIP" } }), - cluster_manager_service + seed_nodes_service + ); + } + + #[test] + fn test_build_discovery_service_listener() { + let context_names = context_names(); + let role_builder = role_builder(&context_names); + + let discovery_service_listener = + serde_json::to_value(role_builder.build_discovery_service_listener()) + 
.expect("should be serializable"); + + assert_eq!( + json!({ + "apiVersion": "listeners.stackable.tech/v1alpha1", + "kind": "Listener", + "metadata": { + "labels": { + "app.kubernetes.io/component": "nodes", + "app.kubernetes.io/instance": "my-opensearch-cluster", + "app.kubernetes.io/managed-by": "opensearch.stackable.tech_opensearchcluster", + "app.kubernetes.io/name": "opensearch", + "app.kubernetes.io/version": "3.1.0", + "stackable.tech/vendor": "Stackable", + }, + "name": "my-opensearch-cluster", + "namespace": "default", + "ownerReferences": [ + { + "apiVersion": "opensearch.stackable.tech/v1alpha1", + "controller": true, + "kind": "OpenSearchCluster", + "name": "my-opensearch-cluster", + "uid": "0b1e30e6-326e-4c1a-868d-ad6598b49e8b", + }, + ], + }, + "spec": { + "className": "external-stable", + "extraPodSelectorLabels": {}, + "objectOverrides": [], + "ports": [ + { + "name": "http", + "port": 9200, + "protocol": "TCP", + }, + ], + "publishNotReadyAddresses": null, + }, + }), + discovery_service_listener + ); + } + + #[test] + fn test_build_discovery_config_map() { + let context_names = context_names(); + let role_builder = role_builder(&context_names); + + let discovery_config_map = serde_json::to_value(role_builder.build_discovery_config_map()) + .expect("should be serializable"); + + assert_eq!( + json!({ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "labels": { + "app.kubernetes.io/component": "nodes", + "app.kubernetes.io/instance": "my-opensearch-cluster", + "app.kubernetes.io/managed-by": "opensearch.stackable.tech_opensearchcluster", + "app.kubernetes.io/name": "opensearch", + "app.kubernetes.io/version": "3.1.0", + "stackable.tech/vendor": "Stackable", + }, + "name": "my-opensearch-cluster", + "namespace": "default", + "ownerReferences": [ + { + "apiVersion": "opensearch.stackable.tech/v1alpha1", + "controller": true, + "kind": "OpenSearchCluster", + "name": "my-opensearch-cluster", + "uid": "0b1e30e6-326e-4c1a-868d-ad6598b49e8b", + }, + ], + }, + "data": { + "OPENSEARCH_HOSTNAME": "1.2.3.4", + "OPENSEARCH_PORT": "12345", + "OPENSEARCH_PROTOCOL": "https", + "OPENSEARCH_HOSTS": "https://1.2.3.4:12345", + }, + }), + discovery_config_map ); } @@ -517,4 +722,34 @@ mod tests { pdb ); } + + #[test] + fn test_seed_nodes_service_name() { + let cluster_name = ClusterName::from_str_unsafe("test-cluster"); + + assert_eq!( + ServiceName::from_str_unsafe("test-cluster-seed-nodes"), + seed_nodes_service_name(&cluster_name) + ); + } + + #[test] + fn test_discovery_config_map_name() { + let cluster_name = ClusterName::from_str_unsafe("test-cluster"); + + assert_eq!( + ConfigMapName::from_str_unsafe("test-cluster"), + discovery_config_map_name(&cluster_name) + ); + } + + #[test] + fn test_discovery_service_listener_name() { + let cluster_name = ClusterName::from_str_unsafe("test-cluster"); + + assert_eq!( + ListenerName::from_str_unsafe("test-cluster"), + discovery_service_listener_name(&cluster_name) + ); + } } diff --git a/rust/operator-binary/src/controller/build/role_group_builder.rs b/rust/operator-binary/src/controller/build/role_group_builder.rs index 9fce8a4..c964ac8 100644 --- a/rust/operator-binary/src/controller/build/role_group_builder.rs +++ b/rust/operator-binary/src/controller/build/role_group_builder.rs @@ -15,9 +15,9 @@ use stackable_operator::{ apps::v1::{StatefulSet, StatefulSetSpec}, core::v1::{ Affinity, ConfigMap, ConfigMapVolumeSource, Container, ContainerPort, - EmptyDirVolumeSource, KeyToPath, PersistentVolumeClaim, PodSecurityContext, - PodSpec, 
PodTemplateSpec, Probe, SecretVolumeSource, Service, ServicePort, - ServiceSpec, TCPSocketAction, Volume, VolumeMount, + EmptyDirVolumeSource, KeyToPath, PodSecurityContext, PodSpec, PodTemplateSpec, + Probe, SecretVolumeSource, Service, ServicePort, ServiceSpec, TCPSocketAction, + Volume, VolumeMount, }, }, apimachinery::pkg::{ @@ -42,7 +42,8 @@ use super::{ use crate::{ constant, controller::{ - ContextNames, OpenSearchRoleGroupConfig, ValidatedCluster, + ContextNames, HTTP_PORT, HTTP_PORT_NAME, OpenSearchRoleGroupConfig, TRANSPORT_PORT, + TRANSPORT_PORT_NAME, ValidatedCluster, build::product_logging::config::{ MAX_OPENSEARCH_SERVER_LOG_FILES_SIZE, vector_config_file_extra_env_vars, }, @@ -62,28 +63,27 @@ use crate::{ }, role_group_utils::ResourceNames, types::{ - common::Port, kubernetes::{ - PersistentVolumeClaimName, SecretClassName, ServiceAccountName, ServiceName, - VolumeName, + ListenerName, PersistentVolumeClaimName, SecretClassName, ServiceAccountName, + ServiceName, VolumeName, }, operator::RoleGroupName, }, }, }; -pub const HTTP_PORT_NAME: &str = "http"; -pub const HTTP_PORT: Port = Port(9200); -pub const TRANSPORT_PORT_NAME: &str = "transport"; -pub const TRANSPORT_PORT: Port = Port(9300); - constant!(CONFIG_VOLUME_NAME: VolumeName = "config"); constant!(LOG_CONFIG_VOLUME_NAME: VolumeName = "log-config"); constant!(DATA_VOLUME_NAME: VolumeName = "data"); -constant!(LISTENER_VOLUME_NAME: PersistentVolumeClaimName = "listener"); -const LISTENER_VOLUME_DIR: &str = "/stackable/listener"; +// This is the main listener which is sometimes referenced by users in podOverrides, so keep its +// name simple. +constant!(ROLE_GROUP_LISTENER_VOLUME_NAME: PersistentVolumeClaimName = "listener"); +const ROLE_GROUP_LISTENER_VOLUME_DIR: &str = "/stackable/listeners/role-group"; + +constant!(DISCOVERY_SERVICE_LISTENER_VOLUME_NAME: PersistentVolumeClaimName = "discovery-service-listener"); +const DISCOVERY_SERVICE_LISTENER_VOLUME_DIR: &str = "/stackable/listeners/discovery-service"; constant!(TLS_SERVER_VOLUME_NAME: VolumeName = "tls-server"); constant!(TLS_INTERNAL_VOLUME_NAME: VolumeName = "tls-internal"); @@ -106,6 +106,7 @@ pub struct RoleGroupBuilder<'a> { role_group_config: OpenSearchRoleGroupConfig, context_names: &'a ContextNames, resource_names: ResourceNames, + discovery_service_listener_name: ListenerName, } impl<'a> RoleGroupBuilder<'a> { @@ -115,7 +116,8 @@ impl<'a> RoleGroupBuilder<'a> { role_group_name: RoleGroupName, role_group_config: OpenSearchRoleGroupConfig, context_names: &'a ContextNames, - discovery_service_name: ServiceName, + seed_nodes_service_name: ServiceName, + discovery_service_listener_name: ListenerName, ) -> RoleGroupBuilder<'a> { let resource_names = ResourceNames { cluster_name: cluster.name.clone(), @@ -129,7 +131,7 @@ impl<'a> RoleGroupBuilder<'a> { cluster.clone(), role_group_name.clone(), role_group_config.clone(), - discovery_service_name, + seed_nodes_service_name, context_names.cluster_domain_name.clone(), resource_names.headless_service_name(), ), @@ -137,6 +139,7 @@ impl<'a> RoleGroupBuilder<'a> { role_group_config, context_names, resource_names, + discovery_service_listener_name, } } @@ -196,23 +199,36 @@ impl<'a> RoleGroupBuilder<'a> { .data .build_pvc(DATA_VOLUME_NAME.as_ref(), Some(vec!["ReadWriteOnce"])); - let listener_group_name = self.resource_names.listener_name(); - - // Listener endpoints for the all rolegroups will use persistent - // volumes so that load balancers can hard-code the target - // addresses. 
This will be the case even when no class is set (and
-        // the value defaults to cluster-internal) as the address should
-        // still be consistent.
-        let listener_volume_claim_template = listener_operator_volume_source_builder_build_pvc(
-            &ListenerReference::Listener(listener_group_name),
-            &self.recommended_labels(),
-            &LISTENER_VOLUME_NAME,
-        );
+        // Listener endpoints for all rolegroups will use persistent volumes so that load balancers
+        // can hard-code the target addresses. This will be the case even when no class is set (and
+        // the value defaults to cluster-internal) as the address should still be consistent.
+        let role_group_listener_volume_claim_template =
+            listener_operator_volume_source_builder_build_pvc(
+                &ListenerReference::Listener(self.resource_names.listener_name()),
+                &self.recommended_labels(),
+                &ROLE_GROUP_LISTENER_VOLUME_NAME,
+            );
+
+        let maybe_discovery_service_listener_volume_claim_template = self
+            .role_group_config
+            .config
+            .discovery_service_exposed
+            .then(|| {
+                listener_operator_volume_source_builder_build_pvc(
+                    &ListenerReference::Listener(self.discovery_service_listener_name.to_owned()),
+                    &self.recommended_labels(),
+                    &DISCOVERY_SERVICE_LISTENER_VOLUME_NAME,
+                )
+            });

-        let pvcs: Option<Vec<PersistentVolumeClaim>> = Some(vec![
-            data_volume_claim_template,
-            listener_volume_claim_template,
-        ]);
+        let pvcs = vec![
+            Some(data_volume_claim_template),
+            Some(role_group_listener_volume_claim_template),
+            maybe_discovery_service_listener_volume_claim_template,
+        ]
+        .into_iter()
+        .flatten()
+        .collect();

         let spec = StatefulSetSpec {
             // Order does not matter for OpenSearch
@@ -224,7 +240,7 @@
             },
             service_name: Some(self.resource_names.headless_service_name().to_string()),
             template,
-            volume_claim_templates: pvcs,
+            volume_claim_templates: Some(pvcs),
             ..StatefulSetSpec::default()
         };

@@ -238,7 +254,6 @@
     /// Builds the [`PodTemplateSpec`] for the role-group [`StatefulSet`]
     fn build_pod_template(&self) -> PodTemplateSpec {
         let mut node_role_labels = Labels::new();
-        let service_scopes = vec![self.node_config.discovery_service_name.clone()];

         for node_role in self.role_group_config.config.node_roles.iter() {
             node_role_labels.insert(Self::build_node_role_label(node_role));
@@ -289,6 +304,25 @@
             self.resource_names.role_group_config_map()
         };

+        let mut internal_tls_volume_service_scopes = vec![];
+        if self
+            .role_group_config
+            .config
+            .node_roles
+            .contains(&v1alpha1::NodeRole::ClusterManager)
+        {
+            internal_tls_volume_service_scopes
+                .push(self.node_config.seed_nodes_service_name.clone());
+        }
+        let internal_tls_volume = self.build_tls_volume(
+            &TLS_INTERNAL_VOLUME_NAME,
+            &self.cluster.tls_config.internal_secret_class,
+            internal_tls_volume_service_scopes,
+            SecretFormat::TlsPem,
+            &self.role_group_config.config.requested_secret_lifetime,
+            vec![ROLE_GROUP_LISTENER_VOLUME_NAME.clone()],
+        );
+
         let mut volumes = vec![
             Volume {
                 name: CONFIG_VOLUME_NAME.to_string(),
@@ -318,24 +352,22 @@
                 }),
                 ..Volume::default()
             },
-            self.build_tls_volume(
-                &TLS_INTERNAL_VOLUME_NAME,
-                &self.cluster.tls_config.internal_secret_class,
-                vec![],
-                SecretFormat::TlsPem,
-                &self.role_group_config.config.requested_secret_lifetime,
-                &LISTENER_VOLUME_NAME,
-            ),
+            internal_tls_volume,
         ];

         if let Some(tls_http_secret_class_name) = &self.cluster.tls_config.server_secret_class {
+            let mut listener_scopes = vec![ROLE_GROUP_LISTENER_VOLUME_NAME.to_owned()];
+            if
self.role_group_config.config.discovery_service_exposed {
+                listener_scopes.push(DISCOVERY_SERVICE_LISTENER_VOLUME_NAME.to_owned());
+            }
+
             volumes.push(self.build_tls_volume(
                 &TLS_SERVER_VOLUME_NAME,
                 tls_http_secret_class_name,
-                service_scopes,
+                vec![],
                 SecretFormat::TlsPem,
                 &self.role_group_config.config.requested_secret_lifetime,
-                &LISTENER_VOLUME_NAME,
+                listener_scopes,
             ))
         };

@@ -417,9 +449,6 @@
     }

     /// Returns the labels of OpenSearch nodes with the `cluster_manager` role.
-    ///
-    /// As described in [`super::role_builder::RoleBuilder::build_cluster_manager_service`], this
-    /// function will be changed or deleted.
     pub fn cluster_manager_labels(
         cluster: &ValidatedCluster,
         context_names: &ContextNames,
@@ -551,8 +580,8 @@ cp --archive config/opensearch.keystore {OPENSEARCH_INITIALIZED_KEYSTORE_DIRECTO
                 ..VolumeMount::default()
             },
             VolumeMount {
-                mount_path: LISTENER_VOLUME_DIR.to_owned(),
-                name: LISTENER_VOLUME_NAME.to_string(),
+                mount_path: ROLE_GROUP_LISTENER_VOLUME_DIR.to_owned(),
+                name: ROLE_GROUP_LISTENER_VOLUME_NAME.to_string(),
                 ..VolumeMount::default()
             },
             VolumeMount {
@@ -567,12 +596,20 @@ cp --archive config/opensearch.keystore {OPENSEARCH_INITIALIZED_KEYSTORE_DIRECTO
             },
         ];

+        if self.role_group_config.config.discovery_service_exposed {
+            volume_mounts.push(VolumeMount {
+                mount_path: DISCOVERY_SERVICE_LISTENER_VOLUME_DIR.to_owned(),
+                name: DISCOVERY_SERVICE_LISTENER_VOLUME_NAME.to_string(),
+                ..VolumeMount::default()
+            });
+        }
+
         if self.cluster.tls_config.server_secret_class.is_some() {
             volume_mounts.push(VolumeMount {
                 mount_path: format!("{opensearch_path_conf}/tls/server"),
                 name: TLS_SERVER_VOLUME_NAME.to_string(),
                 ..VolumeMount::default()
-            })
+            });
         }

         if !self.cluster.keystores.is_empty() {
@@ -714,17 +751,17 @@
         let listener_class = self.role_group_config.config.listener_class.to_owned();

-        let ports = [listener::v1alpha1::ListenerPort {
-            name: HTTP_PORT_NAME.to_string(),
+        let ports = vec![listener::v1alpha1::ListenerPort {
+            name: HTTP_PORT_NAME.to_owned(),
             port: HTTP_PORT.into(),
-            protocol: Some("TCP".to_string()),
+            protocol: Some("TCP".to_owned()),
         }];

         listener::v1alpha1::Listener {
             metadata,
             spec: listener::v1alpha1::ListenerSpec {
                 class_name: Some(listener_class.to_string()),
-                ports: Some(ports.to_vec()),
+                ports: Some(ports),
                 ..listener::v1alpha1::ListenerSpec::default()
             },
             status: None,
@@ -780,7 +817,7 @@
         service_scopes: Vec<ServiceName>,
         secret_format: SecretFormat,
         requested_secret_lifetime: &Duration,
-        listener_scope: &PersistentVolumeClaimName,
+        listener_volume_scopes: Vec<PersistentVolumeClaimName>,
     ) -> Volume {
         let mut secret_volume_source_builder =
             SecretOperatorVolumeSourceBuilder::new(tls_secret_class_name);
@@ -788,11 +825,13 @@
         for scope in service_scopes {
             secret_volume_source_builder.with_service_scope(scope);
         }
+        for scope in listener_volume_scopes {
+            secret_volume_source_builder.with_listener_volume_scope(scope);
+        }

         VolumeBuilder::new(volume_name.to_string())
             .ephemeral(
                 secret_volume_source_builder
-                    .with_listener_volume_scope(listener_scope)
                     .with_pod_scope()
                     .with_format(secret_format)
                     .with_auto_tls_cert_lifetime(*requested_secret_lifetime)
@@ -820,20 +859,23 @@ mod tests {
         k8s_openapi::api::core::v1::PodTemplateSpec,
         kvp::LabelValue,
         product_logging::spec::AutomaticContainerLogConfig,
-
role_utils::GenericRoleConfig, shared::time::Duration, }; use strum::IntoEnumIterator; use uuid::uuid; use super::{ - CONFIG_VOLUME_NAME, DATA_VOLUME_NAME, LISTENER_VOLUME_NAME, LOG_CONFIG_VOLUME_NAME, - LOG_VOLUME_NAME, RoleGroupBuilder, + CONFIG_VOLUME_NAME, DATA_VOLUME_NAME, LOG_CONFIG_VOLUME_NAME, LOG_VOLUME_NAME, + ROLE_GROUP_LISTENER_VOLUME_NAME, RoleGroupBuilder, }; use crate::{ controller::{ ContextNames, OpenSearchRoleGroupConfig, ValidatedCluster, ValidatedContainerLogConfigChoice, ValidatedLogging, ValidatedOpenSearchConfig, + build::role_group_builder::{ + DISCOVERY_SERVICE_LISTENER_VOLUME_NAME, OPENSEARCH_KEYSTORE_VOLUME_NAME, + TLS_INTERNAL_VOLUME_NAME, TLS_SERVER_VOLUME_NAME, + }, }, crd::{NodeRoles, OpenSearchKeystoreKey, v1alpha1}, framework::{ @@ -842,8 +884,8 @@ mod tests { role_utils::GenericProductSpecificCommonConfig, types::{ kubernetes::{ - ConfigMapName, ListenerClassName, NamespaceName, SecretKey, SecretName, - ServiceAccountName, ServiceName, + ConfigMapName, ListenerClassName, ListenerName, NamespaceName, SecretKey, + SecretName, ServiceAccountName, ServiceName, }, operator::{ ClusterName, ControllerName, OperatorName, ProductName, ProductVersion, @@ -859,8 +901,12 @@ mod tests { let _ = CONFIG_VOLUME_NAME; let _ = LOG_CONFIG_VOLUME_NAME; let _ = DATA_VOLUME_NAME; - let _ = LISTENER_VOLUME_NAME; + let _ = ROLE_GROUP_LISTENER_VOLUME_NAME; + let _ = DISCOVERY_SERVICE_LISTENER_VOLUME_NAME; + let _ = TLS_SERVER_VOLUME_NAME; + let _ = TLS_INTERNAL_VOLUME_NAME; let _ = LOG_VOLUME_NAME; + let _ = OPENSEARCH_KEYSTORE_VOLUME_NAME; } fn context_names() -> ContextNames { @@ -887,6 +933,7 @@ mod tests { replicas: 1, config: ValidatedOpenSearchConfig { affinity: StackableAffinity::default(), + discovery_service_exposed: true, listener_class: ListenerClassName::from_str_unsafe("cluster-internal"), logging: ValidatedLogging { opensearch_container: ValidatedContainerLogConfigChoice::Automatic( @@ -925,7 +972,7 @@ mod tests { ClusterName::from_str_unsafe("my-opensearch-cluster"), NamespaceName::from_str_unsafe("default"), uuid!("0b1e30e6-326e-4c1a-868d-ad6598b49e8b"), - GenericRoleConfig::default(), + v1alpha1::OpenSearchRoleConfig::default(), [( RoleGroupName::from_str_unsafe("default"), role_group_config.clone(), @@ -939,6 +986,7 @@ mod tests { key: SecretKey::from_str_unsafe("my-keystore-file"), }, }], + None, ) } @@ -959,7 +1007,8 @@ mod tests { role_group_name, role_group_config, context_names, - ServiceName::from_str_unsafe("my-opensearch-cluster"), + ServiceName::from_str_unsafe("my-opensearch-cluster-seed-nodes"), + ListenerName::from_str_unsafe("my-opensearch-cluster"), ) } @@ -1146,13 +1195,9 @@ mod tests { } } }, - { - "name": "cluster.initial_cluster_manager_nodes", - "value": "" - }, { "name": "discovery.seed_hosts", - "value": "my-opensearch-cluster" + "value": "my-opensearch-cluster-seed-nodes.default.svc.cluster.local" }, { "name": "http.publish_host", @@ -1228,17 +1273,21 @@ mod tests { "name": "data" }, { - "mountPath": "/stackable/listener", + "mountPath": "/stackable/listeners/role-group", "name": "listener" }, { "mountPath": "/stackable/log", "name": "log" }, - { + { "mountPath": "/stackable/opensearch/config/tls/internal", "name": "tls-internal" }, + { + "mountPath": "/stackable/listeners/discovery-service", + "name": "discovery-service-listener" + }, { "mountPath": "/stackable/opensearch/config/tls/server", "name": "tls-server", @@ -1420,7 +1469,7 @@ mod tests { "secrets.stackable.tech/backend.autotls.cert.lifetime": "1d", "secrets.stackable.tech/class": 
"tls", "secrets.stackable.tech/format": "tls-pem", - "secrets.stackable.tech/scope": "listener-volume=listener,pod" + "secrets.stackable.tech/scope": "service=my-opensearch-cluster-seed-nodes,listener-volume=listener,pod" } }, "spec": { @@ -1446,7 +1495,7 @@ mod tests { "secrets.stackable.tech/backend.autotls.cert.lifetime": "1d", "secrets.stackable.tech/class": "tls", "secrets.stackable.tech/format": "tls-pem", - "secrets.stackable.tech/scope": "service=my-opensearch-cluster,listener-volume=listener,pod" + "secrets.stackable.tech/scope": "listener-volume=listener,listener-volume=discovery-service-listener,pod" } }, "spec": { @@ -1520,6 +1569,36 @@ mod tests { }, "name": "listener" }, + "spec": { + "accessModes": [ + "ReadWriteMany", + ], + "resources": { + "requests": { + "storage": "1", + }, + }, + "storageClassName": "listeners.stackable.tech", + }, + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "annotations": { + "listeners.stackable.tech/listener-name": "my-opensearch-cluster", + }, + "labels": { + "app.kubernetes.io/component": "nodes", + "app.kubernetes.io/instance": "my-opensearch-cluster", + "app.kubernetes.io/managed-by": "opensearch.stackable.tech_opensearchcluster", + "app.kubernetes.io/name": "opensearch", + "app.kubernetes.io/role-group": "default", + "app.kubernetes.io/version": "3.1.0", + "stackable.tech/vendor": "Stackable", + }, + "name": "discovery-service-listener", + }, "spec": { "accessModes": [ "ReadWriteMany" diff --git a/rust/operator-binary/src/controller/dereference.rs b/rust/operator-binary/src/controller/dereference.rs new file mode 100644 index 0000000..24adf74 --- /dev/null +++ b/rust/operator-binary/src/controller/dereference.rs @@ -0,0 +1,65 @@ +//! The dereference step in the OpenSearchCluster controller + +use snafu::{ResultExt, Snafu}; +use stackable_operator::{client::Client, crd::listener}; +use strum::{EnumDiscriminants, IntoStaticStr}; + +use crate::{ + controller::{DereferencedObjects, build::role_builder}, + crd::v1alpha1, + framework::{ + controller_utils::{get_cluster_name, get_namespace}, + types::{kubernetes::NamespaceName, operator::ClusterName}, + }, +}; + +#[derive(Snafu, Debug, EnumDiscriminants)] +#[strum_discriminants(derive(IntoStaticStr))] +pub enum Error { + #[snafu(display("failed to get the cluster name"))] + GetClusterName { + source: crate::framework::controller_utils::Error, + }, + + #[snafu(display("failed to get the cluster namespace"))] + GetClusterNamespace { + source: crate::framework::controller_utils::Error, + }, + + #[snafu(display("failed to fetch the discovery service listener"))] + FetchDiscoveryServiceListener { + source: stackable_operator::client::Error, + }, +} + +type Result = std::result::Result; + +/// Dereference additional objects that are required to build the cluster resources. 
+pub async fn dereference(
+    client: &Client,
+    cluster: &v1alpha1::OpenSearchCluster,
+) -> Result<DereferencedObjects> {
+    let cluster_name = get_cluster_name(cluster).context(GetClusterNameSnafu)?;
+    let namespace = get_namespace(cluster).context(GetClusterNamespaceSnafu)?;
+
+    let maybe_discovery_service_listener =
+        fetch_discovery_service_listener(client, &cluster_name, &namespace).await?;
+
+    Ok(DereferencedObjects {
+        maybe_discovery_service_listener,
+    })
+}
+
+async fn fetch_discovery_service_listener(
+    client: &Client,
+    cluster_name: &ClusterName,
+    namespace: &NamespaceName,
+) -> Result<Option<listener::v1alpha1::Listener>> {
+    let discovery_service_listener_name =
+        role_builder::discovery_service_listener_name(cluster_name);
+
+    client
+        .get_opt(discovery_service_listener_name.as_ref(), namespace.as_ref())
+        .await
+        .context(FetchDiscoveryServiceListenerSnafu)
+}
diff --git a/rust/operator-binary/src/controller/validate.rs b/rust/operator-binary/src/controller/validate.rs
index 717aae5..bacae8b 100644
--- a/rust/operator-binary/src/controller/validate.rs
+++ b/rust/operator-binary/src/controller/validate.rs
@@ -4,7 +4,8 @@ use std::{collections::BTreeMap, str::FromStr};

 use snafu::{OptionExt, ResultExt, Snafu};
 use stackable_operator::{
-    product_logging::spec::Logging, role_utils::RoleGroup, shared::time::Duration,
+    crd::listener, kube::ResourceExt, product_logging::spec::Logging, role_utils::RoleGroup,
+    shared::time::Duration,
 };
 use strum::{EnumDiscriminants, IntoStaticStr};

@@ -13,6 +14,7 @@ use super::{
     ValidatedLogging, ValidatedOpenSearchConfig,
 };
 use crate::{
+    controller::{DereferencedObjects, HTTP_PORT_NAME, ValidatedDiscoveryEndpoint},
     crd::v1alpha1::{self},
     framework::{
         builder::pod::container::{EnvVarName, EnvVarSet},
@@ -21,7 +23,11 @@ use crate::{
             VectorContainerLogConfig, validate_logging_configuration_for_container,
         },
         role_utils::{GenericProductSpecificCommonConfig, RoleGroupConfig, with_validated_config},
-        types::{kubernetes::ConfigMapName, operator::ClusterName},
+        types::{
+            common::Port,
+            kubernetes::{ConfigMapName, Hostname},
+            operator::ClusterName,
+        },
     },
 };

@@ -43,6 +49,9 @@ pub enum Error {
         source: crate::framework::controller_utils::Error,
     },

+    #[snafu(display("failed to get the HTTP port from the Listener status"))]
+    GetListenerStatusPort {},
+
     #[snafu(display(
         "failed to get vectorAggregatorConfigMapName; It must be set if enableVectorAgent is true."
))]
     GetVectorAggregatorConfigMapName {},

@@ -53,6 +62,16 @@ pub enum Error {
         source: crate::framework::builder::pod::container::Error,
     },

+    #[snafu(display("failed to parse the hostname from the Listener status"))]
+    ParseListenerStatusHostname {
+        source: crate::framework::macros::attributed_string_type::Error,
+    },
+
+    #[snafu(display("failed to parse the port from the Listener status"))]
+    ParseListenerStatusPort {
+        source: crate::framework::types::common::Error,
+    },
+
     #[snafu(display("failed to set product version"))]
     ParseProductVersion {
         source: crate::framework::macros::attributed_string_type::Error,
@@ -99,6 +118,7 @@ const DEFAULT_IMAGE_BASE_NAME: &str = "opensearch";
 pub fn validate(
     context_names: &ContextNames,
     cluster: &v1alpha1::OpenSearchCluster,
+    dereferenced_objects: &DereferencedObjects,
 ) -> Result<ValidatedCluster> {
     let cluster_name = get_cluster_name(cluster).context(GetClusterNameSnafu)?;
     let namespace = get_namespace(cluster).context(GetClusterNamespaceSnafu)?;
@@ -126,6 +146,12 @@ pub fn validate(
         role_group_configs.insert(role_group_name, validated_role_group_config);
     }

+    let validated_discovery_endpoint = validate_discovery_endpoint(
+        dereferenced_objects
+            .maybe_discovery_service_listener
+            .as_ref(),
+    )?;
+
     Ok(ValidatedCluster::new(
         product_image,
         product_version,
@@ -136,6 +162,7 @@ pub fn validate(
         role_group_configs,
         cluster.spec.cluster_config.tls.clone(),
         cluster.spec.cluster_config.keystore.clone(),
+        validated_discovery_endpoint,
     ))
 }

@@ -176,6 +203,7 @@ fn validate_role_group_config(
     let validated_config = ValidatedOpenSearchConfig {
         affinity: merged_role_group.config.config.affinity,
+        discovery_service_exposed: merged_role_group.config.config.discovery_service_exposed,
         listener_class: merged_role_group.config.config.listener_class,
         logging,
         node_roles: merged_role_group.config.config.node_roles,
@@ -235,6 +263,71 @@
     })
 }

+/// Return the validated discovery endpoint if a Listener is given whose status contains the
+/// endpoint.
+fn validate_discovery_endpoint(
+    maybe_discovery_service_listener: Option<&listener::v1alpha1::Listener>,
+) -> Result<Option<ValidatedDiscoveryEndpoint>> {
+    let validated_discovery_endpoint = if let Some(discovery_service_listener) =
+        maybe_discovery_service_listener
+    {
+        if let Some((hostname, port)) = extract_listener_ingresses(discovery_service_listener)? {
+            tracing::info!(
+                "The status of the discovery service listener {} contains the discovery endpoint. \
+                The discovery ConfigMap will be created or updated.",
+                discovery_service_listener.name_any()
+            );
+            Some(ValidatedDiscoveryEndpoint { hostname, port })
+        } else {
+            tracing::info!(
+                "The status of the discovery service listener {} does not yet contain the \
+                discovery endpoint. The creation of the discovery ConfigMap will be postponed \
+                until the status is updated.",
+                discovery_service_listener.name_any()
+            );
+            None
+        }
+    } else {
+        tracing::info!(
+            "The discovery service listener is not yet deployed. The creation of the discovery \
+            ConfigMap will be postponed until the discovery service listener is deployed and its \
+            status is set."
+        );
+        None
+    };
+
+    Ok(validated_discovery_endpoint)
+}
+
+/// Return the first address and the HTTP port from the given Listener if it has a status.
+fn extract_listener_ingresses(
+    discovery_service_listener: &listener::v1alpha1::Listener,
+) -> Result<Option<(Hostname, Port)>> {
+    let maybe_first_ingress_address = discovery_service_listener
+        .status
+        .as_ref()
+        .and_then(|status| status.ingress_addresses.as_ref())
+        .into_iter()
+        .flatten()
+        .next();
+
+    // It is okay if the status is not set yet. But if it is set, then it must be valid.
+    if let Some(ingress_address) = maybe_first_ingress_address {
+        let hostname = Hostname::from_str(&ingress_address.address)
+            .context(ParseListenerStatusHostnameSnafu)?;
+
+        let raw_port = *ingress_address
+            .ports
+            .get(HTTP_PORT_NAME)
+            .context(GetListenerStatusPortSnafu)?;
+        let port = Port::try_from(raw_port).context(ParseListenerStatusPortSnafu)?;
+
+        Ok(Some((hostname, port)))
+    } else {
+        Ok(None)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::{collections::BTreeMap, str::FromStr};
@@ -248,6 +341,7 @@ mod tests {
             product_image_selection::ResolvedProductImage,
             resources::{CpuLimits, MemoryLimits, PvcConfig, Resources},
         },
+        crd::listener::{self},
         deep_merger::ObjectOverrides,
         k8s_openapi::{
             api::core::v1::{
@@ -262,7 +356,7 @@ mod tests {
             ContainerLogConfigChoiceFragment, ContainerLogConfigFragment,
             CustomContainerLogConfigFragment, LogLevel, LoggerConfig, LoggingFragment,
         },
-        role_utils::{CommonConfiguration, GenericRoleConfig, Role, RoleGroup},
+        role_utils::{CommonConfiguration, Role, RoleGroup},
         shared::time::Duration,
     };
     use uuid::uuid;

@@ -270,7 +364,10 @@ mod tests {
     use super::{ErrorDiscriminants, validate};
     use crate::{
         built_info,
-        controller::{ContextNames, ValidatedCluster, ValidatedLogging, ValidatedOpenSearchConfig},
+        controller::{
+            ContextNames, DereferencedObjects, ValidatedCluster, ValidatedDiscoveryEndpoint,
+            ValidatedLogging, ValidatedOpenSearchConfig,
+        },
         crd::{NodeRoles, OpenSearchKeystoreKey, v1alpha1},
         framework::{
             builder::pod::container::{EnvVarName, EnvVarSet},
@@ -279,9 +376,10 @@ mod tests {
             },
             role_utils::{GenericProductSpecificCommonConfig, RoleGroupConfig},
             types::{
+                common::Port,
                 kubernetes::{
-                    ConfigMapName, ListenerClassName, NamespaceName, SecretClassName, SecretKey,
-                    SecretName,
+                    ConfigMapName, Hostname, ListenerClassName, NamespaceName, SecretClassName,
+                    SecretKey, SecretName,
                 },
                 operator::{
                     ClusterName, ControllerName, OperatorName, ProductName, ProductVersion,
@@ -293,7 +391,7 @@ mod tests {

     #[test]
     fn test_validate_ok() {
-        let result = validate(&context_names(), &cluster());
+        let result = validate(&context_names(), &cluster(), &dereferenced_objects());

         assert_eq!(
             Some(ValidatedCluster::new(
@@ -315,7 +413,7 @@ mod tests {
                 ClusterName::from_str_unsafe("my-opensearch"),
                 NamespaceName::from_str_unsafe("default"),
                 uuid!("e6ac237d-a6d4-43a1-8135-f36506110912"),
-                GenericRoleConfig::default(),
+                v1alpha1::OpenSearchRoleConfig::default(),
                 [(
                     RoleGroupName::from_str_unsafe("default"),
                     RoleGroupConfig {
@@ -359,6 +457,7 @@ mod tests {
                                 }),
                                 ..StackableAffinity::default()
                             },
+                            discovery_service_exposed: true,
                            listener_class: ListenerClassName::from_str_unsafe(
                                "listener-class-from-role-group-level"
                            ),
@@ -508,7 +607,11 @@ mod tests {
                        name: SecretName::from_str_unsafe("my-keystore-secret"),
                        key: SecretKey::from_str_unsafe("my-keystore-file")
                    }
-                }]
+                }],
+                Some(ValidatedDiscoveryEndpoint {
+                    hostname: Hostname::from_str_unsafe("my-opensearch.default.svc.cluster.local"),
+                    port: Port(9200),
+                })
            )),
            result.ok()
        );
    }

@@ -517,7
+620,7 @@ mod tests { #[test] fn test_validate_err_get_cluster_name() { test_validate_err( - |cluster| cluster.metadata.name = None, + |cluster, _| cluster.metadata.name = None, ErrorDiscriminants::GetClusterName, ); } @@ -525,7 +628,7 @@ mod tests { #[test] fn test_validate_err_get_cluster_namespace() { test_validate_err( - |cluster| cluster.metadata.namespace = None, + |cluster, _| cluster.metadata.namespace = None, ErrorDiscriminants::GetClusterNamespace, ); } @@ -533,7 +636,7 @@ mod tests { #[test] fn test_validate_err_get_cluster_uid() { test_validate_err( - |cluster| cluster.metadata.uid = None, + |cluster, _| cluster.metadata.uid = None, ErrorDiscriminants::GetClusterUid, ); } @@ -541,7 +644,7 @@ mod tests { #[test] fn test_validate_err_resolve_product_image() { test_validate_err( - |cluster| { + |cluster, _| { cluster.spec.image = serde_json::from_str(r#"{"productVersion": "invalid product version"}"#) .expect("should be a valid ProductImage structure") @@ -553,7 +656,7 @@ mod tests { #[test] fn test_validate_err_parse_role_group_name() { test_validate_err( - |cluster| { + |cluster, _| { let role_group = cluster .spec .nodes @@ -573,7 +676,7 @@ mod tests { #[test] fn test_validate_err_validate_logging_config() { test_validate_err( - |cluster| { + |cluster, _| { cluster.spec.nodes.config.config.logging.containers = [( v1alpha1::Container::OpenSearch, ContainerLogConfigFragment { @@ -595,7 +698,7 @@ mod tests { #[test] fn test_validate_err_get_vector_aggregator_config_map_name() { test_validate_err( - |cluster| { + |cluster, _| { cluster .spec .cluster_config @@ -608,7 +711,7 @@ mod tests { #[test] fn test_validate_err_termination_grace_period_too_long() { test_validate_err( - |cluster| { + |cluster, _| { cluster.spec.nodes.config.config.graceful_shutdown_timeout = Some(Duration::from_secs(u64::MAX)) }, @@ -619,7 +722,7 @@ mod tests { #[test] fn test_validate_err_parse_environment_variable() { test_validate_err( - |cluster| { + |cluster, _| { cluster.spec.nodes.config.env_overrides = [( "INVALID_ENVIRONMENT_VARIABLE_WITH_=".to_owned(), "value".to_owned(), @@ -630,14 +733,82 @@ mod tests { ); } + #[test] + fn test_validate_err_parse_listener_status_hostname() { + test_validate_err( + |_, dereferenced_objects| { + dereferenced_objects.maybe_discovery_service_listener = + Some(listener::v1alpha1::Listener { + metadata: ObjectMeta::default(), + spec: listener::v1alpha1::ListenerSpec::default(), + status: Some(listener::v1alpha1::ListenerStatus { + ingress_addresses: Some(vec![listener::v1alpha1::ListenerIngress { + address: "invalid hostname".to_owned(), + address_type: listener::v1alpha1::AddressType::Hostname, + ports: [("http".to_owned(), 9200)].into(), + }]), + ..listener::v1alpha1::ListenerStatus::default() + }), + }); + }, + ErrorDiscriminants::ParseListenerStatusHostname, + ); + } + + #[test] + fn test_validate_err_get_listener_status_port() { + test_validate_err( + |_, dereferenced_objects| { + dereferenced_objects.maybe_discovery_service_listener = + Some(listener::v1alpha1::Listener { + metadata: ObjectMeta::default(), + spec: listener::v1alpha1::ListenerSpec::default(), + status: Some(listener::v1alpha1::ListenerStatus { + ingress_addresses: Some(vec![listener::v1alpha1::ListenerIngress { + address: "my-opensearch.default.svc.cluster.local".to_owned(), + address_type: listener::v1alpha1::AddressType::Hostname, + // Validation should fail because the http port is expected. 
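+                                // Only the transport port is provided, so the lookup of the
+                                // http port fails.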
+                                ports: [("transport".to_owned(), 9300)].into(),
+                            }]),
+                            ..listener::v1alpha1::ListenerStatus::default()
+                        }),
+                    });
+            },
+            ErrorDiscriminants::GetListenerStatusPort,
+        );
+    }
+
+    #[test]
+    fn test_validate_err_parse_listener_status_port() {
+        test_validate_err(
+            |_, dereferenced_objects| {
+                dereferenced_objects.maybe_discovery_service_listener =
+                    Some(listener::v1alpha1::Listener {
+                        metadata: ObjectMeta::default(),
+                        spec: listener::v1alpha1::ListenerSpec::default(),
+                        status: Some(listener::v1alpha1::ListenerStatus {
+                            ingress_addresses: Some(vec![listener::v1alpha1::ListenerIngress {
+                                address: "my-opensearch.default.svc.cluster.local".to_owned(),
+                                address_type: listener::v1alpha1::AddressType::Hostname,
+                                ports: [("http".to_owned(), -1)].into(),
+                            }]),
+                            ..listener::v1alpha1::ListenerStatus::default()
+                        }),
+                    });
+            },
+            ErrorDiscriminants::ParseListenerStatusPort,
+        );
+    }
+
     fn test_validate_err(
-        f: fn(&mut v1alpha1::OpenSearchCluster) -> (),
+        change_test_objects: fn(&mut v1alpha1::OpenSearchCluster, &mut DereferencedObjects) -> (),
         expected_err: ErrorDiscriminants,
     ) {
         let mut cluster = cluster();
-        f(&mut cluster);
+        let mut dereferenced_objects = dereferenced_objects();
+        change_test_objects(&mut cluster, &mut dereferenced_objects);

-        let result = validate(&context_names(), &cluster);
+        let result = validate(&context_names(), &cluster, &dereferenced_objects);

         assert_eq!(Err(expected_err), result.map_err(ErrorDiscriminants::from));
     }

@@ -726,7 +897,7 @@ mod tests {
                     product_specific_common_config: GenericProductSpecificCommonConfig::default(
                     ),
                 },
-                role_config: GenericRoleConfig::default(),
+                role_config: v1alpha1::OpenSearchRoleConfig::default(),
                 role_groups: [(
                     "default".to_owned(),
                     RoleGroup {
@@ -799,4 +970,21 @@ mod tests {
             status: None,
         }
     }
+
+    fn dereferenced_objects() -> DereferencedObjects {
+        DereferencedObjects {
+            maybe_discovery_service_listener: Some(listener::v1alpha1::Listener {
+                metadata: ObjectMeta::default(),
+                spec: listener::v1alpha1::ListenerSpec::default(),
+                status: Some(listener::v1alpha1::ListenerStatus {
+                    ingress_addresses: Some(vec![listener::v1alpha1::ListenerIngress {
+                        address: "my-opensearch.default.svc.cluster.local".to_owned(),
+                        address_type: listener::v1alpha1::AddressType::Hostname,
+                        ports: [("http".to_owned(), 9200)].into(),
+                    }]),
+                    ..listener::v1alpha1::ListenerStatus::default()
+                }),
+            }),
+        }
+    }
 }
diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs
index de71092..1ddfa03 100644
--- a/rust/operator-binary/src/crd/mod.rs
+++ b/rust/operator-binary/src/crd/mod.rs
@@ -42,7 +42,8 @@ use crate::{
     },
 };

-constant!(DEFAULT_LISTENER_CLASS: ListenerClassName = "cluster-internal");
+constant!(DEFAULT_ROLE_GROUP_LISTENER_CLASS: ListenerClassName = "cluster-internal");
+constant!(DEFAULT_DISCOVERY_SERVICE_LISTENER_CLASS: ListenerClassName = "cluster-internal");
 constant!(TLS_DEFAULT_SECRET_CLASS: SecretClassName = "tls");

 #[versioned(
@@ -86,8 +87,11 @@ pub mod versioned {
         pub object_overrides: ObjectOverrides,

         // no doc - docs in Role struct
-        pub nodes:
-            Role<OpenSearchConfigFragment, GenericRoleConfig, GenericProductSpecificCommonConfig>,
+        pub nodes: Role<
+            OpenSearchConfigFragment,
+            OpenSearchRoleConfig,
+            GenericProductSpecificCommonConfig,
+        >,
     }

     #[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)]
@@ -207,6 +211,9 @@ pub mod versioned {
         #[fragment_attrs(serde(default))]
         pub affinity: StackableAffinity,

+        /// Determines whether this role group is exposed in the discovery service.
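+        ///
+        /// Defaults to `true`.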
+        pub discovery_service_exposed: bool,
+
         /// Time period Pods have to gracefully shut down, e.g. `30m`, `1h` or `2d`. Consult the
         /// operator documentation for details.
         #[fragment_attrs(serde(default))]
@@ -282,6 +289,17 @@ pub mod versioned {
         pub data: PvcConfig,
     }

+    #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)]
+    #[serde(rename_all = "camelCase")]
+    pub struct OpenSearchRoleConfig {
+        #[serde(flatten)]
+        pub common: GenericRoleConfig,
+
+        /// The [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) that is used for the discovery service.
+        #[serde(default = "discovery_service_listener_class_default")]
+        pub discovery_service_listener_class: ListenerClassName,
+    }
+
     #[derive(Clone, Default, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)]
     #[serde(rename_all = "camelCase")]
     pub struct OpenSearchClusterStatus {
@@ -325,12 +343,13 @@ impl v1alpha1::OpenSearchConfig {
                 node_affinity: None,
                 node_selector: None,
             },
+            discovery_service_exposed: Some(true),
             // Default taken from the Helm chart, see
             // https://github.com/opensearch-project/helm-charts/blob/opensearch-3.0.0/charts/opensearch/values.yaml#L364
             graceful_shutdown_timeout: Some(
                 Duration::from_str("2m").expect("should be a valid duration"),
             ),
-            listener_class: Some(DEFAULT_LISTENER_CLASS.to_owned()),
+            listener_class: Some(DEFAULT_ROLE_GROUP_LISTENER_CLASS.to_owned()),
             logging: product_logging::spec::default_logging(),
             // Defaults taken from the Helm chart, see
             // https://github.com/opensearch-project/helm-charts/blob/opensearch-3.0.0/charts/opensearch/values.yaml#L16-L20
@@ -389,6 +408,19 @@ fn internal_secret_class_default() -> SecretClassName {
     TLS_DEFAULT_SECRET_CLASS.to_owned()
 }

+impl Default for v1alpha1::OpenSearchRoleConfig {
+    fn default() -> Self {
+        v1alpha1::OpenSearchRoleConfig {
+            common: GenericRoleConfig::default(),
+            discovery_service_listener_class: discovery_service_listener_class_default(),
+        }
+    }
+}
+
+fn discovery_service_listener_class_default() -> ListenerClassName {
+    DEFAULT_DISCOVERY_SERVICE_LISTENER_CLASS.to_owned()
+}
+
 #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Serialize)]
 pub struct NodeRoles(pub Vec<v1alpha1::NodeRole>);
diff --git a/rust/operator-binary/src/framework/role_utils.rs b/rust/operator-binary/src/framework/role_utils.rs
index d6facda..dd5b464 100644
--- a/rust/operator-binary/src/framework/role_utils.rs
+++ b/rust/operator-binary/src/framework/role_utils.rs
@@ -17,7 +17,7 @@ use stackable_operator::{
 use super::{
     builder::pod::container::EnvVarSet,
     types::{
-        kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName, ServiceName},
+        kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName},
         operator::{ClusterName, ProductName},
     },
 };
@@ -220,18 +220,6 @@ impl ResourceNames {
         ClusterRoleName::from_str(&format!("{}{SUFFIX}", self.product_name))
             .expect("should be a valid cluster role name")
     }
-
-    pub fn discovery_service_name(&self) -> ServiceName {
-        // compile-time checks
-        const _: () = assert!(
-            ClusterName::MAX_LENGTH <= ServiceName::MAX_LENGTH,
-            "The string `` must not exceed the limit of Service names."
- ); - let _ = ClusterName::IS_RFC_1035_LABEL_NAME; - let _ = ClusterName::IS_VALID_LABEL_VALUE; - - ServiceName::from_str(self.cluster_name.as_ref()).expect("should be a valid Service name") - } } #[cfg(test)] @@ -252,7 +240,7 @@ mod tests { use crate::framework::{ role_utils::with_validated_config, types::{ - kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName, ServiceName}, + kubernetes::{ClusterRoleName, RoleBindingName, ServiceAccountName}, operator::{ClusterName, ProductName}, }, }; @@ -410,9 +398,5 @@ mod tests { ClusterRoleName::from_str_unsafe("my-product-clusterrole"), resource_names.cluster_role_name() ); - assert_eq!( - ServiceName::from_str_unsafe("my-cluster"), - resource_names.discovery_service_name() - ); } } diff --git a/rust/operator-binary/src/framework/types/kubernetes.rs b/rust/operator-binary/src/framework/types/kubernetes.rs index 6c01e24..861e8a9 100644 --- a/rust/operator-binary/src/framework/types/kubernetes.rs +++ b/rust/operator-binary/src/framework/types/kubernetes.rs @@ -40,6 +40,16 @@ attributed_string_type! { is_rfc_1123_dns_subdomain_name } +attributed_string_type! { + Hostname, + "A hostname", + "example.com", + (min_length = 1), + (max_length = 253), + // see https://en.wikipedia.org/wiki/Hostname#Syntax + (regex = "^[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?(\\.[a-zA-Z0-9]([-a-zA-Z0-9]{0,60}[a-zA-Z0-9])?)*\\.?$") +} + attributed_string_type! { ListenerName, "The name of a Listener", @@ -152,7 +162,7 @@ attributed_string_type! { #[cfg(test)] mod tests { use super::{ - ClusterRoleName, ConfigMapKey, ConfigMapName, ContainerName, ListenerClassName, + ClusterRoleName, ConfigMapKey, ConfigMapName, ContainerName, Hostname, ListenerClassName, ListenerName, NamespaceName, PersistentVolumeClaimName, RoleBindingName, SecretClassName, SecretKey, SecretName, ServiceAccountName, ServiceName, StatefulSetName, Uid, VolumeName, }; @@ -163,6 +173,7 @@ mod tests { ConfigMapKey::test_example(); ContainerName::test_example(); ClusterRoleName::test_example(); + Hostname::test_example(); ListenerName::test_example(); ListenerClassName::test_example(); NamespaceName::test_example(); diff --git a/tests/templates/kuttl/backup-restore/22-create-testuser.yaml b/tests/templates/kuttl/backup-restore/22-create-testuser.yaml index 46c3181..6d171a0 100644 --- a/tests/templates/kuttl/backup-restore/22-create-testuser.yaml +++ b/tests/templates/kuttl/backup-restore/22-create-testuser.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,17 +65,16 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOSTNAME'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' # Login as admin client = OpenSearch( + hosts = [{'host': host, 'port': port}], http_auth=('admin', 'AJVFsGJBbpT6mChn'), - hosts=[{ - 'host': f'opensearch-1.{namespace}.svc.cluster.local', - 'port': 9200 - }], http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/backup-restore/23-create-data.yaml b/tests/templates/kuttl/backup-restore/23-create-data.yaml index bcde316..24b9ebe 100644 --- 
a/tests/templates/kuttl/backup-restore/23-create-data.yaml +++ b/tests/templates/kuttl/backup-restore/23-create-data.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,17 +65,16 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOSTNAME'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' # Login as test user client = OpenSearch( + hosts = [{'host': host, 'port': port}], http_auth=('testuser', 'L9hUHtLVVEsrcLzZ'), - hosts=[{ - 'host': f'opensearch-1.{namespace}.svc.cluster.local', - 'port': 9200 - }], http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml b/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml index 48d3d17..ff2c654 100644 --- a/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml +++ b/tests/templates/kuttl/backup-restore/30-create-snapshot.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,16 +65,15 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOSTNAME'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' client = OpenSearch( - hosts=[{ - 'host': f'opensearch-1.{namespace}.svc.cluster.local', - 'port': 9200 - }], + hosts = [{'host': host, 'port': port}], http_auth=('admin', 'AJVFsGJBbpT6mChn'), http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 b/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 index 3bf641b..8da84ca 100644 --- a/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 +++ b/tests/templates/kuttl/backup-restore/31-backup-security-indices.yaml.j2 @@ -15,11 +15,9 @@ spec: {% endif %} command: - /stackable/scripts/backup-security-indices.sh - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-1 volumeMounts: - name: scripts mountPath: /stackable/scripts @@ -131,7 +129,7 @@ data: -cacert config/tls/ca.crt \ -cert config/tls-client/tls.crt \ -key config/tls-client/tls.key \ - --hostname opensearch-1.$NAMESPACE.svc.cluster.local \ + --hostname $OPENSEARCH_HOSTNAME \ -backup /tmp/backup upload-security-indices-backup.sh: | #!/usr/bin/env sh diff --git a/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 b/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 index b1b390c..d2c41fd 100644 --- a/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 +++ b/tests/templates/kuttl/backup-restore/60-restore-security-indices.yaml.j2 @@ -57,11 +57,9 @@ spec: {% endif %} command: - /stackable/scripts/restore-security-indices.sh - 
env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-2 volumeMounts: - name: scripts mountPath: /stackable/scripts @@ -141,5 +139,5 @@ data: -cacert config/tls/ca.crt \ -cert config/tls-client/tls.crt \ -key config/tls-client/tls.key \ - --hostname opensearch-2.$NAMESPACE.svc.cluster.local \ + --hostname $OPENSEARCH_HOSTNAME \ --configdir /tmp/backup diff --git a/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml b/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml index d185dc9..870792a 100644 --- a/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml +++ b/tests/templates/kuttl/backup-restore/61-restore-snapshot.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-2 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,16 +65,15 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOSTNAME'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' client = OpenSearch( - hosts=[{ - 'host': f'opensearch-2.{namespace}.svc.cluster.local', - 'port': 9200 - }], + hosts = [{'host': host, 'port': port}], http_auth=('admin', 'AJVFsGJBbpT6mChn'), http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml b/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml index b5ec26f..0414a6f 100644 --- a/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml +++ b/tests/templates/kuttl/backup-restore/70-test-opensearch-2.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch-2 volumeMounts: - name: script mountPath: /stackable/scripts @@ -66,17 +65,16 @@ data: from opensearchpy import OpenSearch from opensearchpy.exceptions import RequestError - namespace = os.environ['NAMESPACE'] + host = os.environ['OPENSEARCH_HOSTNAME'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' # Login as test user client = OpenSearch( + hosts = [{'host': host, 'port': port}], http_auth=('testuser', 'L9hUHtLVVEsrcLzZ'), - hosts=[{ - 'host': f'opensearch-2.{namespace}.svc.cluster.local', - 'port': 9200 - }], http_compress=True, - use_ssl=True, + use_ssl=http_use_tls, verify_certs=True, ca_certs='/stackable/tls/ca.crt' ) diff --git a/tests/templates/kuttl/external-access/20-assert.yaml b/tests/templates/kuttl/external-access/20-assert.yaml index 6e7c514..1d8b6db 100644 --- a/tests/templates/kuttl/external-access/20-assert.yaml +++ b/tests/templates/kuttl/external-access/20-assert.yaml @@ -59,4 +59,4 @@ kind: Service metadata: name: opensearch spec: - type: ClusterIP + type: NodePort # external-stable diff --git a/tests/templates/kuttl/external-access/opensearch.yaml.j2 b/tests/templates/kuttl/external-access/opensearch.yaml.j2 index 514fef6..71eca2f 100644 --- a/tests/templates/kuttl/external-access/opensearch.yaml.j2 +++ b/tests/templates/kuttl/external-access/opensearch.yaml.j2 @@ -18,6 +18,8 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 
'VECTOR_AGGREGATOR') | length > 0 }} + roleConfig: + discoveryServiceListenerClass: test-external-stable-$NAMESPACE roleGroups: cluster-manager: config: diff --git a/tests/templates/kuttl/ldap/30-test-opensearch.yaml b/tests/templates/kuttl/ldap/30-test-opensearch.yaml index 1c2b31b..ee17e17 100644 --- a/tests/templates/kuttl/ldap/30-test-opensearch.yaml +++ b/tests/templates/kuttl/ldap/30-test-opensearch.yaml @@ -22,10 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch volumeMounts: - name: script mountPath: /stackable/scripts @@ -75,20 +74,17 @@ data: import os from opensearchpy import OpenSearch - namespace = os.environ['NAMESPACE'] - - host = f'opensearch.{namespace}.svc.cluster.local' - port = 9200 - auth = ('integrationtest', 'integrationtest') - ca_certs_path = '/stackable/tls/ca.crt' + host = os.environ['OPENSEARCH_HOSTNAME'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' client = OpenSearch( - hosts = [{'host': host, 'port': port}], - http_compress = True, - http_auth = auth, - use_ssl = True, - verify_certs = True, - ca_certs = ca_certs_path + hosts = [{'host': host, 'port': port}], + http_auth=('integrationtest', 'integrationtest'), + http_compress=True, + use_ssl=http_use_tls, + verify_certs=True, + ca_certs='/stackable/tls/ca.crt' ) # Create an index diff --git a/tests/templates/kuttl/logging/20-assert.yaml.j2 b/tests/templates/kuttl/logging/20-assert.yaml.j2 index e705ea3..76e2a2f 100644 --- a/tests/templates/kuttl/logging/20-assert.yaml.j2 +++ b/tests/templates/kuttl/logging/20-assert.yaml.j2 @@ -10,3 +10,11 @@ metadata: status: readyReplicas: 1 replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: opensearch-nodes-custom +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 b/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 index fbd2371..79e6543 100644 --- a/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 +++ b/tests/templates/kuttl/opensearch-dashboards/20-install-opensearch-dashboards.yaml.j2 @@ -7,11 +7,6 @@ commands: --repo https://opensearch-project.github.io/helm-charts --version "{{ test_scenario['values']['opensearch'].split(',')[0] }}" --values 20_opensearch-dashboards-values.yaml -{% if test_scenario['values']['server-use-tls'] == 'true' %} - --set opensearchHosts=https://opensearch.$NAMESPACE.svc.cluster.local:9200 -{% else %} - --set opensearchHosts=http://opensearch.$NAMESPACE.svc.cluster.local:9200 -{% endif %} --namespace $NAMESPACE --wait timeout: 600 diff --git a/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 b/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 index 30b5b56..38e2a52 100644 --- a/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 +++ b/tests/templates/kuttl/opensearch-dashboards/20_opensearch-dashboards-values.yaml.j2 @@ -9,6 +9,11 @@ serviceAccount: # OpenShift. 
name: opensearch-serviceaccount extraEnvs: + - name: OPENSEARCH_HOSTS + valueFrom: + configMapKeyRef: + name: opensearch + key: OPENSEARCH_HOSTS - name: OPENSEARCH_PASSWORD valueFrom: secretKeyRef: diff --git a/tests/templates/kuttl/smoke/11-assert.yaml.j2 b/tests/templates/kuttl/smoke/11-assert.yaml.j2 index 9e1ce93..2db20d3 100644 --- a/tests/templates/kuttl/smoke/11-assert.yaml.j2 +++ b/tests/templates/kuttl/smoke/11-assert.yaml.j2 @@ -122,7 +122,7 @@ spec: - name: cluster.initial_cluster_manager_nodes value: opensearch-nodes-cluster-manager-0,opensearch-nodes-cluster-manager-1,opensearch-nodes-cluster-manager-2 - name: discovery.seed_hosts - value: opensearch + # value: opensearch-seed-nodes.$NAMESPACE.svc.cluster.local - name: http.publish_host # value: $(_POD_NAME).opensearch-nodes-cluster-manager-headless.$NAMESPACE.svc.cluster.local - name: network.publish_host @@ -178,12 +178,14 @@ spec: subPath: log4j2.properties - mountPath: {{ test_scenario['values']['opensearch_home'] }}/data name: data - - mountPath: /stackable/listener + - mountPath: /stackable/listeners/role-group name: listener - mountPath: /stackable/log name: log - mountPath: {{ test_scenario['values']['opensearch_home'] }}/config/tls/internal name: tls-internal + - mountPath: /stackable/listeners/discovery-service + name: discovery-service-listener {% if test_scenario['values']['server-use-tls'] == 'true' %} - mountPath: {{ test_scenario['values']['opensearch_home'] }}/config/tls/server name: tls-server @@ -275,8 +277,10 @@ spec: volumeClaimTemplate: metadata: annotations: + secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls - secrets.stackable.tech/scope: listener-volume=listener,pod + secrets.stackable.tech/format: tls-pem + secrets.stackable.tech/scope: service=opensearch-seed-nodes,listener-volume=listener,pod spec: accessModes: - ReadWriteOnce @@ -294,7 +298,7 @@ spec: secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls secrets.stackable.tech/format: tls-pem - secrets.stackable.tech/scope: service=opensearch,listener-volume=listener,pod + secrets.stackable.tech/scope: listener-volume=listener,listener-volume=discovery-service-listener,pod spec: accessModes: - ReadWriteOnce @@ -321,8 +325,6 @@ spec: requests: storage: 100Mi volumeMode: Filesystem - status: - phase: Pending - apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -345,8 +347,28 @@ spec: storage: "1" storageClassName: listeners.stackable.tech volumeMode: Filesystem - status: - phase: Pending + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + annotations: + listeners.stackable.tech/listener-name: opensearch + labels: + app.kubernetes.io/component: nodes + app.kubernetes.io/instance: opensearch + app.kubernetes.io/managed-by: opensearch.stackable.tech_opensearchcluster + app.kubernetes.io/name: opensearch + app.kubernetes.io/role-group: cluster-manager + app.kubernetes.io/version: {{ test_scenario['values']['opensearch'].split(',')[0] }} + stackable.tech/vendor: Stackable + name: discovery-service-listener + spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: "1" + storageClassName: listeners.stackable.tech + volumeMode: Filesystem status: readyReplicas: 3 replicas: 3 @@ -466,9 +488,8 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name - - name: cluster.initial_cluster_manager_nodes - name: discovery.seed_hosts - value: opensearch + # value: opensearch-seed-nodes.$NAMESPACE.svc.cluster.local - name: http.publish_host # value: 
$(_POD_NAME).opensearch-nodes-data-headless.$NAMESPACE.svc.cluster.local - name: network.publish_host @@ -524,7 +545,7 @@ spec: subPath: log4j2.properties - mountPath: {{ test_scenario['values']['opensearch_home'] }}/data name: data - - mountPath: /stackable/listener + - mountPath: /stackable/listeners/role-group name: listener - mountPath: /stackable/log name: log @@ -621,7 +642,9 @@ spec: volumeClaimTemplate: metadata: annotations: + secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls + secrets.stackable.tech/format: tls-pem secrets.stackable.tech/scope: listener-volume=listener,pod spec: accessModes: @@ -640,7 +663,7 @@ spec: secrets.stackable.tech/backend.autotls.cert.lifetime: 1d secrets.stackable.tech/class: tls secrets.stackable.tech/format: tls-pem - secrets.stackable.tech/scope: service=opensearch,listener-volume=listener,pod + secrets.stackable.tech/scope: listener-volume=listener,pod spec: accessModes: - ReadWriteOnce @@ -667,8 +690,6 @@ spec: requests: storage: 2Gi volumeMode: Filesystem - status: - phase: Pending - apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -691,8 +712,6 @@ spec: storage: "1" storageClassName: listeners.stackable.tech volumeMode: Filesystem - status: - phase: Pending status: readyReplicas: 2 replicas: 2 @@ -847,7 +866,7 @@ metadata: app.kubernetes.io/name: opensearch app.kubernetes.io/version: {{ test_scenario['values']['opensearch'].split(',')[0] }} stackable.tech/vendor: Stackable - name: opensearch + name: opensearch-seed-nodes ownerReferences: - apiVersion: opensearch.stackable.tech/v1alpha1 controller: true @@ -855,10 +874,6 @@ metadata: name: opensearch spec: ports: - - name: http - port: 9200 - protocol: TCP - targetPort: 9200 - name: transport port: 9300 protocol: TCP @@ -947,7 +962,7 @@ metadata: kind: OpenSearchCluster name: opensearch spec: - className: external-unstable + className: cluster-internal extraPodSelectorLabels: {} ports: - name: http @@ -980,3 +995,54 @@ spec: port: 9200 protocol: TCP publishNotReadyAddresses: null +--- +apiVersion: listeners.stackable.tech/v1alpha1 +kind: Listener +metadata: + labels: + app.kubernetes.io/component: nodes + app.kubernetes.io/instance: opensearch + app.kubernetes.io/managed-by: opensearch.stackable.tech_opensearchcluster + app.kubernetes.io/name: opensearch + app.kubernetes.io/version: {{ test_scenario['values']['opensearch'].split(',')[0] }} + stackable.tech/vendor: Stackable + name: opensearch + ownerReferences: + - apiVersion: opensearch.stackable.tech/v1alpha1 + controller: true + kind: OpenSearchCluster + name: opensearch +spec: + className: external-unstable + extraPodSelectorLabels: {} + ports: + - name: http + port: 9200 + protocol: TCP + publishNotReadyAddresses: null +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: nodes + app.kubernetes.io/instance: opensearch + app.kubernetes.io/managed-by: opensearch.stackable.tech_opensearchcluster + app.kubernetes.io/name: opensearch + app.kubernetes.io/version: {{ test_scenario['values']['opensearch'].split(',')[0] }} + stackable.tech/vendor: Stackable + name: opensearch + ownerReferences: + - apiVersion: opensearch.stackable.tech/v1alpha1 + controller: true + kind: OpenSearchCluster + name: opensearch +data: + # OPENSEARCH_HOSTNAME: ... + # OPENSEARCH_HOSTS: ... + # OPENSEARCH_PORT: ... 
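+  # The hostname, hosts and port values are not asserted because they depend on
+  # the Listener address that is only assigned at runtime.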
+{% if test_scenario['values']['server-use-tls'] == 'true' %} + OPENSEARCH_PROTOCOL: https +{% else %} + OPENSEARCH_PROTOCOL: http +{% endif %} diff --git a/tests/templates/kuttl/smoke/11-install-opensearch.yaml.j2 b/tests/templates/kuttl/smoke/11-install-opensearch.yaml.j2 index 2f49a20..d61f6cf 100644 --- a/tests/templates/kuttl/smoke/11-install-opensearch.yaml.j2 +++ b/tests/templates/kuttl/smoke/11-install-opensearch.yaml.j2 @@ -22,19 +22,22 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleConfig: + discoveryServiceListenerClass: external-unstable roleGroups: cluster-manager: config: + discoveryServiceExposed: true nodeRoles: - cluster_manager resources: storage: data: capacity: 100Mi - listenerClass: external-unstable replicas: 3 data: config: + discoveryServiceExposed: false nodeRoles: - ingest - data @@ -43,7 +46,6 @@ spec: storage: data: capacity: 2Gi - listenerClass: cluster-internal replicas: 2 envOverrides: # Only required for the official image diff --git a/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2 b/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2 index f63d3b1..1fc0950 100644 --- a/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2 +++ b/tests/templates/kuttl/smoke/20-test-opensearch.yaml.j2 @@ -22,12 +22,9 @@ spec: # required for pip install - name: HOME value: /stackable - - name: HTTP_USE_TLS - value: "{{ test_scenario['values']['server-use-tls'] }}" - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: opensearch volumeMounts: - name: script mountPath: /stackable/scripts @@ -51,23 +48,22 @@ spec: configMap: name: test-opensearch - name: tls - ephemeral: - volumeClaimTemplate: - metadata: - annotations: - secrets.stackable.tech/class: tls - spec: - storageClassName: secrets.stackable.tech - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "1" + configMap: + name: truststore-pem serviceAccountName: test-service-account securityContext: fsGroup: 1000 restartPolicy: OnFailure --- +apiVersion: secrets.stackable.tech/v1alpha1 +kind: TrustStore +metadata: + name: truststore-pem +spec: + secretClassName: tls + format: tls-pem + targetKind: ConfigMap +--- apiVersion: v1 kind: ConfigMap metadata: @@ -79,11 +75,10 @@ data: import os from opensearchpy import OpenSearch - namespace = os.environ['NAMESPACE'] - http_use_tls = os.environ['HTTP_USE_TLS'] == 'true' + host = os.environ['OPENSEARCH_HOSTNAME'] + port = os.environ['OPENSEARCH_PORT'] + http_use_tls = os.environ['OPENSEARCH_PROTOCOL'] == 'https' - host = f'opensearch.{namespace}.svc.cluster.local' - port = 9200 auth = ('admin', 'AJVFsGJBbpT6mChn') # For testing only. Don't store credentials in code. ca_certs_path = '/stackable/tls/ca.crt'