Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -99,4 +99,5 @@ bundle_*/
test/secret/*.log
kubeconfig
.devcontainer/devcontainer.json
kuttl-artifacts/*
kuttl-artifacts/*
.skaffold/
25 changes: 25 additions & 0 deletions aws/ecr_ensure_repo.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Ensure an ECR repository exists in the target region, creating it if missing.
#
# Usage: ecr_ensure_repo.sh <repo-name>
# Env:   AWS_REGION  — target region (default: us-west-2)
# Exit:  0 on exists/created, 1 on missing aws CLI, 2 on usage error.
set -euo pipefail

region="${AWS_REGION:-us-west-2}"
repo="${1:-}"

if [[ -z "${repo}" ]]; then
  # Usage/diagnostic output goes to stderr so stdout stays clean for callers
  # that capture it (e.g. `repo_status=$(ecr_ensure_repo.sh ...)`).
  echo "usage: $0 <repo-name>" >&2
  echo "example: $0 vivek/splunk-operator" >&2
  exit 2
fi

if ! command -v aws >/dev/null 2>&1; then
  echo "aws CLI not found" >&2
  exit 1
fi

# describe-repositories exits non-zero when the repository does not exist.
# NOTE(review): auth/network failures also land in this branch and fall
# through to create-repository, whose own error then surfaces via `set -e`.
if aws ecr describe-repositories --region "${region}" --repository-names "${repo}" >/dev/null 2>&1; then
  echo "exists: ${repo}"
  exit 0
fi

aws ecr create-repository --region "${region}" --repository-name "${repo}" >/dev/null
echo "created: ${repo}"

19 changes: 19 additions & 0 deletions aws/ecr_login.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Log Docker in to the account's ECR registry using a short-lived token
# from `aws ecr get-login-password`.
#
# Env: AWS_REGION     — target region (default: us-west-2)
#      AWS_ACCOUNT_ID — registry account (default: 667741767953)
set -euo pipefail

region="${AWS_REGION:-us-west-2}"
account_id="${AWS_ACCOUNT_ID:-667741767953}"
registry="${account_id}.dkr.ecr.${region}.amazonaws.com"

# Diagnostics go to stderr so stdout stays clean for scripted callers.
if ! command -v aws >/dev/null 2>&1; then
  echo "aws CLI not found" >&2
  exit 1
fi
if ! command -v docker >/dev/null 2>&1; then
  echo "docker not found" >&2
  exit 1
fi

# --password-stdin keeps the token out of argv/process listings.
aws ecr get-login-password --region "${region}" | docker login --username AWS --password-stdin "${registry}"
echo "logged in: ${registry}"

14 changes: 14 additions & 0 deletions config/skaffold-ecr-vivek/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - ../default

# Override the operator manager image to ECR so Skaffold can build/push and
# deploy without `make deploy`.
images:
  - name: docker.io/splunk/splunk-operator
    newName: 667741767953.dkr.ecr.us-west-2.amazonaws.com/vivek/splunk-operator
    newTag: latest

# `patchesStrategicMerge` is deprecated (removed in kustomize v5 warnings
# path); `patches` with a path entry is the supported equivalent.
patches:
  - path: skaffold_env_patch.yaml
26 changes: 26 additions & 0 deletions config/skaffold-ecr-vivek/skaffold_env_patch.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Strategic-merge patch applied by the skaffold-ecr-vivek overlay.
# Supplies the env values that `make deploy` normally injects via sed
# placeholders, so `skaffold dev` deploys a working manager end-to-end.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: splunk-operator-controller-manager
  namespace: splunk-operator
spec:
  template:
    spec:
      containers:
        - name: manager
          env:
            # Empty value => watch all namespaces.
            - name: WATCH_NAMESPACE
              value: ""
            # Splunk General Terms acceptance; update if your org requires a
            # different SGT acceptance string.
            - name: SPLUNK_GENERAL_TERMS
              value: "--accept-sgt-current-at-splunk-com"
            # Multi-container pod orchestration (distroless Splunk + init + sidecar).
            - name: SPLUNK_POD_ARCH
              value: "multi-container"
            - name: RELATED_IMAGE_SPLUNK_INIT
              value: "667741767953.dkr.ecr.us-west-2.amazonaws.com/splunk-init:latest"
            - name: RELATED_IMAGE_SPLUNK_SIDECAR
              value: "667741767953.dkr.ecr.us-west-2.amazonaws.com/splunk-sidecar:latest"
9 changes: 9 additions & 0 deletions config/skaffold/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - ../default

# `patchesStrategicMerge` is deprecated; `patches` with a path entry is the
# supported equivalent in current kustomize/kubectl.
patches:
  - path: skaffold_env_patch.yaml

22 changes: 22 additions & 0 deletions config/skaffold/skaffold_env_patch.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Strategic-merge patch applied by the skaffold overlay.
# Sets concrete env values (normally sed-substituted by `make deploy`) so
# that `skaffold dev` can deploy the manager without Makefile processing.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: splunk-operator-controller-manager
  namespace: splunk-operator
spec:
  template:
    spec:
      containers:
        - name: manager
          env:
            # Empty value => watch all namespaces.
            - name: WATCH_NAMESPACE
              value: ""
            # Splunk General Terms acceptance string.
            - name: SPLUNK_GENERAL_TERMS
              value: "--accept-sgt-current-at-splunk-com"
            # Multi-container pod orchestration (distroless Splunk + init + sidecar).
            - name: SPLUNK_POD_ARCH
              value: "multi-container"
            - name: RELATED_IMAGE_SPLUNK_INIT
              value: "667741767953.dkr.ecr.us-west-2.amazonaws.com/splunk-init:latest"
            - name: RELATED_IMAGE_SPLUNK_SIDECAR
              value: "667741767953.dkr.ecr.us-west-2.amazonaws.com/splunk-sidecar:latest"
33 changes: 33 additions & 0 deletions docs/CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,39 @@ We can always use improvements to our documentation! Anyone can contribute to th

You can also edit documentation files directly in the GitHub web interface, without creating a local copy. This can be convenient for small typos or grammar fixes.

## Skaffold (Dev + CI/CD)

This fork supports Skaffold-based build/push/deploy loops for the operator manager image.

### ECR + EKS

1. Authenticate Docker to ECR and ensure the image repository exists (adjust via `AWS_ACCOUNT_ID` / `AWS_REGION` if needed):

```bash
./aws/ecr_login.sh
./aws/ecr_ensure_repo.sh vivek/splunk-operator
```

2. Build, push, and deploy against a Kubernetes context (example: `vivek-ipv6-splunk-20260227`):

```bash
skaffold dev -p ecr-vivek --kube-context vivek-ipv6-splunk-20260227
```

Notes:
- Profile `ecr-vivek` deploys `config/skaffold-ecr-vivek` which sets concrete env values (no `make deploy` placeholder substitution required).
- Multi-container pods are enabled via `SPLUNK_POD_ARCH=multi-container`.
- Update init/sidecar image envs in `config/skaffold-ecr-vivek/skaffold_env_patch.yaml` when publishing new images.

### Make Deploy (Same Overlay)

If you prefer `make deploy`, you can use the same overlay:

```bash
make docker-buildx IMG=667741767953.dkr.ecr.us-west-2.amazonaws.com/vivek/splunk-operator:dev PLATFORMS=linux/amd64
make deploy ENVIRONMENT=skaffold-ecr-vivek IMG=667741767953.dkr.ecr.us-west-2.amazonaws.com/vivek/splunk-operator:dev
```

## Maintainers

If you need help, tag one of the active maintainers of this project in a post or comment. We'll do our best to reach out to you as quickly as we can.
Expand Down
189 changes: 189 additions & 0 deletions pkg/splunk/enterprise/configuration.go
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,10 @@ func getSplunkService(ctx context.Context, cr splcommon.MetaObject, spec *enterp
// required for SHC bootstrap process; use services with heads when readiness is desired
service.Spec.PublishNotReadyAddresses = true
}
if isHeadless && isMultiContainerPodEnabled() {
// In multi-container mode the operator may need to reach the sidecar before the pod is Ready.
service.Spec.PublishNotReadyAddresses = true
}

service.SetOwnerReferences(append(service.GetOwnerReferences(), splcommon.AsOwner(cr, true)))

Expand Down Expand Up @@ -818,6 +822,9 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con
// Add custom ports to splunk containers
if spec.ServiceTemplate.Spec.Ports != nil {
for idx := range podTemplateSpec.Spec.Containers {
if podTemplateSpec.Spec.Containers[idx].Name != "splunk" {
continue
}
for _, p := range spec.ServiceTemplate.Spec.Ports {

podTemplateSpec.Spec.Containers[idx].Ports = append(podTemplateSpec.Spec.Containers[idx].Ports, corev1.ContainerPort{
Expand All @@ -833,6 +840,9 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con
if spec.Volumes != nil {
podTemplateSpec.Spec.Volumes = append(podTemplateSpec.Spec.Volumes, spec.Volumes...)
for idx := range podTemplateSpec.Spec.Containers {
if podTemplateSpec.Spec.Containers[idx].Name != "splunk" {
continue
}
for v := range spec.Volumes {
podTemplateSpec.Spec.Containers[idx].VolumeMounts = append(podTemplateSpec.Spec.Containers[idx].VolumeMounts, corev1.VolumeMount{
Name: spec.Volumes[v].Name,
Expand Down Expand Up @@ -1087,9 +1097,188 @@ func updateSplunkPodTemplateWithConfig(ctx context.Context, client splcommon.Con
env = removeDuplicateEnvVars(env)
}

// Multi-container mode: inject init + sidecar, and rewire Splunk probes to HTTP so the main container can be distroless.
// This is strictly opt-in via SPLUNK_POD_ARCH to avoid changing legacy behavior and fixtures.
if isMultiContainerPodEnabled() {
// Ensure podTemplate annotations map is initialized (we may append later in other paths).
if podTemplateSpec.ObjectMeta.Annotations == nil {
podTemplateSpec.ObjectMeta.Annotations = make(map[string]string)
}

// Sidecar health endpoints back Splunk probes via kubelet httpGet.
livenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz/pod-live",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: livenessProbeDefaultDelaySec,
TimeoutSeconds: livenessProbeTimeoutSec,
PeriodSeconds: livenessProbePeriodSec,
FailureThreshold: livenessProbeFailureThreshold,
}
startupProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz/pod-startup",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: startupProbeDefaultDelaySec,
TimeoutSeconds: startupProbeTimeoutSec,
PeriodSeconds: startupProbePeriodSec,
FailureThreshold: startupProbeFailureThreshold,
}
readinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz/pod-ready",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: readinessProbeDefaultDelaySec,
TimeoutSeconds: readinessProbeTimeoutSec,
PeriodSeconds: readinessProbePeriodSec,
FailureThreshold: readinessProbeFailureThreshold,
}

// Choose sidecar role. For SHC members we want strict gating until SHC join completes.
sidecarRole := instanceType.ToString()
if instanceType == SplunkSearchHead {
if strings.EqualFold(cr.GetObjectKind().GroupVersionKind().Kind, "SearchHeadCluster") {
sidecarRole = "shc-member"
}
}

// Copy selected mounts from the Splunk container so the sidecar and init container see the same filesystem.
var splunkVM []corev1.VolumeMount
for i := range podTemplateSpec.Spec.Containers {
if podTemplateSpec.Spec.Containers[i].Name == "splunk" {
splunkVM = append([]corev1.VolumeMount(nil), podTemplateSpec.Spec.Containers[i].VolumeMounts...)
break
}
}
needMount := func(mountPath string) bool {
switch mountPath {
case "/opt/splunk/etc", "/opt/splunk/var", "/mnt/splunk-secrets":
return true
default:
// also propagate custom /mnt/<volume> mounts
return strings.HasPrefix(mountPath, "/mnt/")
}
}
sharedMounts := make([]corev1.VolumeMount, 0, len(splunkVM))
for _, m := range splunkVM {
if needMount(m.MountPath) {
sharedMounts = append(sharedMounts, m)
}
}

// Inject sidecar container (if image provided).
if img := strings.TrimSpace(GetSplunkSidecarImage()); img != "" {
privileged := false
sc := corev1.Container{
Name: "splunk-sidecar",
Image: img,
ImagePullPolicy: corev1.PullPolicy(spec.ImagePullPolicy),
Ports: []corev1.ContainerPort{
{Name: "sidecar-http", ContainerPort: 8080, Protocol: corev1.ProtocolTCP},
{Name: "sidecar-metrics", ContainerPort: 8081, Protocol: corev1.ProtocolTCP},
},
Env: []corev1.EnvVar{
{Name: "SPLUNK_HOME", Value: "/opt/splunk"},
{Name: "SPLUNK_ROLE", Value: sidecarRole},
{Name: "WATCH_PATHS", Value: "/mnt/certificates,/mnt/splunk-secrets"},
},
VolumeMounts: sharedMounts,
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{Path: "/healthz/live", Port: intstr.FromInt(8080)},
},
PeriodSeconds: 10,
TimeoutSeconds: 5,
FailureThreshold: 3,
},
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{Path: "/healthz/ready", Port: intstr.FromInt(8080)},
},
PeriodSeconds: 10,
TimeoutSeconds: 5,
FailureThreshold: 3,
},
SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser,
RunAsNonRoot: &runAsNonRoot,
AllowPrivilegeEscalation: &[]bool{false}[0],
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
Add: []corev1.Capability{"NET_BIND_SERVICE"},
},
Privileged: &privileged,
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
},
}

// Upsert by name.
found := false
for i := range podTemplateSpec.Spec.Containers {
if podTemplateSpec.Spec.Containers[i].Name == sc.Name {
podTemplateSpec.Spec.Containers[i] = sc
found = true
break
}
}
if !found {
podTemplateSpec.Spec.Containers = append(podTemplateSpec.Spec.Containers, sc)
}
}

// Inject init container (if image provided).
if img := strings.TrimSpace(GetSplunkInitImage()); img != "" {
ic := corev1.Container{
Name: "splunk-init",
Image: img,
ImagePullPolicy: corev1.PullPolicy(spec.ImagePullPolicy),
Env: []corev1.EnvVar{
{Name: "SPLUNK_HOME", Value: "/opt/splunk"},
{Name: "SPLUNK_ROLE", Value: role},
// Reuse the same "defaults URL" logic to seed the init pipeline.
{Name: "SPLUNK_CONFIG_SOURCES", Value: splunkDefaults},
},
VolumeMounts: sharedMounts,
SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser,
RunAsNonRoot: &runAsNonRoot,
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
},
}

found := false
for i := range podTemplateSpec.Spec.InitContainers {
if podTemplateSpec.Spec.InitContainers[i].Name == ic.Name {
podTemplateSpec.Spec.InitContainers[i] = ic
found = true
break
}
}
if !found {
podTemplateSpec.Spec.InitContainers = append(podTemplateSpec.Spec.InitContainers, ic)
}
}
}

privileged := false
// update each container in pod
for idx := range podTemplateSpec.Spec.Containers {
if podTemplateSpec.Spec.Containers[idx].Name != "splunk" {
continue
}
podTemplateSpec.Spec.Containers[idx].Resources = spec.Resources
podTemplateSpec.Spec.Containers[idx].LivenessProbe = livenessProbe
podTemplateSpec.Spec.Containers[idx].ReadinessProbe = readinessProbe
Expand Down
Loading
Loading